[
  {
    "path": ".all-contributorsrc",
    "content": "{\n  \"projectName\": \"openmodelz\",\n  \"projectOwner\": \"tensorchord\",\n  \"repoType\": \"github\",\n  \"repoHost\": \"https://github.com\",\n  \"files\": [\n    \"README.md\"\n  ],\n  \"imageSize\": 70,\n  \"commit\": true,\n  \"commitConvention\": \"angular\",\n  \"contributorsSortAlphabetically\": true,\n  \"contributors\": [\n    {\n      \"login\": \"gaocegege\",\n      \"name\": \"Ce Gao\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/5100735?v=4\",\n      \"profile\": \"https://github.com/gaocegege\",\n      \"contributions\": [\n        \"code\",\n        \"review\",\n        \"tutorial\"\n      ]\n    },\n    {\n      \"login\": \"tddschn\",\n      \"name\": \"Teddy Xinyuan Chen\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/45612704?v=4\",\n      \"profile\": \"https://github.com/tddschn\",\n      \"contributions\": [\n        \"doc\"\n      ]\n    },\n    {\n      \"login\": \"VoVAllen\",\n      \"name\": \"Jinjing Zhou\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/8686776?v=4\",\n      \"profile\": \"https://github.com/VoVAllen\",\n      \"contributions\": [\n        \"question\",\n        \"bug\",\n        \"ideas\"\n      ]\n    },\n    {\n      \"login\": \"kemingy\",\n      \"name\": \"Keming\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/12974685?v=4\",\n      \"profile\": \"https://blog.mapotofu.org/\",\n      \"contributions\": [\n        \"code\",\n        \"design\",\n        \"infra\"\n      ]\n    },\n    {\n      \"login\": \"cutecutecat\",\n      \"name\": \"cutecutecat\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/19801166?v=4\",\n      \"profile\": \"https://github.com/cutecutecat\",\n      \"contributions\": [\n        \"ideas\"\n      ]\n    },\n    {\n      \"login\": \"xieydd\",\n      \"name\": \"xieydd\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/20329697?v=4\",\n      \"profile\": 
\"https://xieydd.github.io/\",\n      \"contributions\": [\n        \"ideas\"\n      ]\n    },\n    {\n      \"login\": \"Xuanwo\",\n      \"name\": \"Xuanwo\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/5351546?v=4\",\n      \"profile\": \"https://xuanwo.io/\",\n      \"contributions\": [\n        \"content\",\n        \"design\",\n        \"ideas\"\n      ]\n    },\n    {\n      \"login\": \"Zheaoli\",\n      \"name\": \"Nadeshiko Manju\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/7054676?v=4\",\n      \"profile\": \"http://manjusaka.itscoder.com/\",\n      \"contributions\": [\n        \"bug\",\n        \"design\",\n        \"ideas\"\n      ]\n    },\n    {\n      \"login\": \"zwpaper\",\n      \"name\": \"Wei Zhang\",\n      \"avatar_url\": \"https://avatars.githubusercontent.com/u/3764335?v=4\",\n      \"profile\": \"https://page.codespaper.com\",\n      \"contributions\": [\n        \"code\"\n      ]\n    }\n  ],\n  \"contributorsPerLine\": 7,\n  \"commitType\": \"docs\"\n}\n"
  },
  {
    "path": ".github/workflows/CI.yaml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n      - main\n    paths:\n      - '.github/workflows/**'\n      - '**.go'\n      - '**/Makefile'\n      - 'go.**'\n  pull_request:\n    paths:\n      - '.github/workflows/**'\n      - '**.go'\n      - '**/Makefile'\n      - 'go.**'\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  typos-check:\n    name: Spell Check with Typos\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout Actions Repository\n        uses: actions/checkout@v3\n      - name: Check spelling with custom config file\n        uses: crate-ci/typos@v1.16.2\n        with:\n          config: ./typos.toml\n  test:\n    name: test\n    strategy:\n      matrix:\n        os: [ubuntu-latest]\n        dir: [\"agent\", \"autoscaler\", \"ingress-operator\", \"mdz\", \"modelzetes\"]\n    runs-on: ${{ matrix.os }}\n    steps:\n      - name: Check out code\n        uses: actions/checkout@v3\n      - name: Setup Go\n        uses: actions/setup-go@v4\n        with:\n          go-version: 1.19\n      - uses: actions/cache@v3\n        with:\n          path: |\n            ~/.cache/go-build\n            ~/go/pkg/mod\n          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}\n          restore-keys: |\n            ${{ runner.os }}-go-\n      - name: test\n        run: |\n          cd ${{ matrix.dir }}\n          make fmt\n          git diff --exit-code || (echo 'Please run \"make fmt\" to format code' && exit 1);\n          make\n          go test -race -coverprofile=${{ matrix.dir }}.out -covermode=atomic ./...\n      - name: Upload coverage report\n        uses: actions/upload-artifact@v3\n        with:\n          name: ${{ matrix.dir }}-out\n          path: ${{ matrix.dir }}/${{ matrix.dir }}.out\n  report:\n    needs:\n      - test\n      - typos-check\n    runs-on: ubuntu-latest\n    steps:\n      - name: Check out code\n        uses: actions/checkout@v3\n      - name: Setup Go\n     
   uses: actions/setup-go@v4\n        with:\n          go-version: 1.19\n      - name: Install bins\n        run: |\n          go install github.com/mattn/goveralls@latest\n      - name: Get agent coverage report\n        uses: actions/download-artifact@v3\n        with:\n          name: agent-out\n          path: merge\n      - name: Get autoscaler coverage report\n        uses: actions/download-artifact@v3\n        with:\n          name: autoscaler-out\n          path: merge\n      - name: Get ingress-operator coverage report\n        uses: actions/download-artifact@v3\n        with:\n          name: ingress-operator-out\n          path: merge\n      - name: Get mdz coverage report\n        uses: actions/download-artifact@v3\n        with:\n          name: mdz-out\n          path: merge\n      - name: Get modelzetes coverage report\n        uses: actions/download-artifact@v3\n        with:\n          name: modelzetes-out\n          path: merge\n      - name: Merge all coverage reports\n        uses: cutecutecat/go-cover-merge@v1\n        with:\n          input_dir: merge\n          output_file: final.out\n      - name: Send coverage\n        env:\n          COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          goveralls -coverprofile=final.out -service=github\n"
  },
  {
    "path": ".github/workflows/publish.yaml",
    "content": "name: release\n\non:\n  release:\n    types: [published]\n  pull_request:\n    paths:\n    - '.github/workflows/release.yml'\n    - '.goreleaser/'\n    - '.goreleaser.yaml'\n\njobs:\n  goreleaser:\n    if: github.repository == 'tensorchord/openmodelz'\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v3\n        with:\n          fetch-depth: 0\n      - name: Set up Go\n        uses: actions/setup-go@v4\n        with:\n          go-version: 1.19\n      - name: Docker Login\n        uses: docker/login-action@v2\n        with:\n          username: ${{ secrets.DOCKERIO_USERNAME }}\n          password: ${{ secrets.DOCKERIO_TOKEN }}\n      - name: Run GoReleaser\n        uses: goreleaser/goreleaser-action@v4\n        with:\n          distribution: goreleaser\n          version: latest\n          args: release --rm-dist\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n      - name: upload gobin\n        uses: actions/upload-artifact@v3\n        with:\n          name: gobin_${{ github.event.release.tag_name }}\n          retention-days: 1\n          path: |\n            dist/mdz_linux_amd64_v1/mdz\n          if-no-files-found: error\n  pypi_publish:\n    needs: goreleaser\n    # only trigger on main repo when tag starts with v\n    if: github.repository == 'tensorchord/openmodelz' && startsWith(github.ref, 'refs/tags/v')\n    runs-on: ${{ matrix.os }}\n    timeout-minutes: 20\n    strategy:\n      matrix:\n        os: [ubuntu-20.04]\n    steps:\n    - uses: actions/checkout@v3\n    - name: Get gobin\n      uses: actions/download-artifact@v3\n      with:\n        name: gobin_${{ github.event.release.tag_name }}\n        path: dist/\n    - name: Configure linux build environment\n      if: runner.os == 'Linux'\n      run: |\n        mkdir -p mdz/bin\n        mv dist/mdz mdz/bin/mdz\n        chmod +x mdz/bin/mdz\n    - name: Build wheels\n      uses: pypa/cibuildwheel@v2.14.1\n    - name: Build 
source distribution\n      if: runner.os == 'Linux' # Only release source under linux to avoid conflict\n      run: |\n        python -m pip install wheel setuptools_scm\n        python setup.py sdist\n        mv dist/*.tar.gz wheelhouse/\n    - name: Upload to PyPI\n      env:\n        TWINE_USERNAME: __token__\n        TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}\n      run: |\n        python -m pip install --upgrade pip\n        python -m pip install twine\n        python -m twine upload wheelhouse/*\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n_version.txt\n_version.py\nwheelhouse/\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\n.ruff_cache/\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# poetry\n#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\n#   commonly ignored for libraries.\n#   
https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\n#poetry.lock\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/#use-with-ide\n.pdm.toml\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n# PyCharm\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n.idea/\n\n# If you prefer the allow list template instead of the deny list, see community template:\n# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore\n#\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Dependency directories (remove the comment below to include it)\n# vendor/\n\n# Go workspace file\ngo.work\n"
  },
  {
    "path": ".goreleaser.yaml",
    "content": "project_name: openmodelz\nbuilds:\n  - env:\n      - CGO_ENABLED=0\n    goos:\n      - linux\n    goarch:\n      - amd64\n    id: modelzetes\n    main: ./cmd/modelzetes/main.go\n    dir: ./modelzetes\n    binary: modelzetes\n    ldflags:\n      - -s -w\n      - -X github.com/tensorchord/openmodelz/modelzetes/pkg/version.version={{ .Version }}\n      - -X github.com/tensorchord/openmodelz/modelzetes/pkg/version.buildDate={{ .Date }}\n      - -X github.com/tensorchord/openmodelz/modelzetes/pkg/version.gitCommit={{ .Commit }}\n      - -X github.com/tensorchord/openmodelz/modelzetes/pkg/version.gitTreeState=clean\n      - -X github.com/tensorchord/openmodelz/modelzetes/pkg/version.gitTag={{ .Tag }}\n  - env:\n      - CGO_ENABLED=0\n    goos:\n      - linux\n    goarch:\n      - amd64\n    id: agent\n    main: ./cmd/agent/main.go\n    dir: ./agent\n    binary: agent\n    ldflags:\n      - -s -w\n      - -X github.com/tensorchord/openmodelz/agent/pkg/version.version={{ .Version }}\n      - -X github.com/tensorchord/openmodelz/agent/pkg/version.buildDate={{ .Date }}\n      - -X github.com/tensorchord/openmodelz/agent/pkg/version.gitCommit={{ .Commit }}\n      - -X github.com/tensorchord/openmodelz/agent/pkg/version.gitTreeState=clean\n      - -X github.com/tensorchord/openmodelz/agent/pkg/version.gitTag={{ .Tag }}\n  - env:\n      - CGO_ENABLED=0\n    goos:\n      - linux\n    goarch:\n      - amd64\n    id: mdz\n    main: ./cmd/mdz/main.go\n    dir: ./mdz\n    binary: mdz\n    ldflags:\n      - -s -w\n      - -X github.com/tensorchord/openmodelz/mdz/pkg/version.version={{ .Version }}\n      - -X github.com/tensorchord/openmodelz/mdz/pkg/version.buildDate={{ .Date }}\n      - -X github.com/tensorchord/openmodelz/mdz/pkg/version.gitCommit={{ .Commit }}\n      - -X github.com/tensorchord/openmodelz/mdz/pkg/version.gitTreeState=clean\n      - -X github.com/tensorchord/openmodelz/mdz/pkg/version.gitTag={{ .Tag }}\n  - env:\n      - CGO_ENABLED=0\n    goos:\n   
   - linux\n    goarch:\n      - amd64\n    id: autoscaler\n    main: ./cmd/autoscaler/main.go\n    dir: ./autoscaler\n    binary: autoscaler\n    ldflags:\n      - -s -w\n      - -X github.com/tensorchord/openmodelz/autoscaler/pkg/version.version={{ .Version }}\n      - -X github.com/tensorchord/openmodelz/autoscaler/pkg/version.buildDate={{ .Date }}\n      - -X github.com/tensorchord/openmodelz/autoscaler/pkg/version.gitCommit={{ .Commit }}\n      - -X github.com/tensorchord/openmodelz/autoscaler/pkg/version.gitTreeState=clean\n      - -X github.com/tensorchord/openmodelz/autoscaler/pkg/version.gitTag={{ .Tag }}\n  - env:\n      - CGO_ENABLED=0\n    goos:\n      - linux\n    goarch:\n      - amd64\n    id: ingress-operator\n    main: ./cmd/ingress-operator/main.go\n    dir: ./ingress-operator\n    binary: ingress-operator\n    ldflags:\n      - -s -w\n      - -X github.com/tensorchord/openmodelz/ingress-operator/pkg/version.version={{ .Version }}\n      - -X github.com/tensorchord/openmodelz/ingress-operator/pkg/version.buildDate={{ .Date }}\n      - -X github.com/tensorchord/openmodelz/ingress-operator/pkg/version.gitCommit={{ .Commit }}\n      - -X github.com/tensorchord/openmodelz/ingress-operator/pkg/version.gitTreeState=clean\n      - -X github.com/tensorchord/openmodelz/ingress-operator/pkg/version.gitTag={{ .Tag }}\narchives:\n  - id: mdz\n    format: binary\n    builds:\n      - mdz\n    name_template: >-\n      {{ .Binary }}_{{ .Version }}_{{- title .Os }}_\n      {{- if eq .Arch \"amd64\" }}x86_64\n      {{- else if eq .Arch \"386\" }}i386\n      {{- else }}{{ .Arch }}{{ end }}\nchecksum:\n  name_template: 'checksums.txt'\nsnapshot:\n  name_template: \"{{ incpatch .Version }}-next\"\nchangelog:\n  use: github\n  sort: asc\n  groups:\n    - title: 'Exciting New Features 🎉'\n      regexp: \"^.*feat.*\"\n      order: 0\n    - title: 'Bug Fix 🛠'\n      regexp: \"^.*(Fix|fix|bug).*\"\n      order: 1\n    - title: 'Refactor 🏗️'\n      regexp: 
\"^.*refact.*\"\n      order: 2\n    - title: 'Documentation 🖊️'\n      regexp: \"^.*docs.*\"\n      order: 3\n    - title: 'Others:'\n      order: 999\ndockers:\n- image_templates:\n  - \"modelzai/openmodelz-modelzetes:v{{ .Version }}-amd64\"\n  use: buildx\n  dockerfile: modelzetes/Dockerfile\n  ids:\n  - modelzetes\n  build_flag_templates:\n  - \"--platform=linux/amd64\"\n# - image_templates:\n#   - \"modelzai/modelzetes:v{{ .Version }}-arm64v8\"\n#   use: buildx\n#   goarch: arm64\n#   ids:\n#   - modelzetes\n#   dockerfile: modelzetes/Dockerfile\n#   build_flag_templates:\n#   - \"--platform=linux/arm64/v8\"\n\n- image_templates:\n  - \"modelzai/openmodelz-agent:v{{ .Version }}-amd64\"\n  use: buildx\n  dockerfile: agent/Dockerfile\n  ids:\n  - agent\n  build_flag_templates:\n  - \"--platform=linux/amd64\"\n# - image_templates:\n#   - \"modelzai/modelz-agent:v{{ .Version }}-arm64v8\"\n#   use: buildx\n#   goarch: arm64\n#   ids:\n#   - agent\n#   dockerfile: agent/Dockerfile\n#   build_flag_templates:\n#   - \"--platform=linux/arm64/v8\"\n\n- image_templates:\n  - \"modelzai/openmodelz-autoscaler:v{{ .Version }}-amd64\"\n  use: buildx\n  dockerfile: autoscaler/Dockerfile\n  ids:\n  - autoscaler\n  build_flag_templates:\n  - \"--platform=linux/amd64\"\n# - image_templates:\n#   - \"modelzai/modelz-autoscaler:v{{ .Version }}-arm64v8\"\n#   use: buildx\n#   goarch: arm64\n#   ids:\n#   - autoscaler\n#   dockerfile: autoscaler/Dockerfile\n#   build_flag_templates:\n#   - \"--platform=linux/arm64/v8\"\n\n- image_templates:\n  - \"modelzai/openmodelz-ingress-operator:v{{ .Version }}-amd64\"\n  use: buildx\n  dockerfile: ingress-operator/Dockerfile\n  ids:\n  - ingress-operator\n  build_flag_templates:\n  - \"--platform=linux/amd64\"\n\ndocker_manifests:\n- name_template: modelzai/openmodelz-modelzetes:v{{ .Version }}\n  image_templates:\n  - modelzai/openmodelz-modelzetes:v{{ .Version }}-amd64\n  # - modelzai/modelzetes:v{{ .Version }}-arm64v8\n- name_template: 
modelzai/openmodelz-agent:v{{ .Version }}\n  image_templates:\n  - modelzai/openmodelz-agent:v{{ .Version }}-amd64\n  # - modelzai/modelz-agent:v{{ .Version }}-arm64v8\n- name_template: modelzai/openmodelz-autoscaler:v{{ .Version }}\n  image_templates:\n  - modelzai/openmodelz-autoscaler:v{{ .Version }}-amd64\n  # - modelzai/modelz-autoscaler:v{{ .Version }}-arm64v8\n- name_template: modelzai/openmodelz-ingress-operator:v{{ .Version }}\n  image_templates:\n  - modelzai/openmodelz-ingress-operator:v{{ .Version }}-amd64\n  # - modelzai/ingress-operator:v{{ .Version }}-arm64v8\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "prune autoscaler\nprune ingress-operator\nprune modelzetes\nprune .github\ninclude LICENSE\ninclude README.md\ninclude .goreleaser.yaml\ninclude mdz/Makefile mdz/go.mod mdz/go.sum mdz/LICENSE\ngraft mdz/pkg\ngraft mdz/cmd\nprune mdz/bin\nprune mdz/docs\nprune mdz/examples\ngraft agent/pkg\nprune agent/bin\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n\n# OpenModelZ\n\n</div>\n\n<p align=center>\n<a href=\"https://discord.gg/KqswhpVgdU\"><img alt=\"discord invitation link\" src=\"https://dcbadge.vercel.app/api/server/KqswhpVgdU?style=flat\"></a>\n<a href=\"https://twitter.com/TensorChord\"><img src=\"https://img.shields.io/twitter/follow/tensorchord?style=social\" alt=\"trackgit-views\" /></a>\n<a href=\"https://docs.open.modelz.ai\"><img src=\"https://img.shields.io/badge/docs.open.modelz.ai-455946.svg?style=socail&logo=googlechrome&logoColor=white\" alt=\"docs\" /></a>\n<a href=\"https://github.com/tensorchord/openmodelz#contributors-\"><img alt=\"all-contributors\" src=\"https://img.shields.io/github/all-contributors/tensorchord/openmodelz/main\"></a>\n<a href=\"https://github.com/tensorchord/openmodelz/actions/workflows/CI.yaml\"><img alt=\"CI\" src=\"https://github.com/tensorchord/openmodelz/actions/workflows/CI.yaml/badge.svg\"></a>\n<a href=\"https://badge.fury.io/py/openmodelz\"><img src=\"https://badge.fury.io/py/openmodelz.svg\" alt=\"PyPI version\" height=\"20\"></a>\n<a href='https://coveralls.io/github/tensorchord/openmodelz'><img src='https://coveralls.io/repos/github/tensorchord/openmodelz/badge.svg' alt='Coverage Status' /></a>\n</p>\n\n## What is OpenModelZ?\n\nOpenModelZ ( `mdz` ) is tool to deploy your models to any cluster (GCP, AWS, Lambda labs, your home lab, or even a single machine).\n\nGetting models into production is hard for data scientists and SREs. You need to configure the monitoring, logging, and scaling infrastructure, with the right security and permissions. And then setup the domain, SSL, and load balancer. This can take weeks or months of work even for a single model deployment.\n\nYou can now use mdz deploy to effortlessly deploy your models. OpenModelZ handles all the infrastructure setup for you. 
Each deployment gets a public subdomain, like `http://jupyter-9pnxd.2.242.22.143.modelz.live`, making it easily accessible.\n\n<p align=center>\n<img src=\"https://user-images.githubusercontent.com/5100735/260630222-46e26e54-50c6-43ba-b3ea-2e64dd276f87.png\" alt=\"OpenModelZ\" width=\"1000\"/>\n</p>\n\n## Benefits\n\nOpenModelZ provides the following features out-of-the-box:\n\n- 📈 **Auto-scaling from 0**: The number of inference servers could be scaled based on the workload. You could start from 0 and scale it up to 10+ replicas easily.\n- 📦 **Support any machine learning framework**: You could deploy any machine learning framework (e.g. [vLLM](https://github.com/vllm-project/vllm)/[triton-inference-server](https://github.com/triton-inference-server/server)/[mosec](https://github.com/mosecorg/mosec) etc.) with a single command. Besides, you could also deploy your own custom inference server.\n- 🔬 **Gradio/Streamlit/Jupyter support**: We provide a robust prototyping environment with support for [Gradio](https://gradio.app), [Streamlit](https://streamlit.io/), [jupyter](https://jupyter.org/) and so on. You could visualize your model's performance and debug it easily in the notebook, or deploy a web app for your model with a single command.\n- 🏃 **Start from a single machine to a cluster of machines**: You could start from a single machine and scale it up to a cluster of machines without any hassle, with a single command `mdz server start`.\n- 🚀 **Public accessible subdomain for each deployment** ( optional ) : We provision a separate subdomain for each deployment without any extra cost and effort, making each deployment easily accessible from the outside.\n\nOpenModelZ is the foundational component of the ModelZ platform available at [modelz.ai](https://modelz.ai).\n\n## How it works\n\nGet a server (could be a cloud VM, a home lab, or even a single machine) and run the `mdz server start` command. 
OpenModelZ will bootstrap the server for you.\n\n```text\n$ mdz server start\n🚧 Creating the server...\n🚧 Initializing the load balancer...\n🚧 Initializing the GPU resource...\n🚧 Initializing the server...\n🚧 Waiting for the server to be ready...\n🐋 Checking if the server is running...\n🐳 The server is running at http://146.235.213.84.modelz.live\n🎉 You could set the environment variable to get started!\n\nexport MDZ_URL=http://146.235.213.84.modelz.live\n$ export MDZ_URL=http://146.235.213.84.modelz.live\n```\n\nThen you could deploy your model with a single command `mdz deploy` and get the endpoint:\n\n```\n$ mdz deploy --image modelzai/gradio-stable-diffusion:23.03 --name sdw --port 7860 --gpu 1\nInference sd is created\n$ mdz list\n NAME  ENDPOINT                                                 STATUS  INVOCATIONS  REPLICAS \n sdw   http://sdw-qh2n0y28ybqc36oc.146.235.213.84.modelz.live   Ready           174  1/1      \n       http://146.235.213.84.modelz.live/inference/sdw.default                                \n```\n\n## Quick Start 🚀\n\n### Install `mdz`\n\nYou can install OpenModelZ using the following command:\n\n```text copy\npip install openmodelz\n```\n\nYou could verify the installation by running the following command:\n\n```text copy\nmdz\n```\n\nOnce you've installed the `mdz` you can start deploying models and experimenting with them.\n\n### Bootstrap `mdz`\n\nIt's super easy to bootstrap the `mdz` server. 
You just need to find a server (could be a cloud VM, a home lab, or even a single machine) and run the `mdz server start` command.\n\n> Notice: We may require the root permission to bootstrap the `mdz` server on port 80.\n\n```\n$ mdz server start\n🚧 Creating the server...\n🚧 Initializing the load balancer...\n🚧 Initializing the GPU resource...\n🚧 Initializing the server...\n🚧 Waiting for the server to be ready...\n🐋 Checking if the server is running...\nAgent:\n Version:       v0.0.13\n Build Date:    2023-07-19T09:12:55Z\n Git Commit:    84d0171640453e9272f78a63e621392e93ef6bbb\n Git State:     clean\n Go Version:    go1.19.10\n Compiler:      gc\n Platform:      linux/amd64\n🐳 The server is running at http://192.168.71.93.modelz.live\n🎉 You could set the environment variable to get started!\n\nexport MDZ_URL=http://192.168.71.93.modelz.live\n```\n\nThe internal IP address will be used as the default endpoint of your deployments. You could provide the public IP address of your server to the `mdz server start` command to make it accessible from the outside world.\n\n```bash\n# Provide the public IP as an argument\n$ mdz server start 1.2.3.4\n```\n\nYou could also specify the registry mirror to speed up the image pulling process. Here is an example:\n\n```bash /--mirror-endpoints/\n$ mdz server start --mirror-endpoints https://docker.mirrors.sjtug.sjtu.edu.cn\n```\n\n### Create your first UI-based deployment\n\nOnce you've bootstrapped the `mdz` server, you can start deploying your first applications. We will use jupyter notebook as an example in this tutorial. 
You could use any docker image as your deployment.\n\n```text\n$ mdz deploy --image jupyter/minimal-notebook:lab-4.0.3 --name jupyter --port 8888 --command \"jupyter notebook --ip='*' --NotebookApp.token='' --NotebookApp.password=''\"\nInference jupyter is created\n$ mdz list\n NAME     ENDPOINT                                                   STATUS  INVOCATIONS  REPLICAS\n jupyter  http://jupyter-9pnxdkeb6jsfqkmq.192.168.71.93.modelz.live  Ready           488  1/1\n          http://192.168.71.93/inference/jupyter.default                                                                         \n```\n\nYou could access the deployment by visiting the endpoint URL. The endpoint will be automatically generated for each deployment with the following format: `<name>-<random-string>.<ip>.modelz.live`.\n\nIt is `http://jupyter-9pnxdkeb6jsfqkmq.192.168.71.93.modelz.live` in this case. The endpoint could be accessed from the outside world as well if you've provided the public IP address of your server to the `mdz server start` command. \n\n![jupyter notebook](./images/jupyter.png)\n\n### Create your first OpenAI compatible API server\n\nYou could also create API-based deployments. 
We will use [OpenAI compatible API server with Bloomz 560M](https://github.com/tensorchord/modelz-llm#run-the-self-hosted-api-server) as an example in this tutorial.\n\n```text\n$ mdz deploy --image modelzai/llm-bloomz-560m:23.07.4 --name simple-server\nInference simple-server is created\n$ mdz list\n NAME           ENDPOINT                                                         STATUS  INVOCATIONS  REPLICAS \n jupyter        http://jupyter-9pnxdkeb6jsfqkmq.192.168.71.93.modelz.live        Ready           488  1/1      \n                http://192.168.71.93/inference/jupyter.default                                                 \n simple-server  http://simple-server-lagn8m9m8648q6kx.192.168.71.93.modelz.live  Ready             0  1/1      \n                http://192.168.71.93/inference/simple-server.default                                           \n```\n\nYou could use OpenAI python package and the endpoint `http://simple-server-lagn8m9m8648q6kx.192.168.71.93.modelz.live` in this case, to interact with the deployment.\n\n```python\nimport openai\nopenai.api_base=\"http://simple-server-lagn8m9m8648q6kx.192.168.71.93.modelz.live\"\nopenai.api_key=\"any\"\n\n# create a chat completion\nchat_completion = openai.ChatCompletion.create(model=\"bloomz\", messages=[\n    {\"role\": \"user\", \"content\": \"Who are you?\"},\n    {\"role\": \"assistant\", \"content\": \"I am a student\"},\n    {\"role\": \"user\", \"content\": \"What do you learn?\"},\n], max_tokens=100)\n```\n\n### Scale your deployment\n\nYou could scale your deployment by using the `mdz scale` command.\n\n```text /scale/\n$ mdz scale simple-server --replicas 3\n```\n\nThe requests will be load balanced between the replicas of your deployment. \n\nYou could also tell the `mdz` to **autoscale your deployment** based on the inflight requests. 
Please check out the [Autoscaling](https://docs.open.modelz.ai/deployment/autoscale) documentation for more details.\n\n### Debug your deployment\n\nSometimes you may want to debug your deployment. You could use the `mdz logs` command to get the logs of your deployment.\n\n```text /logs/\n$ mdz logs simple-server\nsimple-server-6756dd67ff-4bf4g: 10.42.0.1 - - [27/Jul/2023 02:32:16] \"GET / HTTP/1.1\" 200 -\nsimple-server-6756dd67ff-4bf4g: 10.42.0.1 - - [27/Jul/2023 02:32:16] \"GET / HTTP/1.1\" 200 -\nsimple-server-6756dd67ff-4bf4g: 10.42.0.1 - - [27/Jul/2023 02:32:17] \"GET / HTTP/1.1\" 200 -\n```\n\nYou could also use the `mdz exec` command to execute a command in the container of your deployment. You do not need to ssh into the server to do that.\n\n```text /exec/\n$ mdz exec simple-server ps\nPID   USER     TIME   COMMAND\n    1 root       0:00 /usr/bin/dumb-init /bin/sh -c python3 -m http.server 80\n    7 root       0:00 /bin/sh -c python3 -m http.server 80\n    8 root       0:00 python3 -m http.server 80\n    9 root       0:00 ps\n```\n\n```text /exec/\n$ mdz exec simple-server -ti bash\nbash-4.4# \n```\n\nOr you could port-forward the deployment to your local machine and debug it locally.\n\n```text /port-forward/\n$ mdz port-forward simple-server 7860\nForwarding inference simple-server to local port 7860\n```\n\n### Add more servers\n\nYou could add more servers to your cluster by using the `mdz server join` command. 
The `mdz` server will be bootstrapped on the server and join the cluster automatically.\n\n```text /join/\n$ mdz server join <internal ip address of the previous server>\n$ mdz server list\n NAME   PHASE  ALLOCATABLE      CAPACITY        \n node1  Ready  cpu: 16          cpu: 16         \n               mem: 32784748Ki  mem: 32784748Ki \n               gpu: 1           gpu: 1      \n node2  Ready  cpu: 16          cpu: 16         \n               mem: 32784748Ki  mem: 32784748Ki \n               gpu: 1           gpu: 1      \n```\n\n### Label your servers\n\nYou could label your servers to deploy your models to specific servers. For example, you could label your servers with `gpu=true` and deploy your models to servers with GPUs.\n\n```text /--node-labels gpu=true,type=nvidia-a100/\n$ mdz server label node3 gpu=true type=nvidia-a100\n$ mdz deploy ... --node-labels gpu=true,type=nvidia-a100\n```\n\n## Architecture\n\nOpenModelZ is inspired by the [k3s](https://github.com/k3s-io/k3s) and [OpenFaaS](https://github.com/openfaas), but designed specifically for machine learning deployment. We keep the core of the system **simple, and easy to extend**.\n\nYou do not need to read this section if you just want to deploy your models. But if you want to understand how OpenModelZ works, this section is for you.\n\n<p align=center>\n<img src=\"https://user-images.githubusercontent.com/5100735/260627792-2e89f6b8-006c-4807-84a3-29b6785af812.png\" alt=\"OpenModelZ\" width=\"500\"/>\n</p>\n\nOpenModelZ is composed of two components:\n\n- Data Plane: The data plane is responsible for the servers. You could use `mdz server` to manage the servers. The data plane is designed to be **stateless** and **scalable**. You could easily scale the data plane by adding more servers to the cluster. It uses k3s under the hood, to support VMs, bare-metal, and IoT devices (in the future). 
You could also deploy OpenModelZ on an existing kubernetes cluster.\n- Control Plane: The control plane is responsible for the deployments. It manages the deployments and the underlying resources.\n\nA request will be routed to the inference servers by the load balancer. And the autoscaler will scale the number of inference servers based on the workload. We provide a domain `*.modelz.live` by default, with the help of a [wildcard DNS server](https://github.com/cunnie/sslip.io) to support the public accessible subdomain for each deployment. You could also use your own domain.\n\nYou could check out the [architecture](https://docs.open.modelz.ai/architecture) documentation for more details.\n\n## Roadmap 🗂️\n\nPlease check out [ROADMAP](https://docs.open.modelz.ai/community).\n\n## Contribute 😊\n\nWe welcome all kinds of contributions from the open-source community, individuals, and partners.\n\n- Join our [discord community](https://discord.gg/KqswhpVgdU)!\n\n## Contributors ✨\n\n<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->\n<!-- prettier-ignore-start -->\n<!-- markdownlint-disable -->\n<table>\n  <tbody>\n    <tr>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/gaocegege\"><img src=\"https://avatars.githubusercontent.com/u/5100735?v=4?s=70\" width=\"70px;\" alt=\"Ce Gao\"/><br /><sub><b>Ce Gao</b></sub></a><br /><a href=\"https://github.com/tensorchord/openmodelz/commits?author=gaocegege\" title=\"Code\">💻</a> <a href=\"https://github.com/tensorchord/openmodelz/pulls?q=is%3Apr+reviewed-by%3Agaocegege\" title=\"Reviewed Pull Requests\">👀</a> <a href=\"#tutorial-gaocegege\" title=\"Tutorials\">✅</a></td>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/VoVAllen\"><img src=\"https://avatars.githubusercontent.com/u/8686776?v=4?s=70\" width=\"70px;\" alt=\"Jinjing Zhou\"/><br /><sub><b>Jinjing Zhou</b></sub></a><br /><a href=\"#question-VoVAllen\" 
title=\"Answering Questions\">💬</a> <a href=\"https://github.com/tensorchord/openmodelz/issues?q=author%3AVoVAllen\" title=\"Bug reports\">🐛</a> <a href=\"#ideas-VoVAllen\" title=\"Ideas, Planning, & Feedback\">🤔</a></td>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://blog.mapotofu.org/\"><img src=\"https://avatars.githubusercontent.com/u/12974685?v=4?s=70\" width=\"70px;\" alt=\"Keming\"/><br /><sub><b>Keming</b></sub></a><br /><a href=\"https://github.com/tensorchord/openmodelz/commits?author=kemingy\" title=\"Code\">💻</a> <a href=\"#design-kemingy\" title=\"Design\">🎨</a> <a href=\"#infra-kemingy\" title=\"Infrastructure (Hosting, Build-Tools, etc)\">🚇</a></td>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"http://manjusaka.itscoder.com/\"><img src=\"https://avatars.githubusercontent.com/u/7054676?v=4?s=70\" width=\"70px;\" alt=\"Nadeshiko Manju\"/><br /><sub><b>Nadeshiko Manju</b></sub></a><br /><a href=\"https://github.com/tensorchord/openmodelz/issues?q=author%3AZheaoli\" title=\"Bug reports\">🐛</a> <a href=\"#design-Zheaoli\" title=\"Design\">🎨</a> <a href=\"#ideas-Zheaoli\" title=\"Ideas, Planning, & Feedback\">🤔</a></td>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/tddschn\"><img src=\"https://avatars.githubusercontent.com/u/45612704?v=4?s=70\" width=\"70px;\" alt=\"Teddy Xinyuan Chen\"/><br /><sub><b>Teddy Xinyuan Chen</b></sub></a><br /><a href=\"https://github.com/tensorchord/openmodelz/commits?author=tddschn\" title=\"Documentation\">📖</a></td>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://page.codespaper.com\"><img src=\"https://avatars.githubusercontent.com/u/3764335?v=4?s=70\" width=\"70px;\" alt=\"Wei Zhang\"/><br /><sub><b>Wei Zhang</b></sub></a><br /><a href=\"https://github.com/tensorchord/openmodelz/commits?author=zwpaper\" title=\"Code\">💻</a></td>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a 
href=\"https://xuanwo.io/\"><img src=\"https://avatars.githubusercontent.com/u/5351546?v=4?s=70\" width=\"70px;\" alt=\"Xuanwo\"/><br /><sub><b>Xuanwo</b></sub></a><br /><a href=\"#content-Xuanwo\" title=\"Content\">🖋</a> <a href=\"#design-Xuanwo\" title=\"Design\">🎨</a> <a href=\"#ideas-Xuanwo\" title=\"Ideas, Planning, & Feedback\">🤔</a></td>\n    </tr>\n    <tr>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://github.com/cutecutecat\"><img src=\"https://avatars.githubusercontent.com/u/19801166?v=4?s=70\" width=\"70px;\" alt=\"cutecutecat\"/><br /><sub><b>cutecutecat</b></sub></a><br /><a href=\"#ideas-cutecutecat\" title=\"Ideas, Planning, & Feedback\">🤔</a></td>\n      <td align=\"center\" valign=\"top\" width=\"14.28%\"><a href=\"https://xieydd.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/20329697?v=4?s=70\" width=\"70px;\" alt=\"xieydd\"/><br /><sub><b>xieydd</b></sub></a><br /><a href=\"#ideas-xieydd\" title=\"Ideas, Planning, & Feedback\">🤔</a></td>\n    </tr>\n  </tbody>\n</table>\n\n<!-- markdownlint-restore -->\n<!-- prettier-ignore-end -->\n\n<!-- ALL-CONTRIBUTORS-LIST:END -->\n\n## Acknowledgements 🙏\n\n- [K3s](https://github.com/k3s-io/k3s) for the single control-plane binary and process.\n- [OpenFaaS](https://github.com/openfaas) for their work on serverless function services. It laid the foundation for OpenModelZ.\n- [sslip.io](https://github.com/cunnie/sslip.io) for the wildcard DNS service. It makes it possible to access the server from the outside world without any setup.\n"
  },
  {
    "path": "agent/.gitignore",
    "content": "# If you prefer the allow list template instead of the deny list, see community template:\n# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore\n#\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n*.report\n\n# Dependency directories (remove the comment below to include it)\nvendor/\n\n# Go workspace file\ngo.work\n\n.vscode/*\n.idea\n\n# Local History for Visual Studio Code\n.history/\n\n# Built Visual Studio Code Extensions\n*.vsix\n\n__debug_bin\nbin/\ndebug-bin/\n/build.envd\n.ipynb_checkpoints/\ncover.html\n\ncmd/test/\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nwheelhouse/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n.demo/\npkg/docs/swagger.*\n"
  },
  {
    "path": "agent/Dockerfile",
    "content": "FROM ubuntu:22.04\n\nLABEL maintainer=\"modelz-support@tensorchord.ai\"\nRUN apt-get -qq update \\\n    && apt-get -qq install -y --no-install-recommends ca-certificates curl\n\nCOPY agent /usr/bin/agent\nENTRYPOINT [\"/usr/bin/agent\"]\n"
  },
  {
    "path": "agent/Makefile",
    "content": "# Copyright 2022 TensorChord Inc.\n#\n# The old school Makefile, following are required targets. The Makefile is written\n# to allow building multiple binaries. You are free to add more targets or change\n# existing implementations, as long as the semantics are preserved.\n#\n#   make              - default to 'build' target\n#   make lint         - code analysis\n#   make test         - run unit test (or plus integration test)\n#   make build        - alias to build-local target\n#   make build-local  - build local binary targets\n#   make build-linux  - build linux binary targets\n#   make container    - build containers\n#   $ docker login registry -u username -p xxxxx\n#   make push         - push containers\n#   make clean        - clean up targets\n#\n# Not included but recommended targets:\n#   make e2e-test\n#\n# The makefile is also responsible to populate project version information.\n#\n\n#\n# Tweak the variables based on your project.\n#\n\n# This repo's root import path (under GOPATH).\nROOT := github.com/tensorchord/openmodelz/agent\n\n# Target binaries. 
You can build multiple binaries for a single project.\nTARGETS := agent\n\n# Container image prefix and suffix added to targets.\n# The final built images are:\n#   $[REGISTRY]/$[IMAGE_PREFIX]$[TARGET]$[IMAGE_SUFFIX]:$[VERSION]\n# $[REGISTRY] is an item from $[REGISTRIES], $[TARGET] is an item from $[TARGETS].\nIMAGE_PREFIX ?= $(strip )\nIMAGE_SUFFIX ?= $(strip )\n\n# Container registries.\nREGISTRY ?= ghcr.io/tensorchord\n\n# Container registry for base images.\nBASE_REGISTRY ?= docker.io\nBASE_REGISTRY_USER ?= modelzai\n\n# Disable CGO by default.\nCGO_ENABLED ?= 0\n\n#\n# These variables should not need tweaking.\n#\n\n# It's necessary to set this because some environments don't link sh -> bash.\nexport SHELL := bash\n\n# It's necessary to set the errexit flags for the bash shell.\nexport SHELLOPTS := errexit\n\nPACKAGE_NAME := github.com/tensorchord/openmodelz/agent\nGOLANG_CROSS_VERSION  ?= v1.17.6\n\n# Project main package location (can be multiple ones).\nCMD_DIR := ./cmd\n\n# Project output directory.\nOUTPUT_DIR := ./bin\nDEBUG_DIR := ./debug-bin\n\n# Build directory.\nBUILD_DIR := ./build\n\n# Current version of the project.\nVERSION ?= $(shell git describe --match 'v[0-9]*' --always --tags --abbrev=0)\nBUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\nGIT_COMMIT=$(shell git rev-parse HEAD)\nGIT_TAG=$(shell if [ -z \"`git status --porcelain`\" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)\nGIT_TREE_STATE=$(shell if [ -z \"`git status --porcelain`\" ]; then echo \"clean\" ; else echo \"dirty\"; fi)\nGITSHA ?= $(shell git rev-parse --short HEAD)\n\n# Track code version with Docker Label.\nDOCKER_LABELS ?= git-describe=\"$(shell date -u +v%Y%m%d)-$(shell git describe --tags --always --dirty)\"\n\n# Golang standard bin directory.\nGOPATH ?= $(shell go env GOPATH)\nGOROOT ?= $(shell go env GOROOT)\nBIN_DIR := $(GOPATH)/bin\nGOLANGCI_LINT := $(BIN_DIR)/golangci-lint\n\n# check if we need embed the dashboard\nDASHBOARD_BUILD ?= debug\n\n# 
Default golang flags used in build and test\n# -mod=vendor: force go to use the vendor files instead of using the `$GOPATH/pkg/mod`\n# -p: the number of programs that can be run in parallel\n# -count: run each test and benchmark 1 times. Set this flag to disable test cache\nexport GOFLAGS ?= -count=1\n\n#\n# Define all targets. At least the following commands are required:\n#\n\n# All targets.\n.PHONY: help lint test build container push addlicense debug debug-local build-local generate clean test-local addlicense-install release build-image\n\n.DEFAULT_GOAL:=build\n\nbuild: build-local  ## Build the release version\n\nhelp:  ## Display this help\n\t@awk 'BEGIN {FS = \":.*##\"; printf \"\\nUsage:\\n  make \\033[36m<target>\\033[0m\\n\"} /^[a-zA-Z0-9_-]+:.*?##/ { printf \"  \\033[36m%-15s\\033[0m %s\\n\", $$1, $$2 } /^##@/ { printf \"\\n\\033[1m%s\\033[0m\\n\", substr($$0, 5) } ' $(MAKEFILE_LIST)\n\ndebug: debug-local  ## Build the debug version\n\n# more info about `GOGC` env: https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint\nlint: $(GOLANGCI_LINT)  ## Lint GO code\n\t@$(GOLANGCI_LINT) run\n\n$(GOLANGCI_LINT):\n\tcurl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin\n\nmockgen-install:\n\tgo install github.com/golang/mock/mockgen@v1.6.0\n\naddlicense-install:\n\tgo install github.com/google/addlicense@latest\n\n# https://github.com/swaggo/swag/pull/1322, we should use master instead of latest for now.\nswag-install:\n\tgo install github.com/swaggo/swag/cmd/swag@v1.8.7\n\nbuild-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) go build -tags $(DASHBOARD_BUILD)  -trimpath -v -o $(OUTPUT_DIR)/$${target}     \\\n\t    -ldflags \"-s -w -X $(ROOT)/pkg/version.version=$(VERSION) -X $(ROOT)/pkg/version.buildDate=$(BUILD_DATE) -X $(ROOT)/pkg/version.gitCommit=$(GIT_COMMIT) -X 
$(ROOT)/pkg/version.gitTreeState=$(GIT_TREE_STATE)\"                     \\\n\t    $(CMD_DIR)/$${target};                                                         \\\n\tdone\n\n# It is used by vscode to attach into the process.\ndebug-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) go build -tags $(DASHBOARD_BUILD) -trimpath                                    \\\n\t  \t-v -o $(DEBUG_DIR)/$${target}                                                  \\\n\t  \t-gcflags='all=-N -l'                                                           \\\n\t    $(CMD_DIR)/$${target};                                                         \\\n\tdone\n\naddlicense: addlicense-install  ## Add license to GO code files\n\taddlicense -l mpl -c \"TensorChord Inc.\" $$(find . -type f -name '*.go' | grep -v pkg/docs/docs.go)\n\ntest-local:\n\t@go test -tags=$(DASHBOARD_BUILD) -v -race -coverprofile=coverage.out ./...\n\ntest:  ## Run the tests\n\t@go test -tags=$(DASHBOARD_BUILD) -race -coverprofile=coverage.out ./...\n\t@go tool cover -func coverage.out | tail -n 1 | awk '{ print \"Total coverage: \" $$3 }'\n\nclean:  ## Clean the outputs and artifacts\n\t@-rm -vrf ${OUTPUT_DIR}\n\t@-rm -vrf ${DEBUG_DIR}\n\t@-rm -vrf build dist .eggs *.egg-info\n\nfmt: swag-install ## Run go fmt against code.\n\tgo fmt ./...\n\tswag fmt\n\nvet: ## Run go vet against code.\n\tgo vet ./...\n\nswag: swag-install\n\tswag init -g ./cmd/agent/main.go --parseDependency --output ./pkg/docs \n\nbuild-image: build-local\n\tdocker build -t ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/openmodelz-agent:dev -f Dockerfile ./bin\n\tdocker push ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/openmodelz-agent:dev\n\nrelease:\n\t@if [ ! 
-f \".release-env\" ]; then \\\n\t\techo \"\\033[91m.release-env is required for release\\033[0m\";\\\n\t\texit 1;\\\n\tfi\n\tdocker run \\\n\t\t--rm \\\n\t\t--privileged \\\n\t\t-e CGO_ENABLED=1 \\\n\t\t--env-file .release-env \\\n\t\t-v /var/run/docker.sock:/var/run/docker.sock \\\n\t\t-v `pwd`:/go/src/$(PACKAGE_NAME) \\\n\t\t-v `pwd`/sysroot:/sysroot \\\n\t\t-w /go/src/$(PACKAGE_NAME) \\\n\t\tgoreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \\\n\t\trelease --rm-dist\n\ngenerate: mockgen-install swag\n\t@mockgen -source pkg/runtime/runtime.go -destination pkg/runtime/mock/mock.go -package mock\n"
  },
  {
    "path": "agent/README.md",
    "content": "<div align=\"center\">\n\n# OpenModelZ Agent\n\n</div>\n\n<p align=center>\n<a href=\"https://discord.gg/KqswhpVgdU\"><img alt=\"discord invitation link\" src=\"https://dcbadge.vercel.app/api/server/KqswhpVgdU?style=flat\"></a>\n<a href=\"https://twitter.com/TensorChord\"><img src=\"https://img.shields.io/twitter/follow/tensorchord?style=social\" alt=\"trackgit-views\" /></a>\n</p>\n\n## Installation\n\n```\npip install openmodelz\n```\n\n## Architecture\n\nPlease check out [Architecture](https://docs.open.modelz.ai/architecture) documentation.\n"
  },
  {
    "path": "agent/api/types/build.go",
    "content": "package types\n\ntype Build struct {\n\tSpec   BuildSpec   `json:\"spec\"`\n\tStatus BuildStatus `json:\"status,omitempty\"`\n}\n\ntype BuildSpec struct {\n\tName                string `json:\"name,omitempty\"`\n\tNamespace           string `json:\"namespace,omitempty\"`\n\tGitRepositorySource `json:\",inline,omitempty\"`\n\tDockerSource        `json:\",inline,omitempty\"`\n\tBuildTarget         BuildTarget `json:\",inline,omitempty\"`\n}\n\ntype DockerSource struct {\n\tArtifactImage    string `json:\"image,omitempty\"`\n\tArtifactImageTag string `json:\"image_tag,omitempty\"`\n\tAuthN            AuthN  `json:\"authn,omitempty\"`\n\tSecretID         string `json:\"secret_id,omitempty\"`\n}\n\ntype BuildTarget struct {\n\t// directory is the target directory name.\n\t// Must not contain or start with '..'.  If '.' is supplied, the volume directory will be the\n\t// git repository.  Otherwise, if specified, the volume will contain the git repository in\n\t// the subdirectory with the given name.\n\t// +optional\n\tDirectory string `json:\"directory,omitempty\"`\n\n\tBuilder          BuilderType `json:\"builder,omitempty\"`\n\tArtifactImage    string      `json:\"image,omitempty\"`\n\tArtifactImageTag string      `json:\"image_tag,omitempty\"`\n\tDigest           string      `json:\"digest,omitempty\"`\n\n\tDuration      string `json:\"duration,omitempty\"`\n\tRegistry      string `json:\"registry,omitempty\"`\n\tRegistryToken string `json:\"registry_token,omitempty\"`\n}\n\ntype AuthN struct {\n\tUsername string `json:\"username,omitempty\"`\n\tPassword string `json:\"password,omitempty\"`\n\tToken    string `json:\"token,omitempty\"`\n}\n\ntype BuildStatus struct {\n\tPhase BuildPhase `json:\"phase,omitempty\"`\n}\n\ntype BuildPhase string\n\nconst (\n\tBuildPhasePending   BuildPhase = \"Pending\"\n\tBuildPhaseRunning   BuildPhase = \"Running\"\n\tBuildPhaseSucceeded BuildPhase = \"Succeeded\"\n\tBuildPhaseFailed    BuildPhase = 
\"Failed\"\n)\n\ntype BuilderType string\n\nconst (\n\tBuilderTypeDockerfile BuilderType = \"Dockerfile\"\n\tBuilderTypeENVD       BuilderType = \"envd\"\n\tBuilderTypeImage      BuilderType = \"image\"\n)\n\ntype GitRepositorySource struct {\n\t// repository is the URL\n\tRepository string `json:\"repository\"`\n\tBranch     string `json:\"branch,omitempty\"`\n\t// revision is the commit hash for the specified revision.\n\t// +optional\n\tRevision string `json:\"revision,omitempty\"`\n}\n"
  },
  {
    "path": "agent/api/types/error.go",
    "content": "package types\n\ntype ErrorResponse struct {\n\tMessage string `json:\"message\"`\n}\n"
  },
  {
    "path": "agent/api/types/event.go",
    "content": "package types\n\nimport \"time\"\n\nconst (\n\tDeploymentCreateEvent     = \"deployment-create\"\n\tDeploymentUpdateEvent     = \"deployment-update\"\n\tDeploymentDeleteEvent     = \"deployment-delete\"\n\tDeploymentScaleUpEvent    = \"deployment-scale-up\"\n\tDeploymentScaleDownEvent  = \"deployment-scale-down\"\n\tDeploymentScaleBlockEvent = \"deployment-scale-block\"\n\tPodCreateEvent            = \"pod-create\"\n\tPodReadyEvent             = \"pod-ready\"\n\tPodTimeoutEvent           = \"pod-timeout\"\n)\n\ntype DeploymentEvent struct {\n\tID           string    `json:\"id\"`\n\tCreatedAt    time.Time `json:\"created_at\"`\n\tUserID       string    `json:\"user_id\"`\n\tDeploymentID string    `json:\"deployment_id\"`\n\tEventType    string    `json:\"event_type\"`\n\tMessage      string    `json:\"message\"`\n}\n"
  },
  {
    "path": "agent/api/types/inference_deployment.go",
    "content": "package types\n\n// InferenceDeployment represents a request to create or update a Model.\ntype InferenceDeployment struct {\n\tSpec   InferenceDeploymentSpec   `json:\"spec\"`\n\tStatus InferenceDeploymentStatus `json:\"status,omitempty\"`\n}\n\ntype InferenceDeploymentSpec struct {\n\t// Name is the name of the inference.\n\tName string `json:\"name\"`\n\n\t// Namespace for the inference.\n\tNamespace string `json:\"namespace,omitempty\"`\n\n\t// Scaling is the scaling configuration for the inference.\n\tScaling *ScalingConfig `json:\"scaling,omitempty\"`\n\n\t// Framework is the inference framework.\n\tFramework Framework `json:\"framework,omitempty\"`\n\n\t// Image is a fully-qualified container image\n\tImage string `json:\"image\"`\n\n\t// Port is the port exposed by the inference.\n\tPort *int32 `json:\"port,omitempty\"`\n\n\t// HTTPProbePath is the path of the http probe.\n\tHTTPProbePath *string `json:\"http_probe_path,omitempty\"`\n\n\t// Command to run when starting the\n\tCommand *string `json:\"command,omitempty\"`\n\n\t// EnvVars can be provided to set environment variables for the inference runtime.\n\tEnvVars map[string]string `json:\"envVars,omitempty\"`\n\n\t// Constraints are the constraints for the inference.\n\tConstraints []string `json:\"constraints,omitempty\"`\n\n\t// Secrets list of secrets to be made available to inference.\n\tSecrets []string `json:\"secrets,omitempty\"`\n\n\t// Labels are key-value pairs that may be attached to the inference.\n\tLabels map[string]string `json:\"labels,omitempty\"`\n\n\t// Annotations are key-value pairs that may be attached to the inference.\n\tAnnotations map[string]string `json:\"annotations,omitempty\"`\n\n\t// Resources are the compute resource requirements.\n\tResources *ResourceRequirements `json:\"resources,omitempty\"`\n}\n\n// Framework is the inference framework. It is only used to set the default port\n// and command. 
For example, if the framework is \"gradio\", the default port is\n// 7860 and the default command is \"python app.py\". You could override these\n// defaults by setting the port and command fields and framework to `other`.\ntype Framework string\n\nconst (\n\tFrameworkGradio    Framework = \"gradio\"\n\tFrameworkStreamlit Framework = \"streamlit\"\n\tFrameworkMosec     Framework = \"mosec\"\n\tFrameworkOther     Framework = \"other\"\n)\n\ntype ScalingConfig struct {\n\t// MinReplicas is the lower limit for the number of replicas to which the\n\t// autoscaler can scale down. It defaults to 0.\n\tMinReplicas *int32 `json:\"min_replicas,omitempty\"`\n\t// MaxReplicas is the upper limit for the number of replicas to which the\n\t// autoscaler can scale up. It cannot be less that minReplicas. It defaults\n\t// to 1.\n\tMaxReplicas *int32 `json:\"max_replicas,omitempty\"`\n\t// TargetLoad is the target load. In capacity mode, it is the expected number of the inflight requests per replica.\n\tTargetLoad *int32 `json:\"target_load,omitempty\"`\n\t// Type is the scaling type. It can be either \"capacity\" or \"rps\". Default is \"capacity\".\n\tType *ScalingType `json:\"type,omitempty\"`\n\t// ZeroDuration is the duration (in seconds) of zero load before scaling down to zero. 
Default is 5 minutes.\n\tZeroDuration *int32 `json:\"zero_duration,omitempty\"`\n\t// StartupDuration is the duration (in seconds) of startup time.\n\tStartupDuration *int32 `json:\"startup_duration,omitempty\"`\n}\n\ntype ScalingType string\n\nconst (\n\tScalingTypeCapacity ScalingType = \"capacity\"\n\tScalingTypeRPS      ScalingType = \"rps\"\n)\n\n// ResourceRequirements describes the compute resource requirements.\ntype ResourceRequirements struct {\n\t// Limits describes the maximum amount of compute resources allowed.\n\t// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n\t// +optional\n\tLimits ResourceList `json:\"limits,omitempty\" protobuf:\"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName\"`\n\t// Requests describes the minimum amount of compute resources required.\n\t// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\n\t// otherwise to an implementation-defined value.\n\t// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n\t// +optional\n\tRequests ResourceList `json:\"requests,omitempty\" protobuf:\"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName\"`\n}\n\n// ResourceList is a set of (resource name, quantity) pairs.\ntype ResourceList map[ResourceName]Quantity\n\ntype ResourceName string\n\nconst (\n\tResourceCPU    ResourceName = \"cpu\"\n\tResourceMemory ResourceName = \"memory\"\n\tResourceGPU    ResourceName = \"gpu\"\n)\n\ntype Quantity string\n\nconst (\n\tRuntimeClassNvidia string = \"nvidia\"\n)\n\ntype ImageCache struct {\n\t// Name is the name of the inference.\n\tName           string `json:\"name\"`\n\tNamespace      string `json:\"namespace\"`\n\tImage          string `json:\"image\"`\n\tForceFullCache bool   `json:\"force_full_cache\"`\n\tNodeSelector   string `json:\"node_selector\"`\n}\n"
  },
  {
    "path": "agent/api/types/inference_deployment_instance.go",
    "content": "package types\n\nimport \"time\"\n\ntype InferenceDeploymentInstance struct {\n\tSpec   InferenceDeploymentInstanceSpec   `json:\"spec,omitempty\"`\n\tStatus InferenceDeploymentInstanceStatus `json:\"status,omitempty\"`\n}\n\ntype InferenceDeploymentInstanceSpec struct {\n\tNamespace      string `json:\"namespace,omitempty\"`\n\tName           string `json:\"name,omitempty\"`\n\tOwnerReference string `json:\"owner_reference,omitempty\"`\n}\n\ntype InferenceDeploymentInstanceStatus struct {\n\tPhase     InstancePhase `json:\"phase,omitempty\"`\n\tStartTime time.Time     `json:\"createdAt,omitempty\"`\n\tReason    string        `json:\"reason,omitempty\"`\n\tMessage   string        `json:\"message,omitempty\"`\n}\n\ntype InstancePhase string\n\nconst (\n\tInstancePhaseScheduling   InstancePhase = \"Scheduling\"\n\tInstancePhasePending      InstancePhase = \"Pending\"\n\tInstancePhaseRunning      InstancePhase = \"Running\"\n\tInstancePhaseFailed       InstancePhase = \"Failed\"\n\tInstancePhaseSucceeded    InstancePhase = \"Succeeded\"\n\tInstancePhaseUnknown      InstancePhase = \"Unknown\"\n\tInstancePhaseCreating     InstancePhase = \"Creating\"\n\tInstancePhaseInitializing InstancePhase = \"Initializing\"\n)\n"
  },
  {
    "path": "agent/api/types/inference_status.go",
    "content": "package types\n\nimport \"time\"\n\n// InferenceDeploymentStatus exported for system/inferences endpoint\ntype InferenceDeploymentStatus struct {\n\tPhase Phase `json:\"phase,omitempty\"`\n\n\t// InvocationCount count of invocations\n\tInvocationCount int32 `json:\"invocationCount,omitempty\"`\n\n\t// Replicas desired within the cluster\n\tReplicas int32 `json:\"replicas,omitempty\"`\n\n\t// AvailableReplicas is the count of replicas ready to receive\n\t// invocations as reported by the faas-provider\n\tAvailableReplicas int32 `json:\"availableReplicas,omitempty\"`\n\n\t// CreatedAt is the time read back from the faas backend's\n\t// data store for when the function or its container was created.\n\tCreatedAt *time.Time `json:\"createdAt,omitempty\"`\n\n\t// Usage represents CPU and RAM used by all of the\n\t// functions' replicas. Divide by AvailableReplicas for an\n\t// average value per replica.\n\tUsage *InferenceUsage `json:\"usage,omitempty\"`\n\n\t// EventMessage record human readable message indicating details about the event of deployment.\n\tEventMessage string `json:\"eventMessage,omitempty\"`\n}\n\ntype Phase string\n\nconst (\n\t// PhaseReady is the state of an inference when it is ready to\n\t// receive invocations.\n\tPhaseReady Phase = \"Ready\"\n\n\t// PhaseScaling is the state of an inference when scales.\n\tPhaseScaling Phase = \"Scaling\"\n\n\tPhaseTerminating Phase = \"Terminating\"\n\n\tPhaseNoReplicas Phase = \"NoReplicas\"\n\n\tPhaseNotReady Phase = \"NotReady\"\n\n\tPhaseBuilding Phase = \"Building\"\n\n\tPhaseOptimizing Phase = \"Optimizing\"\n)\n\n// InferenceUsage represents CPU and RAM used by all of the\n// functions' replicas.\n//\n// CPU is measured in seconds consumed since the last measurement\n// RAM is measured in total bytes consumed\ntype InferenceUsage struct {\n\t// CPU is the increase in CPU usage since the last measurement\n\t// equivalent to Kubernetes' concept of millicores.\n\tCPU float64 
`json:\"cpu,omitempty\"`\n\n\t//TotalMemoryBytes is the total memory usage in bytes.\n\tTotalMemoryBytes float64 `json:\"totalMemoryBytes,omitempty\"`\n\n\tGPU float64 `json:\"gpu,omitempty\"`\n}\n"
  },
  {
    "path": "agent/api/types/info.go",
    "content": "package types\n\n// ProviderInfo provides information about the configured provider\ntype ProviderInfo struct {\n\tName          string       `json:\"provider\"`\n\tVersion       *VersionInfo `json:\"version\"`\n\tOrchestration string       `json:\"orchestration\"`\n}\n\n// VersionInfo provides the commit message, sha and release version number\ntype VersionInfo struct {\n\tVersion      string `json:\"version,omitempty\"`\n\tBuildDate    string `json:\"build_date,omitempty\"`\n\tGitCommit    string `json:\"git_commit,omitempty\"`\n\tGitTag       string `json:\"git_tag,omitempty\"`\n\tGitTreeState string `json:\"git_tree_state,omitempty\"`\n\tGoVersion    string `json:\"go_version,omitempty\"`\n\tCompiler     string `json:\"compiler,omitempty\"`\n\tPlatform     string `json:\"platform,omitempty\"`\n}\n"
  },
  {
    "path": "agent/api/types/log.go",
    "content": "package types\n\nimport \"time\"\n\ntype LogRequest struct {\n\tNamespace string `form:\"namespace\" json:\"namespace,omitempty\"`\n\tName      string `form:\"name\" json:\"name,omitempty\"`\n\t// Instance is the optional pod name, that allows you to request logs from a specific instance\n\tInstance string `form:\"instance\" json:\"instance,omitempty\"`\n\t// Follow is allows the user to request a stream of logs until the timeout\n\tFollow bool `form:\"follow\" json:\"follow,omitempty\"`\n\t// Tail sets the maximum number of log messages to return, <=0 means unlimited\n\tTail  int    `form:\"tail\" json:\"tail,omitempty\"`\n\tSince string `form:\"since\" json:\"since,omitempty\"`\n\t// End is the end time of the log stream\n\tEnd string `form:\"end\" json:\"end,omitempty\"`\n}\n\n// Message is a specific log message from a function container log stream\ntype Message struct {\n\t// Name is the function name\n\tName      string `json:\"name\"`\n\tNamespace string `json:\"namespace\"`\n\t// instance is the name/id of the specific function instance\n\tInstance string `json:\"instance\"`\n\t// Timestamp is the timestamp of when the log message was recorded\n\tTimestamp time.Time `json:\"timestamp\"`\n\t// Text is the raw log message content\n\tText string `json:\"text\"`\n}\n"
  },
  {
    "path": "agent/api/types/modelz_cloud.go",
    "content": "package types\n\nimport \"time\"\n\nconst (\n\tClusterStatusInit    = \"init\"\n\tClusterStatusActive  = \"active\"\n\tClusterStatusUnknown = \"unknown\"\n)\n\nconst (\n\tDailEndPointSuffix = \"/api/v1/clusteragent/connect\"\n)\n\ntype AgentToken struct {\n\tUID         string `json:\"uid,omitempty\"`\n\tToken       string `json:\"token,omitempty\"`\n\tClusterName string `json:\"cluster_name,omitempty\"`\n}\n\ntype ManagedCluster struct {\n\tName              string    `json:\"name,omitempty\"`\n\tID                string    `json:\"id,omitempty\"`\n\tTokenID           string    `json:\"token_id,omitempty\"`\n\tVersion           string    `json:\"version,omitempty\"`\n\tKubernetesVersion string    `json:\"kubernetes_version,omitempty\"`\n\tPlatform          string    `json:\"platform,omitempty\"`\n\tStatus            string    `json:\"status,omitempty\"`\n\tCreatedAt         time.Time `json:\"created_at,omitempty\"`\n\tUpdatedAt         time.Time `json:\"updated_at,omitempty\"`\n\tRegion            string    `json:\"region,omitempty\"`\n\tServerResources   string    `json:\"server_resources,omitempty\"`\n\tPrometheusURL     string    `json:\"prometheus_url,omitempty\"`\n}\n\ntype APIKeyMap map[string]string\n\ntype NamespaceList struct {\n\tItems []string `json:\"items,omitempty\"`\n}\n"
  },
  {
    "path": "agent/api/types/namespace.go",
    "content": "package types\n\ntype NamespaceRequest struct {\n\tName string `json:\"name,omitempty\"`\n}\n"
  },
  {
    "path": "agent/api/types/queue.go",
    "content": "package types\n\nimport (\n\t\"net/http\"\n\t\"net/url\"\n)\n\n// Request for asynchronous processing\ntype QueueRequest struct {\n\t// Header from HTTP request\n\tHeader http.Header\n\n\t// Host from HTTP request\n\tHost string\n\n\t// Body from HTTP request to use for invocation\n\tBody []byte\n\n\t// Method from HTTP request\n\tMethod string\n\n\t// Path from HTTP request\n\tPath string\n\n\t// QueryString from HTTP request\n\tQueryString string\n\n\t// Function name to invoke\n\tFunction string\n\n\t// QueueName to publish the request to, leave blank\n\t// for default.\n\tQueueName string\n\n\t// Used by queue worker to submit a result\n\tCallbackURL *url.URL `json:\"CallbackUrl\"`\n}\n\n// RequestQueuer can publish a request to be executed asynchronously\ntype RequestQueuer interface {\n\tQueue(req *QueueRequest) error\n}\n"
  },
  {
    "path": "agent/api/types/requests.go",
    "content": "// Copyright (c) Alex Ellis 2017. All rights reserved.\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage types\n\n// ScaleServiceRequest scales the service to the requested replica count.\ntype ScaleServiceRequest struct {\n\tServiceName  string `json:\"serviceName\"`\n\tReplicas     uint64 `json:\"replicas\"`\n\tEventMessage string `json:\"eventMessage\"`\n\tAttempt      int    `json:\"attempt\"`\n}\n\n// DeleteFunctionRequest deletes a deployed function\ntype DeleteFunctionRequest struct {\n\tFunctionName string `json:\"functionName\"`\n}\n"
  },
  {
    "path": "agent/api/types/secret.go",
    "content": "package types\n\n// Secret for underlying orchestrator\ntype Secret struct {\n\t// Name of the secret\n\tName string `json:\"name\"`\n\n\t// Namespace if applicable for the secret\n\tNamespace string `json:\"namespace,omitempty\"`\n\n\t// Value is a string representing the string's value\n\tValue string `json:\"value,omitempty\"`\n\n\t// RawValue can be used to provide binary data when\n\t// Value is not set\n\tRawValue []byte `json:\"rawValue,omitempty\"`\n}\n"
  },
  {
    "path": "agent/api/types/server.go",
    "content": "package types\n\ntype Server struct {\n\tSpec   ServerSpec   `json:\"spec,omitempty\"`\n\tStatus ServerStatus `json:\"status,omitempty\"`\n}\n\ntype ServerSpec struct {\n\tName   string            `json:\"name,omitempty\"`\n\tLabels map[string]string `json:\"labels,omitempty\"`\n}\n\ntype ServerStatus struct {\n\tAllocatable ResourceList   `json:\"allocatable,omitempty\"`\n\tCapacity    ResourceList   `json:\"capacity,omitempty\"`\n\tPhase       string         `json:\"phase,omitempty\"`\n\tSystem      NodeSystemInfo `json:\"system,omitempty\"`\n}\n\n// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.\ntype NodeSystemInfo struct {\n\t// MachineID reported by the node. For unique machine identification\n\t// in the cluster this field is preferred. Learn more from man(5)\n\t// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html\n\tMachineID string `json:\"machineID\" protobuf:\"bytes,1,opt,name=machineID\"`\n\t// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).\n\tKernelVersion string `json:\"kernelVersion\" protobuf:\"bytes,4,opt,name=kernelVersion\"`\n\t// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).\n\tOSImage string `json:\"osImage\" protobuf:\"bytes,5,opt,name=osImage\"`\n\t// The Operating System reported by the node\n\tOperatingSystem string `json:\"operatingSystem\" protobuf:\"bytes,9,opt,name=operatingSystem\"`\n\t// The Architecture reported by the node\n\tArchitecture string `json:\"architecture\" protobuf:\"bytes,10,opt,name=architecture\"`\n\t// The Resource Type reported by the node\n\tResourceType string `json:\"resourceType\" protobuf:\"bytes,11,opt,name=resourceType\"`\n}\n"
  },
  {
    "path": "agent/client/build.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc (cli *Client) BuildCreate(ctx context.Context, namespace string, build types.Build) error {\n\tbuild.Spec.Namespace = namespace\n\tlogrus.Debugf(\"create new build: %s\", build)\n\n\tval := url.Values{}\n\tresp, err := cli.post(ctx, gatewayBuildControlPlanePath, val, build, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn wrapResponseError(err, resp, \"build\", build.Spec.Name)\n\t}\n\n\treturn nil\n}\n\nfunc (cli *Client) BuildGet(ctx context.Context, namespace, name string) (types.Build, error) {\n\tval := url.Values{}\n\tval.Add(\"namespace\", namespace)\n\tbuild := types.Build{}\n\tresp, err := cli.get(\n\t\tctx, fmt.Sprintf(gatewayBuildInstanceControlPlanePath, name), val, nil)\n\tdefer ensureReaderClosed(resp)\n\tif err != nil {\n\t\tlogrus.Infof(\"failed to query build.get: %s\", err)\n\t\treturn build, wrapResponseError(err, resp, \"build\", name)\n\t}\n\n\terr = json.NewDecoder(resp.body).Decode(&build)\n\tif err != nil {\n\t\tlogrus.Infof(\"failed to decode build: %s\", err)\n\t\treturn build, wrapResponseError(err, resp, \"build\", name)\n\t}\n\treturn build, nil\n}\n\nfunc (cli *Client) BuildList(ctx context.Context, namespace string) ([]types.Build, error) {\n\tval := url.Values{}\n\tval.Add(\"namespace\", namespace)\n\tresp, err := cli.get(ctx, gatewayBuildControlPlanePath, val, nil)\n\tdefer ensureReaderClosed(resp)\n\tif err != nil {\n\t\tlogrus.Infof(\"failed to query build.list: %s\", err)\n\t\treturn nil, wrapResponseError(err, resp, \"build\", namespace)\n\t}\n\n\tvar builds []types.Build\n\terr = json.NewDecoder(resp.body).Decode(&builds)\n\tif err != nil {\n\t\tlogrus.Infof(\"failed to decode builds: %s\", err)\n\t\treturn nil, wrapResponseError(err, resp, \"build\", namespace)\n\t}\n\treturn builds, nil\n}\n"
  },
  {
    "path": "agent/client/client.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/docker/go-connections/sockets\"\n)\n\n// Refer to github.com/docker/docker/client\n\n// ErrRedirect is the error returned by checkRedirect when the request is non-GET.\nvar ErrRedirect = errors.New(\"unexpected redirect in response\")\n\n// Client is the API client that performs all operations\n// against a docker server.\ntype Client struct {\n\t// scheme sets the scheme for the client\n\tscheme string\n\t// host holds the server address to connect to\n\thost string\n\t// proto holds the client protocol i.e. unix.\n\tproto string\n\t// addr holds the client address.\n\taddr string\n\t// basePath holds the path to prepend to the requests.\n\tbasePath string\n\t// client used to send and receive http requests.\n\tclient *http.Client\n\t// version of the server to talk to.\n\tversion string\n\t// custom http headers configured by users.\n\tcustomHTTPHeaders map[string]string\n\t// manualOverride is set to true when the version was set by users.\n\tmanualOverride bool\n\n\t// negotiateVersion indicates if the client should automatically negotiate\n\t// the API version to use when making requests. API version negotiation is\n\t// performed on the first request, after which negotiated is set to \"true\"\n\t// so that subsequent requests do not re-negotiate.\n\tnegotiateVersion bool\n\n\t// negotiated indicates that API version negotiation took place\n\tnegotiated bool\n}\n\n// NewClientWithOpts initializes a new API client with a default HTTPClient, and\n// default API host and version. 
It also initializes the custom HTTP headers to\n// add to each request.\n//\n// It takes an optional list of Opt functional arguments, which are applied in\n// the order they're provided, which allows modifying the defaults when creating\n// the client. For example, the following initializes a client that configures\n// itself with values from environment variables (client.FromEnv), and has\n// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()).\n//\n//\tcli, err := client.NewClientWithOpts(\n//\t\tclient.FromEnv,\n//\t\tclient.WithAPIVersionNegotiation(),\n//\t)\nfunc NewClientWithOpts(ops ...Opt) (*Client, error) {\n\tclient, err := defaultHTTPClient(DefaultModelzGatewayHost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Client{\n\t\thost:     DefaultModelzGatewayHost,\n\t\tversion:  \"\",\n\t\tclient:   client,\n\t\tproto:    defaultProto,\n\t\taddr:     defaultAddr,\n\t\tbasePath: apiBasePath,\n\t}\n\n\tfor _, op := range ops {\n\t\tif err := op(c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif c.scheme == \"\" {\n\t\tc.scheme = \"http\"\n\n\t\ttlsConfig := resolveTLSConfig(c.client.Transport)\n\t\tif tlsConfig != nil {\n\t\t\t// TODO(stevvooe): This isn't really the right way to write clients in Go.\n\t\t\t// `NewClient` should probably only take an `*http.Client` and work from there.\n\t\t\t// Unfortunately, the model of having a host-ish/url-thingy as the connection\n\t\t\t// string has us confusing protocol and transport layers. 
We continue doing\n\t\t\t// this to avoid breaking existing clients but this should be addressed.\n\t\t\tc.scheme = \"https\"\n\t\t}\n\t}\n\n\treturn c, nil\n}\n\nfunc defaultHTTPClient(host string) (*http.Client, error) {\n\thostURL, err := ParseHostURL(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttransport := &http.Transport{}\n\t_ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)\n\treturn &http.Client{\n\t\tTransport:     transport,\n\t\tCheckRedirect: CheckRedirectKeepHeader,\n\t}, nil\n}\n\n// CheckRedirect specifies the policy for dealing with redirect responses:\n// If the request is non-GET return ErrRedirect, otherwise use the last response.\n//\n// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308)\n// in the client. The envd client (and by extension envd API client) can be\n// made to send a request like POST /containers//start where what would normally\n// be in the name section of the URL is empty. This triggers an HTTP 301 from\n// the daemon.\n//\n// In go 1.8 this 301 will be converted to a GET request, and ends up getting\n// a 404 from the daemon. 
This behavior change manifests in the client in that\n// before, the 301 was not followed and the client did not generate an error,\n// but now results in a message like Error response from daemon: page not found.\nfunc CheckRedirect(req *http.Request, via []*http.Request) error {\n\tif via[0].Method == http.MethodGet {\n\t\treturn http.ErrUseLastResponse\n\t}\n\treturn ErrRedirect\n}\n\nfunc CheckRedirectKeepHeader(req *http.Request, via []*http.Request) error {\n\treq.Header = via[0].Header.Clone()\n\treturn nil\n}\n\n// DaemonHost returns the host address used by the client\nfunc (cli *Client) DaemonHost() string {\n\treturn cli.host\n}\n\n// HTTPClient returns a copy of the HTTP client bound to the server\nfunc (cli *Client) HTTPClient() *http.Client {\n\tc := *cli.client\n\treturn &c\n}\n\n// ParseHostURL parses a url string, validates the string is a host url, and\n// returns the parsed URL\nfunc ParseHostURL(host string) (*url.URL, error) {\n\tprotoAddrParts := strings.SplitN(host, \"://\", 2)\n\tif len(protoAddrParts) == 1 {\n\t\treturn nil, errors.Errorf(\"unable to parse docker host `%s`\", host)\n\t}\n\n\tvar basePath string\n\tproto, addr := protoAddrParts[0], protoAddrParts[1]\n\tif proto == \"tcp\" {\n\t\tparsed, err := url.Parse(\"tcp://\" + addr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taddr = parsed.Host\n\t\tbasePath = parsed.Path\n\t}\n\treturn &url.URL{\n\t\tScheme: proto,\n\t\tHost:   addr,\n\t\tPath:   basePath,\n\t}, nil\n}\n\n// Close the transport used by the client\nfunc (cli *Client) Close() error {\n\tif t, ok := cli.client.Transport.(*http.Transport); ok {\n\t\tt.CloseIdleConnections()\n\t}\n\treturn nil\n}\n\n// getAPIPath returns the versioned request path to call the api.\n// It appends the query parameters to the path if they are not empty.\nfunc (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {\n\tvar apiPath string\n\tif cli.version != \"\" {\n\t\tv := strings.TrimPrefix(cli.version, 
\"v\")\n\t\tapiPath = path.Join(cli.basePath, \"/v\"+v, p)\n\t} else {\n\t\tapiPath = path.Join(cli.basePath, p)\n\t}\n\treturn (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()\n}\n"
  },
  {
    "path": "agent/client/const.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nconst DefaultModelzGatewayHost = \"http://0.0.0.0:8080\"\n\nconst defaultProto = \"http\"\nconst defaultAddr = \"0.0.0.0:8080\"\n\n// Base path for api, distinguish from frontend pages\nconst apiBasePath = \"\"\n\nconst (\n\tgatewayInferControlPlanePath                      = \"/system/inferences\"\n\tgatewayInferScaleControlPath                      = \"/system/scale-inference\"\n\tgatewayInferInstanceControlPlanePath              = \"/system/inference/%s/instances\"\n\tgatewayInferInstanceExecControlPlanePath          = \"/system/inference/%s/instance/%s/exec\"\n\tgatewayServerControlPlanePath                     = \"/system/servers\"\n\tgatewayServerLabelCreateControlPlanePath          = \"/system/server/%s/labels\"\n\tgatewayServerNodeDeleteControlPlanePath           = \"/system/server/%s/delete\"\n\tgatewayNamespaceControlPlanePath                  = \"/system/namespaces\"\n\tgatewayBuildControlPlanePath                      = \"/system/build\"\n\tgatewayBuildInstanceControlPlanePath              = \"/system/build/%s\"\n\tgatewayImageCacheControlPlanePath                 = \"/system/image-cache\"\n\tmodelzCloudClusterControlPlanePath                = \"/api/v1/users/%s/clusters/%s\"\n\tmodelzCloudClusterWithUserControlPlanePath        = \"/api/v1/users/%s/clusters\"\n\tmodelzCloudClusterAPIKeyControlPlanePath          = \"/api/v1/users/%s/clusters/%s/api_keys\"\n\tmodelzCloudClusterNamespaceControlPlanePath       = \"/api/v1/users/%s/clusters/%s/namespaces\"\n\tmodelzCloudClusterDeploymentControlPlanePath      = \"/api/v1/users/%s/clusters/%s/deployments/%s/agent\"\n\tmodelzCloudClusterDeploymentEventControlPlanePath = \"/api/v1/users/%s/clusters/%s/deployments/%s/event\"\n)\n\nconst (\n\t// EnvOverrideHost is the name of 
the environment variable that can be used\n\t// to override the default host to connect to (DefaultEnvdServerHost).\n\t//\n\t// This env-var is read by FromEnv and WithHostFromEnv and when set to a\n\t// non-empty value, takes precedence over the default host (which is platform\n\t// specific), or any host already set.\n\tEnvOverrideHost = \"MODELZ_GATEWAY_HOST\"\n\n\t// EnvOverrideCertPath is the name of the environment variable that can be\n\t// used to specify the directory from which to load the TLS certificates\n\t// (ca.pem, cert.pem, key.pem) from. These certificates are used to configure\n\t// the Client for a TCP connection protected by TLS client authentication.\n\t//\n\t// TLS certificate verification is enabled by default if the Client is configured\n\t// to use a TLS connection. Refer to EnvTLSVerify below to learn how to\n\t// disable verification for testing purposes.\n\t//\n\t//\n\t// For local access to the API, it is recommended to connect with the daemon\n\t// using the default local socket connection (on Linux), or the named pipe\n\t// (on Windows).\n\t//\n\t// If you need to access the API of a remote daemon, consider using an SSH\n\t// (ssh://) connection, which is easier to set up, and requires no additional\n\t// configuration if the host is accessible using ssh.\n\tEnvOverrideCertPath = \"ENVD_SERVER_CERT_PATH\"\n\n\t// EnvTLSVerify is the name of the environment variable that can be used to\n\t// enable or disable TLS certificate verification. 
When set to a non-empty\n\t// value, TLS certificate verification is enabled, and the client is configured\n\t// to use a TLS connection, using certificates from the default directories\n\t// (within `~/.envd`); refer to EnvOverrideCertPath above for additional\n\t// details.\n\t//\n\t//\n\t// Before setting up your client and daemon to use a TCP connection with TLS\n\t// client authentication, consider using one of the alternatives mentioned\n\t// in EnvOverrideCertPath above.\n\t//\n\t// Disabling TLS certificate verification (for testing purposes)\n\t//\n\t// TLS certificate verification is enabled by default if the Client is configured\n\t// to use a TLS connection, and it is highly recommended to keep verification\n\t// enabled to prevent machine-in-the-middle attacks.\n\t//\n\t// Set the \"ENVD_SERVER_TLS_VERIFY\" environment to an empty string (\"\") to\n\t// disable TLS certificate verification. Disabling verification is insecure,\n\t// so should only be done for testing purposes. From the Go documentation\n\t// (https://pkg.go.dev/crypto/tls#Config):\n\t//\n\t// InsecureSkipVerify controls whether a client verifies the server's\n\t// certificate chain and host name. If InsecureSkipVerify is true, crypto/tls\n\t// accepts any certificate presented by the server and any host name in that\n\t// certificate. In this mode, TLS is susceptible to machine-in-the-middle\n\t// attacks unless custom verification is used. This should be used only for\n\t// testing or in combination with VerifyConnection or VerifyPeerCertificate.\n\tEnvTLSVerify = \"ENVD_SERVER_TLS_VERIFY\"\n)\n"
  },
  {
    "path": "agent/client/errors.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client // import \"github.com/docker/docker/client\"\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/cockroachdb/errors\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\n// errConnectionFailed implements an error returned when connection failed.\ntype errConnectionFailed struct {\n\thost string\n}\n\n// Error returns a string representation of an errConnectionFailed\nfunc (err errConnectionFailed) Error() string {\n\tif err.host == \"\" {\n\t\treturn \"Cannot connect to the backend\"\n\t}\n\treturn fmt.Sprintf(\"Cannot connect at %s\", err.host)\n}\n\n// IsErrConnectionFailed returns true if the error is caused by connection failed.\nfunc IsErrConnectionFailed(err error) bool {\n\treturn errors.As(err, &errConnectionFailed{})\n}\n\n// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.\nfunc ErrorConnectionFailed(host string) error {\n\treturn errConnectionFailed{host: host}\n}\n\n// Deprecated: use the errdefs.NotFound() interface instead. 
Kept for backward compatibility\ntype notFound interface {\n\terror\n\tNotFound() bool\n}\n\n// IsErrNotFound returns true if the error is a NotFound error, which is returned\n// by the API when some object is not found.\nfunc IsErrNotFound(err error) bool {\n\tif errdefs.IsNotFound(err) {\n\t\treturn true\n\t}\n\tvar e notFound\n\treturn errors.As(err, &e)\n}\n\ntype objectNotFoundError struct {\n\tobject string\n\tid     string\n}\n\nfunc (e objectNotFoundError) NotFound() {}\n\nfunc (e objectNotFoundError) Error() string {\n\treturn fmt.Sprintf(\"Error: No such %s: %s\", e.object, e.id)\n}\n\n// IsErrUnauthorized returns true if the error is caused\n// when a remote registry authentication fails\n//\n// Deprecated: use errdefs.IsUnauthorized\nfunc IsErrUnauthorized(err error) bool {\n\treturn errdefs.IsUnauthorized(err)\n}\n\ntype pluginPermissionDenied struct {\n\tname string\n}\n\nfunc (e pluginPermissionDenied) Error() string {\n\treturn \"Permission denied while installing plugin \" + e.name\n}\n\n// IsErrNotImplemented returns true if the error is a NotImplemented error.\n// This is returned by the API when a requested feature has not been\n// implemented.\n//\n// Deprecated: use errdefs.IsNotImplemented\nfunc IsErrNotImplemented(err error) bool {\n\treturn errdefs.IsNotImplemented(err)\n}\n\nfunc wrapResponseError(err error, resp serverResponse, object, id string) error {\n\tswitch {\n\tcase err == nil:\n\t\treturn nil\n\tcase resp.statusCode == http.StatusNotFound:\n\t\treturn objectNotFoundError{object: object, id: id}\n\tcase resp.statusCode == http.StatusNotImplemented:\n\t\treturn errdefs.NotImplemented(err)\n\tdefault:\n\t\treturn err\n\t}\n}\n"
  },
  {
    "path": "agent/client/hijack.go",
    "content": "package client // import \"docker.io/go-docker\"\n\nimport (\n\t\"net/url\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"golang.org/x/net/context\"\n)\n\n// HijackedResponse holds connection information for a hijacked request.\ntype HijackedResponse struct {\n\tConn *websocket.Conn\n}\n\n// Close closes the hijacked connection and reader.\nfunc (h *HijackedResponse) Close() {\n\th.Conn.Close()\n}\n\n// postHijacked sends a POST request and hijacks the connection.\nfunc (cli *Client) websocket(ctx context.Context, path string, query url.Values, headers map[string][]string) (HijackedResponse, error) {\n\tapiPath := cli.getAPIPath(ctx, path, nil)\n\n\tscheme := \"ws\"\n\tif cli.scheme == \"https\" {\n\t\tscheme = \"wss\"\n\t}\n\n\tapiURL := url.URL{\n\t\tScheme:   scheme,\n\t\tHost:     cli.addr,\n\t\tPath:     apiPath,\n\t\tRawQuery: query.Encode(),\n\t}\n\tc, _, err := websocket.DefaultDialer.DialContext(ctx, apiURL.String(), nil)\n\tif err != nil {\n\t\treturn HijackedResponse{}, err\n\t}\n\n\treturn HijackedResponse{Conn: c}, err\n}\n\nfunc (h HijackedResponse) Read(p []byte) (int, error) {\n\t// Read message from websocket connection.\n\ttm := &TerminalMessage{}\n\tif err := h.Conn.ReadJSON(tm); err != nil {\n\t\treturn 0, err\n\t}\n\tif tm.Op != \"stdout\" {\n\t\treturn 0, nil\n\t}\n\treturn copy(p, tm.Data), nil\n}\n\nfunc (h HijackedResponse) Write(p []byte) (int, error) {\n\t// Write message to websocket connection.\n\ttm := &TerminalMessage{\n\t\tOp:   \"stdin\",\n\t\tData: string(p),\n\t}\n\tif err := h.Conn.WriteJSON(tm); err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(p), nil\n}\n\n// TerminalMessage is the messaging protocol between ShellController and TerminalSession.\n//\n// OP      DIRECTION  FIELD(S) USED  DESCRIPTION\n// ---------------------------------------------------------------------\n// bind    fe->be     SessionID      Id sent back from TerminalResponse\n// stdin   fe->be     Data           Keystrokes/paste buffer\n// resize 
 fe->be     Rows, Cols     New terminal size\n// stdout  be->fe     Data           Output from the process\n// toast   be->fe     Data           OOB message to be shown to the user\ntype TerminalMessage struct {\n\tID   string `json:\"id,omitempty\"`\n\tOp   string `json:\"op,omitempty\"`\n\tData string `json:\"data,omitempty\"`\n\tRows uint16 `json:\"rows,omitempty\"`\n\tCols uint16 `json:\"cols,omitempty\"`\n}\n"
  },
  {
    "path": "agent/client/image_cache_create.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc (cli *Client) ImageCacheCreate(ctx context.Context, namespace string,\n\timageCache *types.ImageCache) error {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\tresp, err := cli.post(ctx, gatewayImageCacheControlPlanePath, urlValues, imageCache, nil)\n\tdefer ensureReaderClosed(resp)\n\tif err != nil {\n\t\treturn wrapResponseError(err, resp, \"imagecache\", imageCache.Name)\n\t}\n\n\treturn wrapResponseError(err, resp, \"imagecache\", imageCache.Name)\n}\n"
  },
  {
    "path": "agent/client/inference_create.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// InferenceCreate creates the inference.\nfunc (cli *Client) InferenceCreate(ctx context.Context, namespace string,\n\tinference types.InferenceDeployment) (types.InferenceDeployment, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\tresp, err := cli.post(ctx, gatewayInferControlPlanePath, urlValues, inference, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn inference, wrapResponseError(err, resp, \"inference\", inference.Spec.Name)\n\t}\n\n\treturn inference, wrapResponseError(err, resp, \"inference\", inference.Spec.Name)\n}\n"
  },
  {
    "path": "agent/client/inference_get.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// InferenceGet gets the inference.\nfunc (cli *Client) InferenceGet(ctx context.Context, namespace, name string) (types.InferenceDeployment, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\turl := fmt.Sprintf(\"/system/inference/%s\", name)\n\tresp, err := cli.get(ctx, url, urlValues, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn types.InferenceDeployment{},\n\t\t\twrapResponseError(err, resp, \"inference\", name)\n\t}\n\n\tvar inference types.InferenceDeployment\n\terr = json.NewDecoder(resp.body).Decode(&inference)\n\tif err != nil {\n\t\treturn types.InferenceDeployment{},\n\t\t\twrapResponseError(err, resp, \"inference\", name)\n\t}\n\n\treturn inference, wrapResponseError(err, resp, \"inference\", name)\n}\n"
  },
  {
    "path": "agent/client/inference_list.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// InferenceList lists the inferences.\nfunc (cli *Client) InferenceList(ctx context.Context, namespace string) ([]types.InferenceDeployment, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\tresp, err := cli.get(ctx, gatewayInferControlPlanePath, urlValues, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn nil,\n\t\t\twrapResponseError(err, resp, \"inferences with namespace\", namespace)\n\t}\n\n\tvar inferences []types.InferenceDeployment\n\terr = json.NewDecoder(resp.body).Decode(&inferences)\n\n\treturn inferences, err\n}\n"
  },
  {
    "path": "agent/client/inference_remove.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// InferenceRemove removes the inference.\nfunc (cli *Client) InferenceRemove(ctx context.Context, namespace string,\n\tname string) error {\n\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\treq := types.DeleteFunctionRequest{\n\t\tFunctionName: name,\n\t}\n\n\tresp, err := cli.delete(ctx, gatewayInferControlPlanePath, urlValues, req, nil)\n\tdefer ensureReaderClosed(resp)\n\treturn wrapResponseError(err, resp, \"inference\", name)\n}\n"
  },
  {
    "path": "agent/client/inference_scale.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// InferenceScale scales the inference.\nfunc (cli *Client) InferenceScale(ctx context.Context, namespace string,\n\tname string, replicas int, eventMessage string) error {\n\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\treq := types.ScaleServiceRequest{\n\t\tServiceName:  name,\n\t\tReplicas:     uint64(replicas),\n\t\tEventMessage: eventMessage,\n\t}\n\n\tresp, err := cli.post(ctx, gatewayInferScaleControlPath, urlValues, req, nil)\n\tdefer ensureReaderClosed(resp)\n\treturn wrapResponseError(err, resp, \"inference\", name)\n}\n"
  },
  {
    "path": "agent/client/inference_update.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// DeploymentUpdate updates the deployment.\nfunc (cli *Client) DeploymentUpdate(ctx context.Context, namespace string,\n\tinference types.InferenceDeployment) (types.InferenceDeployment, error) {\n\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\tresp, err := cli.put(ctx, gatewayInferControlPlanePath, urlValues, inference, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn inference,\n\t\t\twrapResponseError(err, resp, \"inference\", inference.Spec.Name)\n\t}\n\n\treturn inference, wrapResponseError(err, resp, \"inference\", inference.Spec.Name)\n}\n"
  },
  {
    "path": "agent/client/info.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// InfoGet gets the agent info.\nfunc (cli *Client) InfoGet(ctx context.Context) (types.ProviderInfo, error) {\n\tresp, err := cli.get(ctx, \"/system/info\", nil, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn types.ProviderInfo{},\n\t\t\twrapResponseError(err, resp, \"info\", \"system\")\n\t}\n\n\tvar info types.ProviderInfo\n\terr = json.NewDecoder(resp.body).Decode(&info)\n\tif err != nil {\n\t\treturn types.ProviderInfo{},\n\t\t\twrapResponseError(err, resp, \"info\", \"system\")\n\t}\n\n\treturn info, wrapResponseError(err, resp, \"info\", \"system\")\n}\n"
  },
  {
    "path": "agent/client/instance_exec.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"strings\"\n)\n\n// InstanceExec executes command in the instance.\nfunc (cli *Client) InstanceExec(ctx context.Context,\n\tnamespace, inferenceName, instance string, command []string, tty bool) (string, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\turlValues.Add(\"tty\", fmt.Sprintf(\"%v\", tty))\n\turlValues.Add(\"command\", strings.Join(command, \",\"))\n\n\turlPath := fmt.Sprintf(gatewayInferInstanceExecControlPlanePath, inferenceName, instance)\n\n\tresp, err := cli.get(ctx, urlPath, urlValues, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn \"\",\n\t\t\twrapResponseError(err, resp, \"instances with namespace\", namespace)\n\t}\n\n\tres, err := io.ReadAll(resp.body)\n\tif err != nil {\n\t\treturn \"\", wrapResponseError(err, resp, \"instances with namespace\", namespace)\n\t}\n\n\treturn string(res), wrapResponseError(err, resp, \"instances with namespace\", namespace)\n}\n\n// InstanceExecTTY executes command in the instance with a TTY.\nfunc (cli *Client) InstanceExecTTY(ctx context.Context,\n\tnamespace, inferenceName, instance string, command []string,\n) (HijackedResponse, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\turlValues.Add(\"tty\", \"true\")\n\turlValues.Add(\"command\", strings.Join(command, \",\"))\n\n\turlPath := fmt.Sprintf(gatewayInferInstanceExecControlPlanePath, inferenceName, instance)\n\n\tresp, err := cli.websocket(ctx, urlPath, urlValues, nil)\n\tif err != nil {\n\t\treturn HijackedResponse{}, wrapResponseError(err, serverResponse{}, \"instances with namespace\", namespace)\n\t}\n\n\treturn resp, nil\n}\n"
  },
  {
    "path": "agent/client/instance_list.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// InstanceList lists the deployment instances.\nfunc (cli *Client) InstanceList(ctx context.Context,\n\tnamespace, inferenceName string) ([]types.InferenceDeploymentInstance, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\n\turlPath := fmt.Sprintf(gatewayInferInstanceControlPlanePath, inferenceName)\n\n\tresp, err := cli.get(ctx, urlPath, urlValues, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn nil,\n\t\t\twrapResponseError(err, resp, \"instances with namespace\", namespace)\n\t}\n\n\tvar instances []types.InferenceDeploymentInstance\n\terr = json.NewDecoder(resp.body).Decode(&instances)\n\n\treturn instances, wrapResponseError(err, resp, \"instances with namespace\", namespace)\n}\n"
  },
  {
    "path": "agent/client/log.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nconst LogBufferSize = 128\n\n// DeploymentLogGet gets the deployment logs.\nfunc (cli *Client) DeploymentLogGet(ctx context.Context, namespace, name string,\n\tsince string, tail int, end string, follow bool) (\n\t<-chan types.Message, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\turlValues.Add(\"name\", name)\n\n\tif since != \"\" {\n\t\turlValues.Add(\"since\", since)\n\t}\n\n\tif end != \"\" {\n\t\turlValues.Add(\"end\", end)\n\t}\n\n\tif tail != 0 {\n\t\turlValues.Add(\"tail\", fmt.Sprintf(\"%d\", tail))\n\t}\n\n\tif follow {\n\t\turlValues.Add(\"follow\", \"true\")\n\t}\n\n\tresp, err := cli.get(ctx, \"/system/logs/inference\", urlValues, nil)\n\n\tif err != nil {\n\t\treturn nil, wrapResponseError(err, resp, \"deployment logs\", name)\n\t}\n\n\tstream := make(chan types.Message, LogBufferSize)\n\tvar log types.Message\n\tscanner := bufio.NewScanner(resp.body)\n\tgo func() {\n\t\tdefer ensureReaderClosed(resp)\n\t\tdefer close(stream)\n\t\tfor scanner.Scan() {\n\t\t\terr = json.Unmarshal(scanner.Bytes(), &log)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warnf(\"failed to decode %s log: %v | %s | [%s]\", name, err, scanner.Text(), scanner.Err())\n\t\t\t\treturn\n\t\t\t\t// continue\n\t\t\t}\n\t\t\tstream <- log\n\t\t}\n\t}()\n\n\treturn stream, err\n}\n\nfunc (cli *Client) BuildLogGet(ctx context.Context, namespace, name, since string,\n\ttail int) ([]types.Message, error) {\n\turlValues := url.Values{}\n\turlValues.Add(\"namespace\", namespace)\n\turlValues.Add(\"name\", name)\n\n\tif since != 
\"\" {\n\t\turlValues.Add(\"since\", since)\n\t}\n\tif tail != 0 {\n\t\turlValues.Add(\"tail\", fmt.Sprintf(\"%d\", tail))\n\t}\n\n\tresp, err := cli.get(ctx, \"/system/logs/build\", urlValues, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn nil,\n\t\t\twrapResponseError(err, resp, \"build logs\", name)\n\t}\n\n\tvar log types.Message\n\tlogs := []types.Message{}\n\tscanner := bufio.NewScanner(resp.body)\n\tfor scanner.Scan() {\n\t\terr = json.NewDecoder(strings.NewReader(scanner.Text())).Decode(&log)\n\t\tif err != nil {\n\t\t\treturn nil, wrapResponseError(err, resp, \"build logs\", name)\n\t\t}\n\t\tlogs = append(logs, log)\n\t}\n\n\treturn logs, err\n}\n"
  },
  {
    "path": "agent/client/modelz_cloud.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n)\n\nfunc (cli *Client) WaitForAPIServerReady() error {\n\terr := wait.PollImmediateWithContext(context.Background(), time.Second, consts.DefaultAPIServerReadyTimeout, func(ctx context.Context) (bool, error) {\n\t\terr, healthStatus := cli.waitForAPIServerReady(ctx)\n\t\tif err != nil || healthStatus != http.StatusOK {\n\t\t\tlogrus.Warn(\"APIServer isn't ready yet, Waiting a little while.\")\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to wait for apiserver ready, %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (cli *Client) waitForAPIServerReady(ctx context.Context) (error, int) {\n\turlValues := url.Values{}\n\tresp, err := cli.get(ctx, \"/healthz\", urlValues, nil)\n\tif err != nil {\n\t\treturn wrapResponseError(err, resp, \"check apiserver is ready\", \"\"), resp.statusCode\n\t}\n\tdefer ensureReaderClosed(resp)\n\treturn nil, resp.statusCode\n}\n\nfunc (cli *Client) RegisterAgent(ctx context.Context, token string, cluster *types.ManagedCluster) error {\n\turlValues := url.Values{}\n\tagentToken, err := ParseAgentToken(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\turlPath := fmt.Sprintf(modelzCloudClusterWithUserControlPlanePath, agentToken.UID)\n\theaders := make(map[string][]string)\n\theaders[\"Authorization\"] = []string{\"Bearer \" + agentToken.Token}\n\tcluster.Name = agentToken.ClusterName\n\n\tresp, err := cli.post(ctx, urlPath, urlValues, cluster, headers)\n\tif err != nil {\n\t\treturn wrapResponseError(err, resp, \"register agent to modelz cloud\", agentToken.UID)\n\t}\n\tdefer ensureReaderClosed(resp)\n\n\terr = json.NewDecoder(resp.body).Decode(&cluster)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cli *Client) UpdateAgentStatus(ctx context.Context, apiServerReady <-chan struct{}, token string, cluster types.ManagedCluster) error {\n\t<-apiServerReady\n\turlValues := url.Values{}\n\tagentToken, err := ParseAgentToken(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\turlPath := fmt.Sprintf(modelzCloudClusterControlPlanePath, agentToken.UID, cluster.ID)\n\theaders := make(map[string][]string)\n\theaders[\"Authorization\"] = []string{\"Bearer \" + agentToken.Token}\n\n\tresp, err := cli.put(ctx, urlPath, urlValues, cluster, headers)\n\tif err != nil {\n\t\treturn wrapResponseError(err, resp, \"update agent status to modelz cloud\", agentToken.UID)\n\t}\n\tdefer ensureReaderClosed(resp)\n\n\tif resp.statusCode == 200 {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"failed to update agent status to modelz cloud, status code: %d\", resp.statusCode)\n}\n\nfunc (cli *Client) GetAPIKeys(ctx context.Context, apiServerReady <-chan struct{}, token string, cluster string) (types.APIKeyMap, error) {\n\t<-apiServerReady\n\turlValues := url.Values{}\n\tagentToken, err := ParseAgentToken(token)\n\tkeys := types.APIKeyMap{}\n\tif err != nil {\n\t\treturn keys, err\n\t}\n\theaders := make(map[string][]string)\n\theaders[\"Authorization\"] = []string{\"Bearer \" + agentToken.Token}\n\n\turlPath := fmt.Sprintf(modelzCloudClusterAPIKeyControlPlanePath, agentToken.UID, cluster)\n\tresp, err := cli.get(ctx, urlPath, urlValues, headers)\n\tif err != nil {\n\t\treturn keys, wrapResponseError(err, resp, \"get api keys from modelz cloud\", agentToken.UID)\n\t}\n\tdefer ensureReaderClosed(resp)\n\n\terr = json.NewDecoder(resp.body).Decode(&keys)\n\tif err != nil {\n\t\treturn keys, err\n\t}\n\treturn keys, nil\n}\n\nfunc (cli *Client) GetNamespaces(ctx context.Context, apiServerReady <-chan struct{}, token string, cluster string) (types.NamespaceList, error) {\n\t<-apiServerReady\n\turlValues := url.Values{}\n\tagentToken, err := 
ParseAgentToken(token)\n\tns := types.NamespaceList{}\n\tif err != nil {\n\t\treturn ns, err\n\t}\n\turlValues.Add(\"login_name\", agentToken.UID)\n\theaders := make(map[string][]string)\n\theaders[\"Authorization\"] = []string{\"Bearer \" + agentToken.Token}\n\n\tresp, err := cli.get(ctx, fmt.Sprintf(modelzCloudClusterNamespaceControlPlanePath, agentToken.UID, cluster), urlValues, headers)\n\tif err != nil {\n\t\treturn ns, wrapResponseError(err, resp, \"get namespaces from modelz cloud\", agentToken.UID)\n\t}\n\tdefer ensureReaderClosed(resp)\n\n\terr = json.NewDecoder(resp.body).Decode(&ns)\n\tif err != nil {\n\t\treturn ns, err\n\t}\n\n\tns.Items = append(ns.Items, GetNamespaceByUserID(agentToken.UID))\n\treturn ns, nil\n}\n\nfunc (cli *Client) GetUIDFromDeploymentID(ctx context.Context, token string, cluster string, deployment string) (string, error) {\n\turlValues := url.Values{}\n\tagentToken, err := ParseAgentToken(token)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\theaders := make(map[string][]string)\n\theaders[\"Authorization\"] = []string{\"Bearer \" + agentToken.Token}\n\turlPath := fmt.Sprintf(modelzCloudClusterDeploymentControlPlanePath, agentToken.UID, cluster, deployment)\n\n\tresp, err := cli.get(ctx, urlPath, urlValues, headers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer ensureReaderClosed(resp)\n\n\tvar uid string\n\terr = json.NewDecoder(resp.body).Decode(&uid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.statusCode == 200 {\n\t\treturn uid, nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to get uid from deployment id, status code: %d\", resp.statusCode)\n}\n\nfunc (cli *Client) CreateDeploymentEvent(ctx context.Context, token string, event types.DeploymentEvent) error {\n\turlValues := url.Values{}\n\tagentToken, err := ParseAgentToken(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\theaders := make(map[string][]string)\n\theaders[\"Authorization\"] = []string{\"Bearer \" + agentToken.Token}\n\turlPath := 
fmt.Sprintf(modelzCloudClusterDeploymentEventControlPlanePath, agentToken.UID, agentToken.ClusterName, event.DeploymentID)\n\n\tresp, err := cli.post(ctx, urlPath, urlValues, event, headers)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ensureReaderClosed(resp)\n\n\tif resp.statusCode == http.StatusCreated {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"failed to create deployment event, status code: %d\", resp.statusCode)\n}\n"
  },
  {
    "path": "agent/client/namespace_create.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// NamespaceCreate creates the namespace.\nfunc (cli *Client) NamespaceCreate(ctx context.Context,\n\tnamespace string) error {\n\treq := types.NamespaceRequest{\n\t\tName: namespace,\n\t}\n\n\turlValues := url.Values{}\n\n\tresp, err := cli.post(ctx, gatewayNamespaceControlPlanePath, urlValues, req, nil)\n\tdefer ensureReaderClosed(resp)\n\n\treturn wrapResponseError(err, resp, \"namespace\", namespace)\n}\n"
  },
  {
    "path": "agent/client/namespace_delete.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// NamespaceDelete deletes the namespace.\nfunc (cli *Client) NamespaceDelete(ctx context.Context,\n\tnamespace string) error {\n\treq := types.NamespaceRequest{\n\t\tName: namespace,\n\t}\n\n\turlValues := url.Values{}\n\n\tresp, err := cli.delete(ctx, gatewayNamespaceControlPlanePath, urlValues, req, nil)\n\tdefer ensureReaderClosed(resp)\n\n\treturn wrapResponseError(err, resp, \"namespace\", namespace)\n}\n"
  },
  {
    "path": "agent/client/options.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/docker/go-connections/sockets\"\n\t\"github.com/docker/go-connections/tlsconfig\"\n)\n\n// Opt is a configuration option to initialize a client\ntype Opt func(*Client) error\n\n// FromEnv configures the client with values from environment variables.\n//\n// FromEnv uses the following environment variables:\n//\n// ENVD_SERVER_HOST (EnvOverrideHost) to set the URL to the docker server.\n//\n// ENVD_SERVER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to\n// load the TLS certificates (ca.pem, cert.pem, key.pem).\n//\n// ENVD_SERVER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by\n// default).\nfunc FromEnv(c *Client) error {\n\t// TODO(gaocegege): Support:\n\t// ENVD_SERVER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to\n\t// use, leave empty for latest.\n\t//\n\tops := []Opt{\n\t\tWithTLSClientConfigFromEnv(),\n\t\tWithHostFromEnv(),\n\t}\n\tfor _, op := range ops {\n\t\tif err := op(c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// WithDialContext applies the dialer to the client transport. 
This can be\n// used to set the Timeout and KeepAlive settings of the client.\nfunc WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {\n\treturn func(c *Client) error {\n\t\tif transport, ok := c.client.Transport.(*http.Transport); ok {\n\t\t\ttransport.DialContext = dialContext\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.Errorf(\"cannot apply dialer to transport: %T\", c.client.Transport)\n\t}\n}\n\n// WithHost overrides the client host with the specified one.\nfunc WithHost(host string) Opt {\n\treturn func(c *Client) error {\n\t\thostURL, err := ParseHostURL(host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.host = host\n\t\tc.proto = hostURL.Scheme\n\t\tc.addr = hostURL.Host\n\t\tc.basePath = hostURL.Path\n\t\tif transport, ok := c.client.Transport.(*http.Transport); ok {\n\t\t\treturn sockets.ConfigureTransport(transport, c.proto, c.addr)\n\t\t}\n\t\treturn errors.Errorf(\"cannot apply host to transport: %T\", c.client.Transport)\n\t}\n}\n\n// WithHostFromEnv overrides the client host with the host specified in the\n// DOCKER_HOST (EnvOverrideHost) environment variable. 
If DOCKER_HOST is not set,\n// or set to an empty value, the host is not modified.\nfunc WithHostFromEnv() Opt {\n\treturn func(c *Client) error {\n\t\tif host := os.Getenv(EnvOverrideHost); host != \"\" {\n\t\t\treturn WithHost(host)(c)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// WithHTTPClient overrides the client http client with the specified one\nfunc WithHTTPClient(client *http.Client) Opt {\n\treturn func(c *Client) error {\n\t\tif client != nil {\n\t\t\tc.client = client\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// WithTimeout configures the time limit for requests made by the HTTP client\nfunc WithTimeout(timeout time.Duration) Opt {\n\treturn func(c *Client) error {\n\t\tc.client.Timeout = timeout\n\t\treturn nil\n\t}\n}\n\n// WithHTTPHeaders overrides the client default http headers\nfunc WithHTTPHeaders(headers map[string]string) Opt {\n\treturn func(c *Client) error {\n\t\tc.customHTTPHeaders = headers\n\t\treturn nil\n\t}\n}\n\n// WithScheme overrides the client scheme with the specified one\nfunc WithScheme(scheme string) Opt {\n\treturn func(c *Client) error {\n\t\tc.scheme = scheme\n\t\treturn nil\n\t}\n}\n\n// WithTLSClientConfig applies a tls config to the client transport.\nfunc WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {\n\treturn func(c *Client) error {\n\t\topts := tlsconfig.Options{\n\t\t\tCAFile:             cacertPath,\n\t\t\tCertFile:           certPath,\n\t\t\tKeyFile:            keyPath,\n\t\t\tExclusiveRootPools: true,\n\t\t}\n\t\tconfig, err := tlsconfig.Client(opts)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to create tls config\")\n\t\t}\n\t\tif transport, ok := c.client.Transport.(*http.Transport); ok {\n\t\t\ttransport.TLSClientConfig = config\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.Errorf(\"cannot apply tls config to transport: %T\", c.client.Transport)\n\t}\n}\n\n// WithTLSClientConfigFromEnv configures the client's TLS settings with the\n// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY 
environment variables.\n// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified.\n//\n// WithTLSClientConfigFromEnv uses the following environment variables:\n//\n// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to\n// load the TLS certificates (ca.pem, cert.pem, key.pem).\n//\n// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by\n// default).\nfunc WithTLSClientConfigFromEnv() Opt {\n\treturn func(c *Client) error {\n\t\tdockerCertPath := os.Getenv(EnvOverrideCertPath)\n\t\tif dockerCertPath == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\toptions := tlsconfig.Options{\n\t\t\tCAFile:             filepath.Join(dockerCertPath, \"ca.pem\"),\n\t\t\tCertFile:           filepath.Join(dockerCertPath, \"cert.pem\"),\n\t\t\tKeyFile:            filepath.Join(dockerCertPath, \"key.pem\"),\n\t\t\tInsecureSkipVerify: os.Getenv(EnvTLSVerify) == \"\",\n\t\t}\n\t\ttlsc, err := tlsconfig.Client(options)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.client = &http.Client{\n\t\t\tTransport:     &http.Transport{TLSClientConfig: tlsc},\n\t\t\tCheckRedirect: CheckRedirect,\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// WithVersion overrides the client version with the specified one. If an empty\n// version is specified, the value will be ignored to allow version negotiation.\nfunc WithVersion(version string) Opt {\n\treturn func(c *Client) error {\n\t\tif version != \"\" {\n\t\t\tc.version = version\n\t\t\tc.manualOverride = true\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "agent/client/request.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\n// serverResponse is a wrapper for http API responses.\ntype serverResponse struct {\n\tbody       io.ReadCloser\n\theader     http.Header\n\tstatusCode int\n\treqURL     *url.URL\n}\n\n// head sends an http request to the docker API using the method HEAD.\nfunc (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {\n\treturn cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers)\n}\n\n// get sends an http request to the docker API using the method GET with a specific Go context.\nfunc (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {\n\treturn cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers)\n}\n\n// post sends an http request to the docker API using the method POST with a specific Go context.\nfunc (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {\n\tbody, headers, err := encodeBody(obj, headers)\n\tif err != nil {\n\t\treturn serverResponse{}, err\n\t}\n\treturn cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)\n}\n\nfunc (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {\n\treturn cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)\n}\n\nfunc (cli *Client) put(ctx 
context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {\n\tbody, headers, err := encodeBody(obj, headers)\n\tif err != nil {\n\t\treturn serverResponse{}, err\n\t}\n\treturn cli.sendRequest(ctx, http.MethodPut, path, query, body, headers)\n}\n\n// putRaw sends an http request to the docker API using the method PUT.\nfunc (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {\n\treturn cli.sendRequest(ctx, http.MethodPut, path, query, body, headers)\n}\n\n// delete sends an http request to the docker API using the method DELETE.\nfunc (cli *Client) delete(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {\n\tbody, headers, err := encodeBody(obj, headers)\n\tif err != nil {\n\t\treturn serverResponse{}, err\n\t}\n\treturn cli.sendRequest(ctx, http.MethodDelete, path, query, body, headers)\n}\n\ntype headers map[string][]string\n\nfunc encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) {\n\tif obj == nil {\n\t\treturn nil, headers, nil\n\t}\n\n\tbody, err := encodeData(obj)\n\tif err != nil {\n\t\treturn nil, headers, err\n\t}\n\tif headers == nil {\n\t\theaders = make(map[string][]string)\n\t}\n\theaders[\"Content-Type\"] = []string{\"application/json\"}\n\treturn body, headers, nil\n}\n\nfunc (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) {\n\texpectedPayload := (method == http.MethodPost || method == http.MethodPut)\n\tif expectedPayload && body == nil {\n\t\tbody = bytes.NewReader([]byte{})\n\t}\n\n\treq, err := http.NewRequest(method, path, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq = cli.addHeaders(req, headers)\n\n\tif cli.proto == \"unix\" || cli.proto == \"npipe\" {\n\t\t// For local communications, it doesn't matter what the host is. 
We just\n\t\t// need a valid and meaningful host name.\n\t\treq.Host = \"modelz\"\n\t}\n\n\treq.URL.Host = cli.addr\n\treq.URL.Scheme = cli.scheme\n\n\tif expectedPayload && req.Header.Get(\"Content-Type\") == \"\" {\n\t\treq.Header.Set(\"Content-Type\", \"text/plain\")\n\t}\n\n\tlogrus.Debugf(\"Sending HTTP request to %s\\n\", req.URL.String())\n\tlogrus.Debugf(\"Request Headers: %v\\n\", req.Header)\n\tlogrus.Debugf(\"Request Body: %v\\n\", body)\n\treturn req, nil\n}\n\nfunc (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {\n\treq, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers)\n\tif err != nil {\n\t\treturn serverResponse{}, errors.Wrap(err, \"failed to build request\")\n\t}\n\n\tresp, err := cli.doRequest(ctx, req)\n\tswitch {\n\tcase errors.Is(err, context.Canceled):\n\t\treturn serverResponse{}, errdefs.Cancelled(err)\n\tcase errors.Is(err, context.DeadlineExceeded):\n\t\treturn serverResponse{}, errdefs.Deadline(err)\n\tcase err == nil:\n\t\terr = cli.checkResponseErr(resp)\n\t}\n\treturn resp, errdefs.FromStatusCode(err, resp.statusCode)\n}\n\nfunc (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {\n\tserverResp := serverResponse{statusCode: -1, reqURL: req.URL}\n\n\treq = req.WithContext(ctx)\n\tresp, err := cli.client.Do(req)\n\tif err != nil {\n\t\tif cli.scheme != \"https\" && strings.Contains(err.Error(), \"malformed HTTP response\") {\n\t\t\treturn serverResp, fmt.Errorf(\"%v.\\n* Are you trying to connect to a TLS-enabled daemon without TLS?\", err)\n\t\t}\n\n\t\tif cli.scheme == \"https\" && strings.Contains(err.Error(), \"bad certificate\") {\n\t\t\treturn serverResp, errors.Wrap(err, \"the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings\")\n\t\t}\n\n\t\t// Don't decorate context sentinel errors; users may be comparing 
to\n\t\t// them directly.\n\t\tif errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {\n\t\t\treturn serverResp, err\n\t\t}\n\n\t\tif nErr, ok := err.(*url.Error); ok {\n\t\t\tif nErr, ok := nErr.Err.(*net.OpError); ok {\n\t\t\t\tif os.IsPermission(nErr.Err) {\n\t\t\t\t\treturn serverResp, errors.Wrapf(err, \"permission denied while trying to connect to the modelz agent server socket at %v\", cli.host)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err, ok := err.(net.Error); ok {\n\t\t\tif err.Timeout() {\n\t\t\t\treturn serverResp, ErrorConnectionFailed(cli.host)\n\t\t\t}\n\t\t\tif strings.Contains(err.Error(), \"connection refused\") || strings.Contains(err.Error(), \"dial unix\") {\n\t\t\t\treturn serverResp, ErrorConnectionFailed(cli.host)\n\t\t\t}\n\t\t}\n\n\t\treturn serverResp, errors.Wrap(err, \"error during connect\")\n\t}\n\n\tif resp != nil {\n\t\tserverResp.statusCode = resp.StatusCode\n\t\tserverResp.body = resp.Body\n\t\tserverResp.header = resp.Header\n\t}\n\treturn serverResp, nil\n}\n\nfunc (cli *Client) checkResponseErr(serverResp serverResponse) error {\n\tif serverResp.statusCode >= 200 && serverResp.statusCode < 400 {\n\t\treturn nil\n\t}\n\n\tvar body []byte\n\tvar err error\n\tif serverResp.body != nil {\n\t\tbodyMax := 1 * 1024 * 1024 // 1 MiB\n\t\tbodyR := &io.LimitedReader{\n\t\t\tR: serverResp.body,\n\t\t\tN: int64(bodyMax),\n\t\t}\n\t\tbody, err = io.ReadAll(bodyR)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif bodyR.N == 0 {\n\t\t\treturn fmt.Errorf(\"request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version\", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL)\n\t\t}\n\t}\n\tif len(body) == 0 {\n\t\treturn fmt.Errorf(\"request returned %s for API route and version %s, check if the server supports the requested API version\", http.StatusText(serverResp.statusCode), serverResp.reqURL)\n\t}\n\n\terrorMessage := 
strings.TrimSpace(string(body))\n\n\treturn errors.Wrap(errors.New(errorMessage), \"Error response from gateway\")\n}\n\nfunc (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {\n\t// Add CLI Config's HTTP Headers BEFORE we set the Docker headers\n\t// then the user can't change OUR headers\n\tfor k, v := range cli.customHTTPHeaders {\n\t\treq.Header.Set(k, v)\n\t}\n\n\tfor k, v := range headers {\n\t\treq.Header[http.CanonicalHeaderKey(k)] = v\n\t}\n\treturn req\n}\n\nfunc encodeData(data interface{}) (*bytes.Buffer, error) {\n\tparams := bytes.NewBuffer(nil)\n\tif data != nil {\n\t\tif err := json.NewEncoder(params).Encode(data); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn params, nil\n}\n\nfunc ensureReaderClosed(response serverResponse) {\n\tif response.body != nil {\n\t\t// Drain up to 512 bytes and close the body to let the Transport reuse the connection\n\t\tio.CopyN(io.Discard, response.body, 512)\n\t\tresponse.body.Close()\n\t}\n}\n"
  },
  {
    "path": "agent/client/server_label_create.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/url\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// ServerLabelCreate create the labels for the servers.\nfunc (cli *Client) ServerLabelCreate(ctx context.Context, name string,\n\tlabels map[string]string) error {\n\treq := types.ServerSpec{\n\t\tName:   name,\n\t\tLabels: labels,\n\t}\n\n\turlValues := url.Values{}\n\n\tresp, err := cli.post(ctx,\n\t\tfmt.Sprintf(gatewayServerLabelCreateControlPlanePath, name), urlValues, req, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn wrapResponseError(err, resp, \"server\", name)\n\t}\n\n\treturn wrapResponseError(err, resp, \"server\", name)\n}\n"
  },
  {
    "path": "agent/client/server_list.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// ServerList lists the servers.\nfunc (cli *Client) ServerList(ctx context.Context) ([]types.Server, error) {\n\tresp, err := cli.get(ctx, gatewayServerControlPlanePath, nil, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn nil,\n\t\t\twrapResponseError(err, resp, \"servers\", \"\")\n\t}\n\n\tvar servers []types.Server\n\terr = json.NewDecoder(resp.body).Decode(&servers)\n\n\treturn servers, wrapResponseError(err, resp, \"servers\", \"\")\n}\n"
  },
  {
    "path": "agent/client/server_node_delete.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/url\"\n)\n\n// ServerLabelCreate create the labels for the servers.\nfunc (cli *Client) ServerNodeDelete(ctx context.Context, name string) error {\n\turlValues := url.Values{}\n\n\tresp, err := cli.delete(ctx,\n\t\tfmt.Sprintf(gatewayServerNodeDeleteControlPlanePath, name), urlValues, nil, nil)\n\tdefer ensureReaderClosed(resp)\n\n\tif err != nil {\n\t\treturn wrapResponseError(err, resp, \"server-delete\", name)\n\t}\n\n\treturn wrapResponseError(err, resp, \"server-delete\", name)\n}\n"
  },
  {
    "path": "agent/client/transport.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage client // import \"github.com/docker/docker/client\"\n\nimport (\n\t\"crypto/tls\"\n\t\"net/http\"\n)\n\n// resolveTLSConfig attempts to resolve the TLS configuration from the\n// RoundTripper.\nfunc resolveTLSConfig(transport http.RoundTripper) *tls.Config {\n\tswitch tr := transport.(type) {\n\tcase *http.Transport:\n\t\treturn tr.TLSClientConfig\n\tdefault:\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "agent/client/utils.go",
    "content": "package client\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nconst (\n\tDefaultPrefix = \"modelz-\"\n)\n\nfunc ParseAgentToken(token string) (types.AgentToken, error) {\n\tagentToken := types.AgentToken{}\n\tif token == \"\" {\n\t\treturn agentToken, errors.New(\"agent token is empty\")\n\t}\n\n\tstrings := strings.Split(token, \":\")\n\tif len(strings) != 3 {\n\t\treturn agentToken, errors.New(\"invalid agent token\")\n\t}\n\tagentToken.ClusterName = strings[0]\n\tagentToken.UID = strings[1]\n\tagentToken.Token = strings[2]\n\n\treturn agentToken, nil\n}\n\nfunc GetNamespaceByUserID(uid string) string {\n\treturn fmt.Sprintf(\"%s%s\", DefaultPrefix, uid)\n}\n\nfunc GetUserIDFromNamespace(ns string) (string, error) {\n\tif len(ns) < 8 {\n\t\treturn \"\", fmt.Errorf(\"namespace too short\")\n\t}\n\n\tif ns[:len(DefaultPrefix)] != DefaultPrefix {\n\t\treturn \"\", fmt.Errorf(\"namespace does not start with \")\n\t}\n\n\treturn ns[7:], nil\n}\n"
  },
  {
    "path": "agent/cmd/agent/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli/v2\"\n\n\t\"github.com/tensorchord/openmodelz/agent/pkg/app\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/version\"\n)\n\nfunc run(args []string) error {\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tfmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision)\n\t}\n\n\tapp := app.New()\n\treturn app.Run(args)\n}\n\nfunc handleErr(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tlogrus.Error(err)\n\tos.Exit(1)\n}\n\n// @title       modelz cluster agent\n// @version     v0.0.23\n// @description modelz kubernetes cluster agent\n\n// @contact.name  modelz support\n// @contact.url   https://github.com/tensorchord/openmodelz\n// @contact.email modelz-support@tensorchord.ai\n\n// @host     localhost:8081\n// @BasePath /\n// @schemes  http\nfunc main() {\n\terr := run(os.Args)\n\thandleErr(err)\n}\n"
  },
  {
    "path": "agent/errdefs/defs.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage errdefs // import \"github.com/docker/docker/errdefs\"\n\n// ErrNotFound signals that the requested object doesn't exist\ntype ErrNotFound interface {\n\tNotFound()\n}\n\n// ErrInvalidParameter signals that the user input is invalid\ntype ErrInvalidParameter interface {\n\tInvalidParameter()\n}\n\n// ErrConflict signals that some internal state conflicts with the requested action and can't be performed.\n// A change in state should be able to clear this error.\ntype ErrConflict interface {\n\tConflict()\n}\n\n// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action\ntype ErrUnauthorized interface {\n\tUnauthorized()\n}\n\n// ErrUnavailable signals that the requested action/subsystem is not available.\ntype ErrUnavailable interface {\n\tUnavailable()\n}\n\n// ErrForbidden signals that the requested action cannot be performed under any circumstances.\n// When a ErrForbidden is returned, the caller should never retry the action.\ntype ErrForbidden interface {\n\tForbidden()\n}\n\n// ErrSystem signals that some internal error occurred.\n// An example of this would be a failed mount request.\ntype ErrSystem interface {\n\tSystem()\n}\n\n// ErrNotModified signals that an action can't be performed because it's already in the desired state\ntype ErrNotModified interface {\n\tNotModified()\n}\n\n// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured.\ntype ErrNotImplemented interface {\n\tNotImplemented()\n}\n\n// ErrUnknown signals that the kind of error that occurred is not known.\ntype ErrUnknown interface {\n\tUnknown()\n}\n\n// ErrCancelled signals that the action was cancelled.\ntype ErrCancelled interface {\n\tCancelled()\n}\n\n// ErrDeadline 
signals that the deadline was reached before the action completed.\ntype ErrDeadline interface {\n\tDeadlineExceeded()\n}\n\n// ErrDataLoss indicates that data was lost or there is data corruption.\ntype ErrDataLoss interface {\n\tDataLoss()\n}\n"
  },
  {
    "path": "agent/errdefs/doc.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\n// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors.\n// Errors that cross the package boundary should implement one (and only one) of these interfaces.\n//\n// Packages should not reference these interfaces directly, only implement them.\n// To check if a particular error implements one of these interfaces, there are helper\n// functions provided (e.g. `Is<SomeError>`) which can be used rather than asserting the interfaces directly.\n// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`).\npackage errdefs // import \"github.com/docker/docker/errdefs\"\n"
  },
  {
    "path": "agent/errdefs/helpers.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage errdefs // import \"github.com/docker/docker/errdefs\"\n\nimport \"context\"\n\ntype errNotFound struct{ error }\n\nfunc (errNotFound) NotFound() {}\n\nfunc (e errNotFound) Cause() error {\n\treturn e.error\n}\n\nfunc (e errNotFound) Unwrap() error {\n\treturn e.error\n}\n\n// NotFound is a helper to create an error of the class with the same name from any error type\nfunc NotFound(err error) error {\n\tif err == nil || IsNotFound(err) {\n\t\treturn err\n\t}\n\treturn errNotFound{err}\n}\n\ntype errInvalidParameter struct{ error }\n\nfunc (errInvalidParameter) InvalidParameter() {}\n\nfunc (e errInvalidParameter) Cause() error {\n\treturn e.error\n}\n\nfunc (e errInvalidParameter) Unwrap() error {\n\treturn e.error\n}\n\n// InvalidParameter is a helper to create an error of the class with the same name from any error type\nfunc InvalidParameter(err error) error {\n\tif err == nil || IsInvalidParameter(err) {\n\t\treturn err\n\t}\n\treturn errInvalidParameter{err}\n}\n\ntype errConflict struct{ error }\n\nfunc (errConflict) Conflict() {}\n\nfunc (e errConflict) Cause() error {\n\treturn e.error\n}\n\nfunc (e errConflict) Unwrap() error {\n\treturn e.error\n}\n\n// Conflict is a helper to create an error of the class with the same name from any error type\nfunc Conflict(err error) error {\n\tif err == nil || IsConflict(err) {\n\t\treturn err\n\t}\n\treturn errConflict{err}\n}\n\ntype errUnauthorized struct{ error }\n\nfunc (errUnauthorized) Unauthorized() {}\n\nfunc (e errUnauthorized) Cause() error {\n\treturn e.error\n}\n\nfunc (e errUnauthorized) Unwrap() error {\n\treturn e.error\n}\n\n// Unauthorized is a helper to create an error of the class with the same name from any error type\nfunc Unauthorized(err error) error {\n\tif err == 
nil || IsUnauthorized(err) {\n\t\treturn err\n\t}\n\treturn errUnauthorized{err}\n}\n\ntype errUnavailable struct{ error }\n\nfunc (errUnavailable) Unavailable() {}\n\nfunc (e errUnavailable) Cause() error {\n\treturn e.error\n}\n\nfunc (e errUnavailable) Unwrap() error {\n\treturn e.error\n}\n\n// Unavailable is a helper to create an error of the class with the same name from any error type\nfunc Unavailable(err error) error {\n\tif err == nil || IsUnavailable(err) {\n\t\treturn err\n\t}\n\treturn errUnavailable{err}\n}\n\ntype errForbidden struct{ error }\n\nfunc (errForbidden) Forbidden() {}\n\nfunc (e errForbidden) Cause() error {\n\treturn e.error\n}\n\nfunc (e errForbidden) Unwrap() error {\n\treturn e.error\n}\n\n// Forbidden is a helper to create an error of the class with the same name from any error type\nfunc Forbidden(err error) error {\n\tif err == nil || IsForbidden(err) {\n\t\treturn err\n\t}\n\treturn errForbidden{err}\n}\n\ntype errSystem struct{ error }\n\nfunc (errSystem) System() {}\n\nfunc (e errSystem) Cause() error {\n\treturn e.error\n}\n\nfunc (e errSystem) Unwrap() error {\n\treturn e.error\n}\n\n// System is a helper to create an error of the class with the same name from any error type\nfunc System(err error) error {\n\tif err == nil || IsSystem(err) {\n\t\treturn err\n\t}\n\treturn errSystem{err}\n}\n\ntype errNotModified struct{ error }\n\nfunc (errNotModified) NotModified() {}\n\nfunc (e errNotModified) Cause() error {\n\treturn e.error\n}\n\nfunc (e errNotModified) Unwrap() error {\n\treturn e.error\n}\n\n// NotModified is a helper to create an error of the class with the same name from any error type\nfunc NotModified(err error) error {\n\tif err == nil || IsNotModified(err) {\n\t\treturn err\n\t}\n\treturn errNotModified{err}\n}\n\ntype errNotImplemented struct{ error }\n\nfunc (errNotImplemented) NotImplemented() {}\n\nfunc (e errNotImplemented) Cause() error {\n\treturn e.error\n}\n\nfunc (e errNotImplemented) Unwrap() error 
{\n\treturn e.error\n}\n\n// NotImplemented is a helper to create an error of the class with the same name from any error type\nfunc NotImplemented(err error) error {\n\tif err == nil || IsNotImplemented(err) {\n\t\treturn err\n\t}\n\treturn errNotImplemented{err}\n}\n\ntype errUnknown struct{ error }\n\nfunc (errUnknown) Unknown() {}\n\nfunc (e errUnknown) Cause() error {\n\treturn e.error\n}\n\nfunc (e errUnknown) Unwrap() error {\n\treturn e.error\n}\n\n// Unknown is a helper to create an error of the class with the same name from any error type\nfunc Unknown(err error) error {\n\tif err == nil || IsUnknown(err) {\n\t\treturn err\n\t}\n\treturn errUnknown{err}\n}\n\ntype errCancelled struct{ error }\n\nfunc (errCancelled) Cancelled() {}\n\nfunc (e errCancelled) Cause() error {\n\treturn e.error\n}\n\nfunc (e errCancelled) Unwrap() error {\n\treturn e.error\n}\n\n// Cancelled is a helper to create an error of the class with the same name from any error type\nfunc Cancelled(err error) error {\n\tif err == nil || IsCancelled(err) {\n\t\treturn err\n\t}\n\treturn errCancelled{err}\n}\n\ntype errDeadline struct{ error }\n\nfunc (errDeadline) DeadlineExceeded() {}\n\nfunc (e errDeadline) Cause() error {\n\treturn e.error\n}\n\nfunc (e errDeadline) Unwrap() error {\n\treturn e.error\n}\n\n// Deadline is a helper to create an error of the class with the same name from any error type\nfunc Deadline(err error) error {\n\tif err == nil || IsDeadline(err) {\n\t\treturn err\n\t}\n\treturn errDeadline{err}\n}\n\ntype errDataLoss struct{ error }\n\nfunc (errDataLoss) DataLoss() {}\n\nfunc (e errDataLoss) Cause() error {\n\treturn e.error\n}\n\nfunc (e errDataLoss) Unwrap() error {\n\treturn e.error\n}\n\n// DataLoss is a helper to create an error of the class with the same name from any error type\nfunc DataLoss(err error) error {\n\tif err == nil || IsDataLoss(err) {\n\t\treturn err\n\t}\n\treturn errDataLoss{err}\n}\n\n// FromContext returns the error class from the passed 
in context\nfunc FromContext(ctx context.Context) error {\n\te := ctx.Err()\n\tif e == nil {\n\t\treturn nil\n\t}\n\n\tif e == context.Canceled {\n\t\treturn Cancelled(e)\n\t}\n\tif e == context.DeadlineExceeded {\n\t\treturn Deadline(e)\n\t}\n\treturn Unknown(e)\n}\n"
  },
  {
    "path": "agent/errdefs/http_helpers.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage errdefs // import \"github.com/docker/docker/errdefs\"\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\n// FromStatusCode creates an errdef error, based on the provided HTTP status-code\nfunc FromStatusCode(err error, statusCode int) error {\n\tif err == nil {\n\t\treturn err\n\t}\n\tswitch statusCode {\n\tcase http.StatusNotFound:\n\t\terr = NotFound(err)\n\tcase http.StatusBadRequest:\n\t\terr = InvalidParameter(err)\n\tcase http.StatusConflict:\n\t\terr = Conflict(err)\n\tcase http.StatusUnauthorized:\n\t\terr = Unauthorized(err)\n\tcase http.StatusServiceUnavailable:\n\t\terr = Unavailable(err)\n\tcase http.StatusForbidden:\n\t\terr = Forbidden(err)\n\tcase http.StatusNotModified:\n\t\terr = NotModified(err)\n\tcase http.StatusNotImplemented:\n\t\terr = NotImplemented(err)\n\tcase http.StatusInternalServerError:\n\t\tif !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) {\n\t\t\terr = System(err)\n\t\t}\n\tdefault:\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\n\t\t\t\"module\":      \"api\",\n\t\t\t\"status_code\": statusCode,\n\t\t}).Debug(\"FIXME: Got an status-code for which error does not match any expected type!!!\")\n\n\t\tswitch {\n\t\tcase statusCode >= 200 && statusCode < 400:\n\t\t\t// it's a client error\n\t\tcase statusCode >= 400 && statusCode < 500:\n\t\t\terr = InvalidParameter(err)\n\t\tcase statusCode >= 500 && statusCode < 600:\n\t\t\terr = System(err)\n\t\tdefault:\n\t\t\terr = Unknown(err)\n\t\t}\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "agent/errdefs/is.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage errdefs // import \"github.com/docker/docker/errdefs\"\n\ntype causer interface {\n\tCause() error\n}\n\nfunc getImplementer(err error) error {\n\tswitch e := err.(type) {\n\tcase\n\t\tErrNotFound,\n\t\tErrInvalidParameter,\n\t\tErrConflict,\n\t\tErrUnauthorized,\n\t\tErrUnavailable,\n\t\tErrForbidden,\n\t\tErrSystem,\n\t\tErrNotModified,\n\t\tErrNotImplemented,\n\t\tErrCancelled,\n\t\tErrDeadline,\n\t\tErrDataLoss,\n\t\tErrUnknown:\n\t\treturn err\n\tcase causer:\n\t\treturn getImplementer(e.Cause())\n\tdefault:\n\t\treturn err\n\t}\n}\n\n// IsNotFound returns if the passed in error is an ErrNotFound\nfunc IsNotFound(err error) bool {\n\t_, ok := getImplementer(err).(ErrNotFound)\n\treturn ok\n}\n\n// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter\nfunc IsInvalidParameter(err error) bool {\n\t_, ok := getImplementer(err).(ErrInvalidParameter)\n\treturn ok\n}\n\n// IsConflict returns if the passed in error is an ErrConflict\nfunc IsConflict(err error) bool {\n\t_, ok := getImplementer(err).(ErrConflict)\n\treturn ok\n}\n\n// IsUnauthorized returns if the passed in error is an ErrUnauthorized\nfunc IsUnauthorized(err error) bool {\n\t_, ok := getImplementer(err).(ErrUnauthorized)\n\treturn ok\n}\n\n// IsUnavailable returns if the passed in error is an ErrUnavailable\nfunc IsUnavailable(err error) bool {\n\t_, ok := getImplementer(err).(ErrUnavailable)\n\treturn ok\n}\n\n// IsForbidden returns if the passed in error is an ErrForbidden\nfunc IsForbidden(err error) bool {\n\t_, ok := getImplementer(err).(ErrForbidden)\n\treturn ok\n}\n\n// IsSystem returns if the passed in error is an ErrSystem\nfunc IsSystem(err error) bool {\n\t_, ok := getImplementer(err).(ErrSystem)\n\treturn ok\n}\n\n// IsNotModified returns 
if the passed in error is a NotModified error\nfunc IsNotModified(err error) bool {\n\t_, ok := getImplementer(err).(ErrNotModified)\n\treturn ok\n}\n\n// IsNotImplemented returns if the passed in error is an ErrNotImplemented\nfunc IsNotImplemented(err error) bool {\n\t_, ok := getImplementer(err).(ErrNotImplemented)\n\treturn ok\n}\n\n// IsUnknown returns if the passed in error is an ErrUnknown\nfunc IsUnknown(err error) bool {\n\t_, ok := getImplementer(err).(ErrUnknown)\n\treturn ok\n}\n\n// IsCancelled returns if the passed in error is an ErrCancelled\nfunc IsCancelled(err error) bool {\n\t_, ok := getImplementer(err).(ErrCancelled)\n\treturn ok\n}\n\n// IsDeadline returns if the passed in error is an ErrDeadline\nfunc IsDeadline(err error) bool {\n\t_, ok := getImplementer(err).(ErrDeadline)\n\treturn ok\n}\n\n// IsDataLoss returns if the passed in error is an ErrDataLoss\nfunc IsDataLoss(err error) bool {\n\t_, ok := getImplementer(err).(ErrDataLoss)\n\treturn ok\n}\n"
  },
  {
    "path": "agent/pkg/app/config.go",
    "content": "package app\n\nimport (\n\t\"github.com/urfave/cli/v2\"\n\n\t\"github.com/tensorchord/openmodelz/agent/pkg/config\"\n)\n\nfunc configFromCLI(c *cli.Context) config.Config {\n\tcfg := config.New()\n\n\t// server\n\tcfg.Server.Dev = c.Bool(flagDev)\n\tcfg.Server.ServerPort = c.Int(flagServerPort)\n\tcfg.Server.ReadTimeout = c.Duration(flagServerReadTimeout)\n\tcfg.Server.WriteTimeout = c.Duration(flagServerWriteTimeout)\n\n\t// kubernetes\n\tcfg.KubeConfig.Kubeconfig = c.String(flagKubeConfig)\n\tcfg.KubeConfig.MasterURL = c.String(flagMasterURL)\n\tcfg.KubeConfig.QPS = c.Int(flagQPS)\n\tcfg.KubeConfig.Burst = c.Int(flagBurst)\n\tcfg.KubeConfig.ResyncPeriod = c.Duration(flagResyncPeriod)\n\n\t// inference ingress\n\tcfg.Ingress.IngressEnabled = c.Bool(flagIngressEnabled)\n\tcfg.Ingress.Domain = c.String(flagIngressDomain)\n\tcfg.Ingress.AnyIPToDomain = c.Bool(flagIngressAnyIPToDomain)\n\tcfg.Ingress.Namespace = c.String(flagIngressNamespace)\n\tcfg.Ingress.TLSEnabled = c.Bool(flagIngressTLSEnabled)\n\n\t// inference\n\tcfg.Inference.LogTimeout = c.Duration(flagInferenceLogTimeout)\n\tcfg.Inference.CacheTTL = c.Duration(flagInferenceCacheTTL)\n\n\t// build\n\tcfg.Build.BuildEnabled = c.Bool(flagBuildEnabled)\n\tcfg.Build.BuilderImage = c.String(flagBuilderImage)\n\tcfg.Build.BuildkitdAddress = c.String(flagBuildkitdAddress)\n\tcfg.Build.BuildCtlBin = c.String(flagBuildCtlBin)\n\tcfg.Build.BuildRegistry = c.String(flagBuildRegistry)\n\tcfg.Build.BuildRegistryToken = c.String(flagBuildRegistryToken)\n\tcfg.Build.BuildImagePullSecret = c.String(flagBuildImagePullSecret)\n\n\t// loki\n\tcfg.Logs.Timeout = c.Duration(flagLogsTimeout)\n\tcfg.Logs.LokiURL = c.String(flagLogsLokiURL)\n\tcfg.Logs.LokiUser = c.String(flaglogsLokiUser)\n\tcfg.Logs.LokiToken = c.String(flagLogsLokiToken)\n\n\t// metrics\n\tcfg.Metrics.PollingInterval = c.Duration(flagMetricsPollingInterval)\n\tcfg.Metrics.ServerPort = c.Int(flagMetricsPort)\n\tcfg.Metrics.PrometheusHost = 
c.String(flagMetricsPrometheusHost)\n\tcfg.Metrics.PrometheusPort = c.Int(flagMetricsPrometheusPort)\n\n\t// modelz cloud\n\tcfg.ModelZCloud.Enabled = c.Bool(flagModelZCloudEnabled)\n\tcfg.ModelZCloud.URL = c.String(flagModelZCloudURL)\n\tcfg.ModelZCloud.AgentToken = c.String(flagModelZCloudAgentToken)\n\tcfg.ModelZCloud.HeartbeatInterval = c.Duration(flagModelZCloudAgentHeartbeatInterval)\n\tcfg.ModelZCloud.Region = c.String(flagModelZCloudRegion)\n\tcfg.ModelZCloud.UnifiedAPIKey = c.String(flagModelZCloudUnifiedAPIKey)\n\tcfg.ModelZCloud.UpstreamTimeout = c.Duration(flagModelZCloudUpstreamTimeout)\n\tcfg.ModelZCloud.MaxIdleConnections = c.Int(flagModelZCloudMaxIdleConnections)\n\tcfg.ModelZCloud.MaxIdleConnectionsPerHost = c.Int(flagModelZCloudMaxIdleConnectionsPerHost)\n\tcfg.ModelZCloud.EventEnabled = c.Bool(flagModelZCloudEventEnabled)\n\treturn cfg\n}\n"
  },
  {
    "path": "agent/pkg/app/root.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage app\n\nimport (\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n\tcli \"github.com/urfave/cli/v2\"\n\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/version\"\n)\n\nconst (\n\tflagDebug = \"debug\"\n\tflagDev   = \"dev\"\n\n\t// server\n\tflagServerPort         = \"server-port\"\n\tflagServerReadTimeout  = \"server-read-timeout\"\n\tflagServerWriteTimeout = \"server-write-timeout\"\n\n\t// kubernetes\n\tflagMasterURL    = \"master-url\"\n\tflagKubeConfig   = \"kube-config\"\n\tflagQPS          = \"kube-qps\"\n\tflagBurst        = \"kube-burst\"\n\tflagResyncPeriod = \"kube-resync-period\"\n\n\t// inference ingress\n\tflagIngressEnabled       = \"ingress-enabled\"\n\tflagIngressDomain        = \"ingress-domain\"\n\tflagIngressNamespace     = \"ingress-namespace\"\n\tflagIngressAnyIPToDomain = \"ingress-any-ip-to-domain\"\n\tflagIngressTLSEnabled    = \"ingress-tls-enabled\"\n\n\t// inference\n\tflagInferenceLogTimeout = \"inference-log-timeout\"\n\tflagInferenceCacheTTL   = \"inference-cache-ttl\"\n\n\t// build\n\tflagBuildEnabled         = \"build-enabled\"\n\tflagBuilderImage         = \"builder-image\"\n\tflagBuildkitdAddress     = \"buildkitd-address\"\n\tflagBuildCtlBin          = \"buildctl-bin\"\n\tflagBuildRegistry        = \"build-registry\"\n\tflagBuildRegistryToken   = \"build-registry-token\"\n\tflagBuildImagePullSecret = \"build-image-pull-secret\"\n\n\t// metrics\n\tflagMetricsPollingInterval = \"metrics-polling-interval\"\n\tflagMetricsPort            = \"metrics-port\"\n\tflagMetricsPrometheusHost  = \"metrics-prometheus-host\"\n\tflagMetricsPrometheusPort  = \"metrics-prometheus-port\"\n\n\t// 
logs\n\tflagLogsTimeout   = \"logs-timeout\"\n\tflagLogsLokiURL   = \"logs-loki-url\"\n\tflaglogsLokiUser  = \"logs-loki-user\"\n\tflagLogsLokiToken = \"logs-loki-token\"\n\n\t// modelz cloud\n\tflagModelZCloudEnabled                   = \"modelz-cloud-enabled\"\n\tflagModelZCloudURL                       = \"modelz-cloud-url\"\n\tflagModelZCloudAgentToken                = \"modelz-cloud-agent-token\"\n\tflagModelZCloudAgentHeartbeatInterval    = \"modelz-cloud-agent-heartbeat-interval\"\n\tflagModelZCloudRegion                    = \"modelz-cloud-region\"\n\tflagModelZCloudUnifiedAPIKey             = \"modelz-cloud-unified-api-key\"\n\tflagModelZCloudUpstreamTimeout           = \"modelz-cloud-upstream-timeout\"\n\tflagModelZCloudMaxIdleConnections        = \"modelz-cloud-max-idle-connections\"\n\tflagModelZCloudMaxIdleConnectionsPerHost = \"modelz-cloud-max-idle-connections-per-host\"\n\tflagModelZCloudEventEnabled              = \"modelz-cloud-event-enabled\"\n)\n\ntype App struct {\n\t*cli.App\n}\n\nfunc New() App {\n\tinternalApp := cli.NewApp()\n\tinternalApp.EnableBashCompletion = true\n\tinternalApp.Name = \"modelz-agent\"\n\tinternalApp.Usage = \"Cluster agent for modelz\"\n\tinternalApp.HideHelpCommand = true\n\tinternalApp.HideVersion = false\n\tinternalApp.Version = version.GetVersion().String()\n\tinternalApp.Flags = []cli.Flag{\n\t\t&cli.BoolFlag{\n\t\t\tName:  flagDebug,\n\t\t\tUsage: \"enable debug output in logs\",\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:  flagDev,\n\t\t\tUsage: \"enable development mode\",\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagServerPort,\n\t\t\tValue:   8080,\n\t\t\tUsage:   \"port to listen on\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_SERVER_PORT\"},\n\t\t\tAliases: []string{\"p\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName: flagServerReadTimeout,\n\t\t\tUsage: \"maximum duration before timing out read of the request, \" +\n\t\t\t\t\"including the body\",\n\t\t\tValue:   305 * time.Second,\n\t\t\tEnvVars: 
[]string{\"MODELZ_AGENT_SERVER_READ_TIMEOUT\"},\n\t\t\tAliases: []string{\"srt\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName: flagServerWriteTimeout,\n\t\t\tUsage: \"maximum duration before timing out write of the response, \" +\n\t\t\t\t\"including the body\",\n\t\t\tValue:   305 * time.Second,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_SERVER_WRITE_TIMEOUT\"},\n\t\t\tAliases: []string{\"swt\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagMasterURL,\n\t\t\tUsage:   \"URL to master for kubernetes cluster\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_MASTER_URL\"},\n\t\t\tAliases: []string{\"mu\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagKubeConfig,\n\t\t\tUsage:   \"Path to kubeconfig file. If not provided, will use in-cluster config\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_KUBE_CONFIG\"},\n\t\t\tAliases: []string{\"kc\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagQPS,\n\t\t\tUsage:   \"QPS for kubernetes client\",\n\t\t\tValue:   100,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_KUBE_QPS\"},\n\t\t\tAliases: []string{\"kq\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagBurst,\n\t\t\tValue:   250,\n\t\t\tUsage:   \"Burst for kubernetes client\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_KUBE_BURST\"},\n\t\t\tAliases: []string{\"kb\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagResyncPeriod,\n\t\t\tValue:   time.Hour,\n\t\t\tUsage:   \"Resync period for kubernetes client\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_KUBE_RESYNC_PERIOD\"},\n\t\t\tAliases: []string{\"kr\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: flagIngressEnabled,\n\t\t\tUsage: \"Enable inference ingress. 
\" +\n\t\t\t\t\"If enabled, the agent will create ingress for each inference\",\n\t\t\tValue:   false,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_INGRESS_ENABLED\"},\n\t\t\tAliases: []string{\"ie\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagIngressDomain,\n\t\t\tUsage:   \"Domain for inference ingress\",\n\t\t\tValue:   \"cloud.modelz.dev\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_INGRESS_DOMAIN\"},\n\t\t\tAliases: []string{\"id\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagIngressNamespace,\n\t\t\tUsage:   \"Namespace for inference ingress\",\n\t\t\tValue:   \"default\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_INGRESS_NAMESPACE\"},\n\t\t\tAliases: []string{\"in\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: flagIngressAnyIPToDomain,\n\t\t\tUsage: \"Enable any ip to domain. \" +\n\t\t\t\t\"If enabled, the agent will create ingress for each inference\",\n\t\t\tValue:   false,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_INGRESS_ANY_IP_TO_DOMAIN\"},\n\t\t\tAliases: []string{\"iad\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:    flagIngressTLSEnabled,\n\t\t\tUsage:   \"Enable TLS for inference ingress. \",\n\t\t\tValue:   true,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_INGRESS_TLS_ENABLED\"},\n\t\t\tAliases: []string{\"it\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName: flagInferenceLogTimeout,\n\t\t\tUsage: \"Timeout for inference log streaming. \" +\n\t\t\t\t\"If the inference log has not been updated in this time, \" +\n\t\t\t\t\"the connection will be closed.\",\n\t\t\tValue:   time.Minute,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_INFERENCE_LOG_TIMEOUT\"},\n\t\t\tAliases: []string{\"ilt\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagInferenceCacheTTL,\n\t\t\tUsage:   \"Time to live for inference cache. 
\",\n\t\t\tValue:   time.Millisecond * 500,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_INFERENCE_CACHE_TTL\"},\n\t\t\tAliases: []string{\"ict\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:   flagBuildEnabled,\n\t\t\tHidden: true,\n\t\t\tUsage: \"Enable model build. \" +\n\t\t\t\t\"If enabled, the agent will build inference server image\",\n\t\t\tValue:   false,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_BUILD_ENABLED\"},\n\t\t\tAliases: []string{\"be\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:   flagBuilderImage,\n\t\t\tHidden: true,\n\t\t\tUsage: \"Image to use for building models. \" +\n\t\t\t\t\"Must be a valid docker image reference.\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_BUILDER_IMAGE\"},\n\t\t\tAliases: []string{\"bi\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:   flagBuildkitdAddress,\n\t\t\tHidden: true,\n\t\t\tUsage: \"Address of buildkitd server. \" +\n\t\t\t\t\"Must be a valid tcp address.\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_BUILDKITD_ADDRESS\"},\n\t\t\tAliases: []string{\"ba\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:   flagBuildCtlBin,\n\t\t\tHidden: true,\n\t\t\tUsage: \"Path to buildctl binary. \" +\n\t\t\t\t\"Must be a valid path to a binary.\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_BUILDCTL_BIN\"},\n\t\t\tAliases: []string{\"bb\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagBuildRegistry,\n\t\t\tHidden:  true,\n\t\t\tUsage:   \"Registry to use for building models. \",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_BUILD_REGISTRY\"},\n\t\t\tAliases: []string{\"br\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagBuildRegistryToken,\n\t\t\tHidden:  true,\n\t\t\tUsage:   \"Token to use for building models. 
\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_BUILD_REGISTRY_TOKEN\"},\n\t\t\tAliases: []string{\"bt\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagBuildImagePullSecret,\n\t\t\tHidden:  true,\n\t\t\tUsage:   \"Image pull secret to use for building models.\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_BUILD_IMAGE_PULL_SECRET\"},\n\t\t\tAliases: []string{\"bp\"},\n\t\t\tValue:   \"dockerhub-secret\",\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagMetricsPollingInterval,\n\t\t\tUsage:   \"Interval to poll metrics from kubernetes\",\n\t\t\tValue:   time.Second * 5,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_METRICS_POLLING_INTERVAL\"},\n\t\t\tAliases: []string{\"mpi\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagMetricsPort,\n\t\t\tUsage:   \"Port to expose metrics on. \",\n\t\t\tValue:   8082,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_METRICS_PORT\"},\n\t\t\tAliases: []string{\"mp\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagMetricsPrometheusHost,\n\t\t\tValue:   \"localhost\",\n\t\t\tUsage:   \"Host to expose prometheus metrics on. \",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_METRICS_PROMETHEUS_HOST\"},\n\t\t\tAliases: []string{\"mph\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagMetricsPrometheusPort,\n\t\t\tUsage:   \"Port to expose prometheus metrics on. 
\",\n\t\t\tValue:   9090,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_METRICS_PROMETHEUS_PORT\"},\n\t\t\tAliases: []string{\"mpp\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagLogsTimeout,\n\t\t\tUsage:   \"request timeout to query the logs\",\n\t\t\tValue:   time.Second * 5,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_LOGS_TIMEOUT\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagLogsLokiURL,\n\t\t\tHidden:  true,\n\t\t\tUsage:   \"Loki service URL\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_LOGS_LOKI_URL\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flaglogsLokiUser,\n\t\t\tHidden:  true,\n\t\t\tUsage:   \"Loki service auth user\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_LOGS_LOKI_USER\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagLogsLokiToken,\n\t\t\tHidden:  true,\n\t\t\tUsage:   \"Loki service auth token\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_LOGS_LOKI_TOKEN\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:    flagModelZCloudEnabled,\n\t\t\tUsage:   \"Enable modelz cloud, agent as modelz cloud agent\",\n\t\t\tValue:   false,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_MODELZ_CLOUD_ENABLED\"},\n\t\t\tAliases: []string{\"mzc\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagModelZCloudURL,\n\t\t\tUsage:   \"Modelz cloud URL\",\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_MODELZ_CLOUD_URL\"},\n\t\t\tAliases: []string{\"mzu\"},\n\t\t\tValue:   \"https://cloud.modelz.ai\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagModelZCloudAgentToken,\n\t\t\tUsage:   \"Modelz cloud agent token\",\n\t\t\tEnvVars: []string{\"MODELZ_CLOUD_AGENT_TOKEN\"},\n\t\t\tAliases: []string{\"mzt\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagModelZCloudAgentHeartbeatInterval,\n\t\t\tUsage:   \"Modelz cloud agent heartbeat interval\",\n\t\t\tEnvVars: []string{\"MODELZ_CLOUD_AGENT_HEARTBEAT_INTERVAL\"},\n\t\t\tAliases: []string{\"mzh\"},\n\t\t\tValue:   time.Minute * 1,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagModelZCloudRegion,\n\t\t\tUsage:   \"Modelz cloud 
agent region\",\n\t\t\tEnvVars: []string{\"MODELZ_CLOUD_AGENT_REGION\"},\n\t\t\tAliases: []string{\"mzr\"},\n\t\t\tValue:   \"us-central1\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagModelZCloudUnifiedAPIKey,\n\t\t\tUsage:   \"Modelz cloud agent unified api key\",\n\t\t\tEnvVars: []string{\"MODELZ_CLOUD_AGENT_UNIFIED_API_KEY\"},\n\t\t\tAliases: []string{\"mzua\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagModelZCloudUpstreamTimeout,\n\t\t\tUsage:   \"upstream timeout\",\n\t\t\tEnvVars: []string{\"MODELZ_UPSTREAM_TIMEOUT\"},\n\t\t\tAliases: []string{\"ut\"},\n\t\t\tValue:   300 * time.Second,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagModelZCloudMaxIdleConnections,\n\t\t\tUsage:   \"max idle connections\",\n\t\t\tEnvVars: []string{\"MODELZ_MAX_IDLE_CONNECTIONS\"},\n\t\t\tAliases: []string{\"mic\"},\n\t\t\tValue:   1024,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagModelZCloudMaxIdleConnectionsPerHost,\n\t\t\tUsage:   \"max idle connections per host\",\n\t\t\tEnvVars: []string{\"MODELZ_MAX_IDLE_CONNECTIONS_PER_HOST\"},\n\t\t\tAliases: []string{\"mich\"},\n\t\t\tValue:   1024,\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:    flagModelZCloudEventEnabled,\n\t\t\tUsage:   \"Enable event logging for modelz cloud.\",\n\t\t\tValue:   false,\n\t\t\tEnvVars: []string{\"MODELZ_AGENT_MODELZ_CLOUD_EVENT_ENABLED\"},\n\t\t\tAliases: []string{\"mze\"},\n\t\t},\n\t}\n\tinternalApp.Action = runServer\n\n\t// Deal with debug flag.\n\tvar debugEnabled bool\n\n\tinternalApp.Before = func(context *cli.Context) error {\n\t\tdebugEnabled = context.Bool(flagDebug)\n\n\t\tif debugEnabled {\n\t\t\tlogrus.SetReportCaller(true)\n\t\t\tlogrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})\n\t\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\t\tgin.SetMode(gin.DebugMode)\n\t\t} else {\n\t\t\tlogrus.SetFormatter(&logrus.JSONFormatter{})\n\t\t}\n\n\t\treturn nil\n\t}\n\treturn App{\n\t\tApp: internalApp,\n\t}\n}\n\nfunc runServer(clicontext *cli.Context) error {\n\tc := 
configFromCLI(clicontext)\n\n\tif clicontext.Bool(flagDebug) {\n\t\tlogrus.Debug(\"debug mode enabled\")\n\t\tcfgString, _ := c.GetString()\n\t\tlogrus.WithField(\"config\", cfgString).Debug(\"config\")\n\t}\n\n\tif err := c.Validate(); err != nil {\n\t\tif clicontext.Bool(flagDebug) {\n\t\t\tlogrus.WithError(err).Error(\"invalid config\")\n\t\t} else {\n\t\t\treturn errors.Wrap(err, \"invalid config\")\n\t\t}\n\t}\n\n\ts, err := server.New(c)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create server\")\n\t}\n\n\treturn s.Run()\n}\n"
  },
  {
    "path": "agent/pkg/config/config.go",
    "content": "package config\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"time\"\n)\n\ntype Config struct {\n\tServer      ServerConfig      `json:\"server,omitempty\"`\n\tKubeConfig  KubeConfig        `json:\"kube_config,omitempty\"`\n\tIngress     IngressConfig     `json:\"ingress,omitempty\"`\n\tInference   InferenceConfig   `json:\"inference,omitempty\"`\n\tBuild       BuildConfig       `json:\"build,omitempty\"`\n\tMetrics     MetricsConfig     `json:\"metrics,omitempty\"`\n\tLogs        LogsConfig        `json:\"logs,omitempty\"`\n\tModelZCloud ModelZCloudConfig `json:\"modelz_cloud,omitempty\"`\n}\n\ntype ModelZCloudConfig struct {\n\tEnabled bool `json:\"enabled,omitempty\"`\n\t// URL of apiserver\n\tURL                       string            `json:\"url,omitempty\"`\n\tAgentToken                string            `json:\"agent_token,omitempty\"`\n\tHeartbeatInterval         time.Duration     `json:\"heartbeat_interval,omitempty\"`\n\tID                        string            `json:\"id,omitempty\"`\n\tName                      string            `json:\"name,omitempty\"`\n\tTokenID                   string            `json:\"token_id,omitempty\"`\n\tRegion                    string            `json:\"region,omitempty\"`\n\tAPIKeys                   map[string]string `json:\"api_keys,omitempty\"`\n\tUserNamespaces            []string          `json:\"user_namespaces,omitempty\"`\n\tUnifiedAPIKey             string            `json:\"unified_api_key,omitempty\"`\n\tUpstreamTimeout           time.Duration     `json:\"upstream_timeout,omitempty\"`\n\tMaxIdleConnections        int               `json:\"max_idle_connections,omitempty\"`\n\tMaxIdleConnectionsPerHost int               `json:\"max_idle_connections_per_host,omitempty\"`\n\tEventEnabled              bool              `json:\"event_enabled,omitempty\"`\n}\n\ntype LogsConfig struct {\n\tTimeout   time.Duration `json:\"timeout,omitempty\"`\n\tLokiURL   string        
`json:\"loki_url,omitempty\"`\n\tLokiUser  string        `json:\"loki_user,omitempty\"`\n\tLokiToken string        `json:\"loki_token,omitempty\"`\n}\n\ntype ServerConfig struct {\n\tDev          bool          `json:\"dev,omitempty\"`\n\tServerPort   int           `json:\"server_port,omitempty\"`\n\tReadTimeout  time.Duration `json:\"read_timeout,omitempty\"`\n\tWriteTimeout time.Duration `json:\"write_timeout,omitempty\"`\n}\n\ntype MetricsConfig struct {\n\tPollingInterval time.Duration `json:\"polling_interval,omitempty\"`\n\tServerPort      int           `json:\"server_port,omitempty\"`\n\tPrometheusPort  int           `json:\"prometheus_port,omitempty\"`\n\tPrometheusHost  string        `json:\"prometheus_host,omitempty\"`\n}\n\ntype BuildConfig struct {\n\tBuildEnabled         bool   `json:\"build_enabled,omitempty\"`\n\tBuilderImage         string `json:\"builder_image,omitempty\"`\n\tBuildkitdAddress     string `json:\"buildkitd_address,omitempty\"`\n\tBuildCtlBin          string `json:\"build_ctl_bin,omitempty\"`\n\tBuildRegistry        string `json:\"build_registry,omitempty\"`\n\tBuildRegistryToken   string `json:\"build_registry_token,omitempty\"`\n\tBuildImagePullSecret string `json:\"build_image_pull_secret,omitempty\"`\n}\n\ntype InferenceConfig struct {\n\tLogTimeout time.Duration `json:\"log_timeout,omitempty\"`\n\tCacheTTL   time.Duration `json:\"cache_ttl,omitempty\"`\n}\n\ntype IngressConfig struct {\n\tIngressEnabled bool   `json:\"ingress_enabled,omitempty\"`\n\tDomain         string `json:\"domain,omitempty\"`\n\tNamespace      string `json:\"namespace,omitempty\"`\n\tAnyIPToDomain  bool   `json:\"any_ip_to_domain,omitempty\"`\n\tTLSEnabled     bool   `json:\"tls_enabled,omitempty\"`\n}\n\ntype KubeConfig struct {\n\tKubeconfig   string        `json:\"kubeconfig,omitempty\"`\n\tMasterURL    string        `json:\"master_url,omitempty\"`\n\tQPS          int           `json:\"qps,omitempty\"`\n\tBurst        int           
`json:\"burst,omitempty\"`\n\tResyncPeriod time.Duration `json:\"resync_period,omitempty\"`\n}\n\nfunc New() Config {\n\treturn Config{\n\t\tKubeConfig: KubeConfig{},\n\t\tIngress:    IngressConfig{},\n\t\tInference:  InferenceConfig{},\n\t\tBuild:      BuildConfig{},\n\t\tMetrics:    MetricsConfig{},\n\t\tLogs:       LogsConfig{},\n\t}\n}\n\nfunc (c Config) GetString() (string, error) {\n\tbytes, err := json.Marshal(c)\n\treturn string(bytes), err\n}\n\nfunc (c Config) Validate() error {\n\tif c.Server.ServerPort == 0 ||\n\t\tc.Server.ReadTimeout == 0 ||\n\t\tc.Server.WriteTimeout == 0 {\n\t\treturn errors.New(\"server config is required\")\n\t}\n\n\tif c.Inference.LogTimeout == 0 {\n\t\treturn errors.New(\"inference log timeout is required\")\n\t}\n\n\tif c.Build.BuildEnabled {\n\t\tif c.Build.BuildkitdAddress == \"\" ||\n\t\t\tc.Build.BuilderImage == \"\" ||\n\t\t\tc.Build.BuildRegistryToken == \"\" ||\n\t\t\tc.Build.BuildRegistry == \"\" ||\n\t\t\tc.Build.BuildCtlBin == \"\" ||\n\t\t\tc.Build.BuildImagePullSecret == \"\" {\n\t\t\treturn errors.New(\"build config is required\")\n\t\t}\n\t}\n\n\tif c.Metrics.ServerPort == 0 ||\n\t\tc.Metrics.PollingInterval == 0 ||\n\t\tc.Metrics.PrometheusHost == \"\" ||\n\t\tc.Metrics.PrometheusPort == 0 {\n\t\treturn errors.New(\"metrics config is required\")\n\t}\n\n\tif c.Ingress.IngressEnabled {\n\t\tif c.Ingress.Namespace == \"\" {\n\t\t\treturn errors.New(\"ingress namespace is required\")\n\t\t}\n\t\tif !c.Ingress.AnyIPToDomain && c.Ingress.Domain == \"\" {\n\t\t\treturn errors.New(\"ingress domain is required\")\n\t\t}\n\t}\n\n\tif c.ModelZCloud.Enabled {\n\t\tif c.ModelZCloud.URL == \"\" ||\n\t\t\tc.ModelZCloud.AgentToken == \"\" ||\n\t\t\tc.ModelZCloud.HeartbeatInterval == 0 {\n\t\t\treturn errors.New(\"modelz cloud config is required\")\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/consts/consts.go",
    "content": "package consts\n\nimport \"time\"\n\nconst (\n\tDomain        = \"modelz.live\"\n\tDefaultPrefix = \"modelz-\"\n\tAPIKEY_PREFIX = \"mzi-\"\n)\nconst DefaultAPIServerReadyTimeout = 15 * time.Minute\n"
  },
  {
    "path": "agent/pkg/docs/docs.go",
    "content": "// Package docs GENERATED BY SWAG; DO NOT EDIT\n// This file was generated by swaggo/swag\npackage docs\n\nimport \"github.com/swaggo/swag\"\n\nconst docTemplate = `{\n    \"schemes\": {{ marshal .Schemes }},\n    \"swagger\": \"2.0\",\n    \"info\": {\n        \"description\": \"{{escape .Description}}\",\n        \"title\": \"{{.Title}}\",\n        \"contact\": {\n            \"name\": \"modelz support\",\n            \"url\": \"https://github.com/tensorchord/openmodelz\",\n            \"email\": \"modelz-support@tensorchord.ai\"\n        },\n        \"version\": \"{{.Version}}\"\n    },\n    \"host\": \"{{.Host}}\",\n    \"basePath\": \"{{.BasePath}}\",\n    \"paths\": {\n        \"/gradio/{id}\": {\n            \"get\": {\n                \"description\": \"Reverse proxy to the backend gradio.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Reverse proxy to the backend gradio.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            },\n            \"post\": {\n                \"description\": \"Reverse proxy to the backend gradio.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n              
      \"inference\"\n                ],\n                \"summary\": \"Reverse proxy to the backend gradio.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            }\n        },\n        \"/healthz\": {\n            \"get\": {\n                \"description\": \"Healthz\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"system\"\n                ],\n                \"summary\": \"Healthz\",\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\"\n                    }\n                }\n            }\n        },\n        \"/inference/{name}\": {\n            \"get\": {\n                \"description\": \"Inference proxy.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference-proxy\"\n                ],\n                \"summary\": \"Inference.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"inference id\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n     
           ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\"\n                    },\n                    \"303\": {\n                        \"description\": \"See Other\"\n                    },\n                    \"400\": {\n                        \"description\": \"Bad Request\"\n                    },\n                    \"404\": {\n                        \"description\": \"Not Found\"\n                    },\n                    \"500\": {\n                        \"description\": \"Internal Server Error\"\n                    }\n                }\n            },\n            \"put\": {\n                \"description\": \"Inference proxy.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference-proxy\"\n                ],\n                \"summary\": \"Inference.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"inference id\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\"\n                    },\n                    \"303\": {\n                        \"description\": \"See Other\"\n                    },\n                    \"400\": {\n                        \"description\": \"Bad Request\"\n                    },\n                    \"404\": {\n                        \"description\": \"Not Found\"\n                    },\n                    \"500\": {\n                        \"description\": \"Internal Server Error\"\n                    }\n    
            }\n            },\n            \"post\": {\n                \"description\": \"Inference proxy.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference-proxy\"\n                ],\n                \"summary\": \"Inference.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"inference id\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\"\n                    },\n                    \"303\": {\n                        \"description\": \"See Other\"\n                    },\n                    \"400\": {\n                        \"description\": \"Bad Request\"\n                    },\n                    \"404\": {\n                        \"description\": \"Not Found\"\n                    },\n                    \"500\": {\n                        \"description\": \"Internal Server Error\"\n                    }\n                }\n            },\n            \"delete\": {\n                \"description\": \"Inference proxy.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference-proxy\"\n                ],\n                \"summary\": \"Inference.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"inference id\",\n             
           \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\"\n                    },\n                    \"303\": {\n                        \"description\": \"See Other\"\n                    },\n                    \"400\": {\n                        \"description\": \"Bad Request\"\n                    },\n                    \"404\": {\n                        \"description\": \"Not Found\"\n                    },\n                    \"500\": {\n                        \"description\": \"Internal Server Error\"\n                    }\n                }\n            }\n        },\n        \"/mosec/{id}\": {\n            \"get\": {\n                \"description\": \"Proxy to the backend mosec.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Proxy to the backend mosec.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            }\n        },\n        \"/mosec/{id}/inference\": {\n            \"post\": {\n                \"description\": \"Proxy to the backend mosec.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n         
       \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Proxy to the backend mosec.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            }\n        },\n        \"/mosec/{id}/metrics\": {\n            \"get\": {\n                \"description\": \"Proxy to the backend mosec.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Proxy to the backend mosec.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            }\n        },\n        \"/other/{id}\": {\n            \"get\": {\n                \"description\": \"Reverse proxy to the backend other.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                  
  \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Reverse proxy to the backend other.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            },\n            \"post\": {\n                \"description\": \"Reverse proxy to the backend other.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Reverse proxy to the backend other.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            }\n        },\n        \"/streamlit/{id}\": {\n            \"get\": {\n                \"description\": \"Reverse proxy to streamlit.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                
\"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Reverse proxy to streamlit.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            },\n            \"post\": {\n                \"description\": \"Reverse proxy to streamlit.\",\n                \"consumes\": [\n                    \"*/*\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Reverse proxy to streamlit.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Deployment ID\",\n                        \"name\": \"id\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\"\n                    }\n                }\n            }\n        },\n        \"/system/build\": {\n            \"get\": {\n                \"description\": \"List the builds.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"build\"\n                ],\n                
\"summary\": \"List the builds.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.Build\"\n                            }\n                        }\n                    }\n                }\n            },\n            \"post\": {\n                \"description\": \"Create the build.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"build\"\n                ],\n                \"summary\": \"Create the build.\",\n                \"parameters\": [\n                    {\n                        \"description\": \"build\",\n                        \"name\": \"body\",\n                        \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.Build\"\n                        }\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.Build\"\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/build/{name}\": {\n 
           \"get\": {\n                \"description\": \"Get the build by name.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"build\"\n                ],\n                \"summary\": \"Get the build by name.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"build name\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.Build\"\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/image-cache\": {\n            \"post\": {\n                \"description\": \"Create the image cache.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"image-cache\"\n                ],\n                \"summary\": \"Create the image cache.\",\n                \"parameters\": [\n                    {\n                        \"description\": \"image-cache\",\n                        \"name\": \"body\",\n                 
       \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.ImageCache\"\n                        }\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.ImageCache\"\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/inference/{name}\": {\n            \"get\": {\n                \"description\": \"Get the inference by name.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Get the inference by name.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"inference id\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                        }\n                    }\n                }\n            
}\n        },\n        \"/system/inference/{name}/instance/{instance}\": {\n            \"post\": {\n                \"description\": \"Attach to the inference instance.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Attach to the inference instance.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Name\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Instance name\",\n                        \"name\": \"instance\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                            }\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/inference/{name}/instances\": {\n            \"get\": {\n                \"description\": 
\"List the inference instances.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"List the inference instances.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Name\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                            }\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/inferences\": {\n            \"get\": {\n                \"description\": \"List the inferences.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"List the inferences.\",\n                \"parameters\": [\n                    {\n                        \"type\": 
\"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                            }\n                        }\n                    }\n                }\n            },\n            \"put\": {\n                \"description\": \"Update the inferences.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Update the inferences.\",\n                \"parameters\": [\n                    {\n                        \"description\": \"query params\",\n                        \"name\": \"request\",\n                        \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                        }\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"202\": {\n                        \"description\": \"Accepted\",\n                        
\"schema\": {\n                            \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                        }\n                    }\n                }\n            },\n            \"post\": {\n                \"description\": \"Create the inferences.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Create the inferences.\",\n                \"parameters\": [\n                    {\n                        \"description\": \"query params\",\n                        \"name\": \"request\",\n                        \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                        }\n                    }\n                ],\n                \"responses\": {\n                    \"201\": {\n                        \"description\": \"Created\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.InferenceDeployment\"\n                        }\n                    }\n                }\n            },\n            \"delete\": {\n                \"description\": \"Delete the inferences.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Delete the inferences.\",\n                \"parameters\": [\n                    {\n                        \"description\": \"query params\",\n                        \"name\": \"request\",\n                        \"in\": 
\"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.DeleteFunctionRequest\"\n                        }\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"202\": {\n                        \"description\": \"Accepted\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.DeleteFunctionRequest\"\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/info\": {\n            \"get\": {\n                \"description\": \"Get system info.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"system\"\n                ],\n                \"summary\": \"Get system info.\",\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.ProviderInfo\"\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/logs/build\": {\n            \"get\": {\n                \"description\": \"Get the build logs.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    
\"log\"\n                ],\n                \"summary\": \"Get the build logs.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Build Name\",\n                        \"name\": \"name\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Instance\",\n                        \"name\": \"instance\",\n                        \"in\": \"query\"\n                    },\n                    {\n                        \"type\": \"integer\",\n                        \"description\": \"Tail\",\n                        \"name\": \"tail\",\n                        \"in\": \"query\"\n                    },\n                    {\n                        \"type\": \"boolean\",\n                        \"description\": \"Follow\",\n                        \"name\": \"follow\",\n                        \"in\": \"query\"\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Since\",\n                        \"name\": \"since\",\n                        \"in\": \"query\"\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.Message\"\n              
              }\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/logs/inference\": {\n            \"get\": {\n                \"description\": \"Get the inference logs.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"log\"\n                ],\n                \"summary\": \"Get the inference logs.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Name\",\n                        \"name\": \"name\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Instance\",\n                        \"name\": \"instance\",\n                        \"in\": \"query\"\n                    },\n                    {\n                        \"type\": \"integer\",\n                        \"description\": \"Tail\",\n                        \"name\": \"tail\",\n                        \"in\": \"query\"\n                    },\n                    {\n                        \"type\": \"boolean\",\n                        \"description\": \"Follow\",\n                        \"name\": \"follow\",\n                        \"in\": \"query\"\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": 
\"Since\",\n                        \"name\": \"since\",\n                        \"in\": \"query\"\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"End\",\n                        \"name\": \"end\",\n                        \"in\": \"query\"\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.Message\"\n                            }\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/namespaces\": {\n            \"get\": {\n                \"description\": \"List the namespaces.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"namespace\"\n                ],\n                \"summary\": \"List the namespaces.\",\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"type\": \"string\"\n                            }\n                        }\n                    }\n                }\n            },\n            \"post\": {\n                \"description\": \"Create the namespace.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n    
                \"namespace\"\n                ],\n                \"summary\": \"Create the namespace.\",\n                \"parameters\": [\n                    {\n                        \"description\": \"Namespace name\",\n                        \"name\": \"body\",\n                        \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.NamespaceRequest\"\n                        }\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.NamespaceRequest\"\n                        }\n                    }\n                }\n            },\n            \"delete\": {\n                \"description\": \"Delete the namespace.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"namespace\"\n                ],\n                \"summary\": \"Delete the namespace.\",\n                \"parameters\": [\n                    {\n                        \"description\": \"Namespace name\",\n                        \"name\": \"body\",\n                        \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.NamespaceRequest\"\n                        }\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.NamespaceRequest\"\n                        }\n       
             }\n                }\n            }\n        },\n        \"/system/scale-inference\": {\n            \"post\": {\n                \"description\": \"Scale the inferences.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"inference\"\n                ],\n                \"summary\": \"Scale the inferences.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Namespace\",\n                        \"name\": \"namespace\",\n                        \"in\": \"query\",\n                        \"required\": true\n                    },\n                    {\n                        \"description\": \"query params\",\n                        \"name\": \"request\",\n                        \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.ScaleServiceRequest\"\n                        }\n                    }\n                ],\n                \"responses\": {\n                    \"202\": {\n                        \"description\": \"Accepted\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.ScaleServiceRequest\"\n                            }\n                        }\n                    },\n                    \"400\": {\n                        \"description\": \"Bad Request\"\n                    }\n                }\n            }\n        },\n        \"/system/server/{name}/delete\": {\n            \"delete\": {\n                \"description\": \"Delete a node.\",\n                \"consumes\": [\n      
              \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"namespace\"\n                ],\n                \"summary\": \"Delete a node from the cluster.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Server Name\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    }\n                ],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\"\n                    }\n                }\n            }\n        },\n        \"/system/server/{name}/labels\": {\n            \"post\": {\n                \"description\": \"Label the server.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"namespace\"\n                ],\n                \"summary\": \"Label the server.\",\n                \"parameters\": [\n                    {\n                        \"type\": \"string\",\n                        \"description\": \"Server Name\",\n                        \"name\": \"name\",\n                        \"in\": \"path\",\n                        \"required\": true\n                    },\n                    {\n                        \"description\": \"query params\",\n                        \"name\": \"request\",\n                        \"in\": \"body\",\n                        \"required\": true,\n                        \"schema\": {\n                            \"$ref\": \"#/definitions/types.ServerSpec\"\n                        }\n                    }\n                
],\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"type\": \"string\"\n                            }\n                        }\n                    }\n                }\n            }\n        },\n        \"/system/servers\": {\n            \"get\": {\n                \"description\": \"List the servers.\",\n                \"consumes\": [\n                    \"application/json\"\n                ],\n                \"produces\": [\n                    \"application/json\"\n                ],\n                \"tags\": [\n                    \"namespace\"\n                ],\n                \"summary\": \"List the servers.\",\n                \"responses\": {\n                    \"200\": {\n                        \"description\": \"OK\",\n                        \"schema\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"$ref\": \"#/definitions/types.Server\"\n                            }\n                        }\n                    }\n                }\n            }\n        }\n    },\n    \"definitions\": {\n        \"types.AuthN\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"password\": {\n                    \"type\": \"string\"\n                },\n                \"token\": {\n                    \"type\": \"string\"\n                },\n                \"username\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.Build\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"spec\": {\n                    \"$ref\": \"#/definitions/types.BuildSpec\"\n                },\n                \"status\": {\n        
            \"$ref\": \"#/definitions/types.BuildStatus\"\n                }\n            }\n        },\n        \"types.BuildSpec\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"authn\": {\n                    \"$ref\": \"#/definitions/types.AuthN\"\n                },\n                \"branch\": {\n                    \"type\": \"string\"\n                },\n                \"buildTarget\": {\n                    \"$ref\": \"#/definitions/types.BuildTarget\"\n                },\n                \"image\": {\n                    \"type\": \"string\"\n                },\n                \"image_tag\": {\n                    \"type\": \"string\"\n                },\n                \"name\": {\n                    \"type\": \"string\"\n                },\n                \"namespace\": {\n                    \"type\": \"string\"\n                },\n                \"repository\": {\n                    \"description\": \"repository is the URL\",\n                    \"type\": \"string\"\n                },\n                \"revision\": {\n                    \"description\": \"revision is the commit hash for the specified revision.\\n+optional\",\n                    \"type\": \"string\"\n                },\n                \"secret_id\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.BuildStatus\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"phase\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.BuildTarget\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"builder\": {\n                    \"type\": \"string\"\n                },\n                \"digest\": {\n                    \"type\": \"string\"\n                },\n                \"directory\": {\n                    \"description\": \"directory is the target 
directory name.\\nMust not contain or start with '..'.  If '.' is supplied, the volume directory will be the\\ngit repository.  Otherwise, if specified, the volume will contain the git repository in\\nthe subdirectory with the given name.\\n+optional\",\n                    \"type\": \"string\"\n                },\n                \"duration\": {\n                    \"type\": \"string\"\n                },\n                \"image\": {\n                    \"type\": \"string\"\n                },\n                \"image_tag\": {\n                    \"type\": \"string\"\n                },\n                \"registry\": {\n                    \"type\": \"string\"\n                },\n                \"registry_token\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.DeleteFunctionRequest\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"functionName\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.ImageCache\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"force_full_cache\": {\n                    \"type\": \"boolean\"\n                },\n                \"image\": {\n                    \"type\": \"string\"\n                },\n                \"name\": {\n                    \"description\": \"Name is the name of the inference.\",\n                    \"type\": \"string\"\n                },\n                \"namespace\": {\n                    \"type\": \"string\"\n                },\n                \"node_selector\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.InferenceDeployment\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"spec\": {\n                    \"$ref\": \"#/definitions/types.InferenceDeploymentSpec\"\n                },\n                
\"status\": {\n                    \"$ref\": \"#/definitions/types.InferenceDeploymentStatus\"\n                }\n            }\n        },\n        \"types.InferenceDeploymentSpec\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"annotations\": {\n                    \"description\": \"Annotations are key-value pairs that may be attached to the inference.\",\n                    \"type\": \"object\",\n                    \"additionalProperties\": {\n                        \"type\": \"string\"\n                    }\n                },\n                \"command\": {\n                    \"description\": \"Command to run when starting the\",\n                    \"type\": \"string\"\n                },\n                \"constraints\": {\n                    \"description\": \"Constraints are the constraints for the inference.\",\n                    \"type\": \"array\",\n                    \"items\": {\n                        \"type\": \"string\"\n                    }\n                },\n                \"envVars\": {\n                    \"description\": \"EnvVars can be provided to set environment variables for the inference runtime.\",\n                    \"type\": \"object\",\n                    \"additionalProperties\": {\n                        \"type\": \"string\"\n                    }\n                },\n                \"framework\": {\n                    \"description\": \"Framework is the inference framework.\",\n                    \"type\": \"string\"\n                },\n                \"http_probe_path\": {\n                    \"description\": \"HTTPProbePath is the path of the http probe.\",\n                    \"type\": \"string\"\n                },\n                \"image\": {\n                    \"description\": \"Image is a fully-qualified container image\",\n                    \"type\": \"string\"\n                },\n                \"labels\": {\n                    \"description\": 
\"Labels are key-value pairs that may be attached to the inference.\",\n                    \"type\": \"object\",\n                    \"additionalProperties\": {\n                        \"type\": \"string\"\n                    }\n                },\n                \"name\": {\n                    \"description\": \"Name is the name of the inference.\",\n                    \"type\": \"string\"\n                },\n                \"namespace\": {\n                    \"description\": \"Namespace for the inference.\",\n                    \"type\": \"string\"\n                },\n                \"port\": {\n                    \"description\": \"Port is the port exposed by the inference.\",\n                    \"type\": \"integer\"\n                },\n                \"resources\": {\n                    \"description\": \"Resources are the compute resource requirements.\",\n                    \"$ref\": \"#/definitions/types.ResourceRequirements\"\n                },\n                \"scaling\": {\n                    \"description\": \"Scaling is the scaling configuration for the inference.\",\n                    \"$ref\": \"#/definitions/types.ScalingConfig\"\n                },\n                \"secrets\": {\n                    \"description\": \"Secrets list of secrets to be made available to inference.\",\n                    \"type\": \"array\",\n                    \"items\": {\n                        \"type\": \"string\"\n                    }\n                }\n            }\n        },\n        \"types.InferenceDeploymentStatus\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"availableReplicas\": {\n                    \"description\": \"AvailableReplicas is the count of replicas ready to receive\\ninvocations as reported by the faas-provider\",\n                    \"type\": \"integer\"\n                },\n                \"createdAt\": {\n                    \"description\": \"CreatedAt is the time 
read back from the faas backend's\\ndata store for when the function or its container was created.\",\n                    \"type\": \"string\"\n                },\n                \"eventMessage\": {\n                    \"description\": \"EventMessage record human readable message indicating details about the event of deployment.\",\n                    \"type\": \"string\"\n                },\n                \"invocationCount\": {\n                    \"description\": \"InvocationCount count of invocations\",\n                    \"type\": \"integer\"\n                },\n                \"phase\": {\n                    \"type\": \"string\"\n                },\n                \"replicas\": {\n                    \"description\": \"Replicas desired within the cluster\",\n                    \"type\": \"integer\"\n                },\n                \"usage\": {\n                    \"description\": \"Usage represents CPU and RAM used by all of the\\nfunctions' replicas. Divide by AvailableReplicas for an\\naverage value per replica.\",\n                    \"$ref\": \"#/definitions/types.InferenceUsage\"\n                }\n            }\n        },\n        \"types.InferenceUsage\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"cpu\": {\n                    \"description\": \"CPU is the increase in CPU usage since the last measurement\\nequivalent to Kubernetes' concept of millicores.\",\n                    \"type\": \"number\"\n                },\n                \"gpu\": {\n                    \"type\": \"number\"\n                },\n                \"totalMemoryBytes\": {\n                    \"description\": \"TotalMemoryBytes is the total memory usage in bytes.\",\n                    \"type\": \"number\"\n                }\n            }\n        },\n        \"types.Message\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"instance\": {\n                    
\"description\": \"instance is the name/id of the specific function instance\",\n                    \"type\": \"string\"\n                },\n                \"name\": {\n                    \"description\": \"Name is the function name\",\n                    \"type\": \"string\"\n                },\n                \"namespace\": {\n                    \"type\": \"string\"\n                },\n                \"text\": {\n                    \"description\": \"Text is the raw log message content\",\n                    \"type\": \"string\"\n                },\n                \"timestamp\": {\n                    \"description\": \"Timestamp is the timestamp of when the log message was recorded\",\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.NamespaceRequest\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"name\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.NodeSystemInfo\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"architecture\": {\n                    \"description\": \"The Architecture reported by the node\",\n                    \"type\": \"string\"\n                },\n                \"kernelVersion\": {\n                    \"description\": \"Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).\",\n                    \"type\": \"string\"\n                },\n                \"machineID\": {\n                    \"description\": \"MachineID reported by the node. For unique machine identification\\nin the cluster this field is preferred. 
Learn more from man(5)\\nmachine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html\",\n                    \"type\": \"string\"\n                },\n                \"operatingSystem\": {\n                    \"description\": \"The Operating System reported by the node\",\n                    \"type\": \"string\"\n                },\n                \"osImage\": {\n                    \"description\": \"OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).\",\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.ProviderInfo\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"orchestration\": {\n                    \"type\": \"string\"\n                },\n                \"provider\": {\n                    \"type\": \"string\"\n                },\n                \"version\": {\n                    \"$ref\": \"#/definitions/types.VersionInfo\"\n                }\n            }\n        },\n        \"types.ResourceList\": {\n            \"type\": \"object\",\n            \"additionalProperties\": {\n                \"type\": \"string\"\n            }\n        },\n        \"types.ResourceRequirements\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"limits\": {\n                    \"description\": \"Limits describes the maximum amount of compute resources allowed.\\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\\n+optional\",\n                    \"$ref\": \"#/definitions/types.ResourceList\"\n                },\n                \"requests\": {\n                    \"description\": \"Requests describes the minimum amount of compute resources required.\\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\\notherwise to an implementation-defined value.\\nMore info: 
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\\n+optional\",\n                    \"$ref\": \"#/definitions/types.ResourceList\"\n                }\n            }\n        },\n        \"types.ScaleServiceRequest\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"attempt\": {\n                    \"type\": \"integer\"\n                },\n                \"eventMessage\": {\n                    \"type\": \"string\"\n                },\n                \"replicas\": {\n                    \"type\": \"integer\"\n                },\n                \"serviceName\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.ScalingConfig\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"max_replicas\": {\n                    \"description\": \"MaxReplicas is the upper limit for the number of replicas to which the\\nautoscaler can scale up. It cannot be less that minReplicas. It defaults\\nto 1.\",\n                    \"type\": \"integer\"\n                },\n                \"min_replicas\": {\n                    \"description\": \"MinReplicas is the lower limit for the number of replicas to which the\\nautoscaler can scale down. It defaults to 0.\",\n                    \"type\": \"integer\"\n                },\n                \"startup_duration\": {\n                    \"description\": \"StartupDuration is the duration (in seconds) of startup time.\",\n                    \"type\": \"integer\"\n                },\n                \"target_load\": {\n                    \"description\": \"TargetLoad is the target load. In capacity mode, it is the expected number of the inflight requests per replica.\",\n                    \"type\": \"integer\"\n                },\n                \"type\": {\n                    \"description\": \"Type is the scaling type. It can be either \\\"capacity\\\" or \\\"rps\\\". 
Default is \\\"capacity\\\".\",\n                    \"type\": \"string\"\n                },\n                \"zero_duration\": {\n                    \"description\": \"ZeroDuration is the duration (in seconds) of zero load before scaling down to zero. Default is 5 minutes.\",\n                    \"type\": \"integer\"\n                }\n            }\n        },\n        \"types.Server\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"spec\": {\n                    \"$ref\": \"#/definitions/types.ServerSpec\"\n                },\n                \"status\": {\n                    \"$ref\": \"#/definitions/types.ServerStatus\"\n                }\n            }\n        },\n        \"types.ServerSpec\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"labels\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": {\n                        \"type\": \"string\"\n                    }\n                },\n                \"name\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"types.ServerStatus\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"allocatable\": {\n                    \"$ref\": \"#/definitions/types.ResourceList\"\n                },\n                \"capacity\": {\n                    \"$ref\": \"#/definitions/types.ResourceList\"\n                },\n                \"phase\": {\n                    \"type\": \"string\"\n                },\n                \"system\": {\n                    \"$ref\": \"#/definitions/types.NodeSystemInfo\"\n                }\n            }\n        },\n        \"types.VersionInfo\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"build_date\": {\n                    \"type\": \"string\"\n                },\n                \"compiler\": {\n                    \"type\": 
\"string\"\n                },\n                \"git_commit\": {\n                    \"type\": \"string\"\n                },\n                \"git_tag\": {\n                    \"type\": \"string\"\n                },\n                \"git_tree_state\": {\n                    \"type\": \"string\"\n                },\n                \"go_version\": {\n                    \"type\": \"string\"\n                },\n                \"platform\": {\n                    \"type\": \"string\"\n                },\n                \"version\": {\n                    \"type\": \"string\"\n                }\n            }\n        }\n    }\n}`\n\n// SwaggerInfo holds exported Swagger Info so clients can modify it\nvar SwaggerInfo = &swag.Spec{\n\tVersion:          \"v0.0.23\",\n\tHost:             \"localhost:8081\",\n\tBasePath:         \"/\",\n\tSchemes:          []string{\"http\"},\n\tTitle:            \"modelz cluster agent\",\n\tDescription:      \"modelz kubernetes cluster agent\",\n\tInfoInstanceName: \"swagger\",\n\tSwaggerTemplate:  docTemplate,\n}\n\nfunc init() {\n\tswag.Register(SwaggerInfo.InstanceName(), SwaggerInfo)\n}\n"
  },
  {
    "path": "agent/pkg/event/event.go",
    "content": "package event\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n)\n\ntype Interface interface {\n\tCreateDeploymentEvent(namespace, deployment, event, message string) error\n}\n\ntype EventRecorder struct {\n\tClient     *client.Client\n\tAgentToken string\n}\n\nfunc NewEventRecorder(client *client.Client, token string) Interface {\n\treturn &EventRecorder{\n\t\tClient:     client,\n\t\tAgentToken: token,\n\t}\n}\n\nfunc (e *EventRecorder) CreateDeploymentEvent(namespace, deployment, event, message string) error {\n\tuser, err := client.GetUserIDFromNamespace(namespace)\n\tif err != nil {\n\t\treturn err\n\t} else if user == \"\" {\n\t\treturn fmt.Errorf(\"user id is empty\")\n\t}\n\n\tdeploymentEvent := types.DeploymentEvent{\n\t\tUserID:       user,\n\t\tDeploymentID: deployment,\n\t\tEventType:    event,\n\t\tMessage:      message,\n\t}\n\terr = e.Client.CreateDeploymentEvent(context.TODO(), e.AgentToken, deploymentEvent)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to create deployment event: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/event/fake.go",
    "content": "package event\n\ntype Fake struct {\n}\n\nfunc NewFake() Interface {\n\treturn &Fake{}\n}\n\nfunc (f *Fake) CreateDeploymentEvent(namespace, deployment, event, message string) error {\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/event/suite_test.go",
    "content": "package event\n\nimport (\n\t\"testing\"\n\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n)\n\nfunc TestBuilder(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"event\")\n}\n"
  },
  {
    "path": "agent/pkg/event/username.go",
    "content": "package event\n\nimport \"fmt\"\n\nconst (\n\tDefaultPrefix = \"modelz-\"\n)\n\nfunc getUserIDFromNamespace(ns string) (string, error) {\n\tif len(ns) < 8 {\n\t\treturn \"\", fmt.Errorf(\"namespace too short\")\n\t}\n\n\tif ns[:len(DefaultPrefix)] != DefaultPrefix {\n\t\treturn \"\", fmt.Errorf(\"namespace does not start with %s\", DefaultPrefix)\n\t}\n\n\treturn ns[len(DefaultPrefix):], nil\n}\n"
  },
  {
    "path": "agent/pkg/event/util.go",
    "content": "package event\n\nimport (\n\t\"database/sql\"\n)\n\nfunc NullStringBuilder(String string, Valid bool) sql.NullString {\n\treturn sql.NullString{String: String, Valid: Valid}\n}\n"
  },
  {
    "path": "agent/pkg/k8s/convert_inference.go",
    "content": "package k8s\n\nimport (\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n)\n\nfunc AsInferenceDeployment(inf *v2alpha1.Inference, item *appsv1.Deployment) *types.InferenceDeployment {\n\tif inf == nil {\n\t\treturn nil\n\t}\n\n\tres := &types.InferenceDeployment{\n\t\tSpec: types.InferenceDeploymentSpec{\n\t\t\tName:        inf.Name,\n\t\t\tFramework:   types.Framework(inf.Spec.Framework),\n\t\t\tImage:       inf.Spec.Image,\n\t\t\tNamespace:   inf.Namespace,\n\t\t\tEnvVars:     inf.Spec.EnvVars,\n\t\t\tSecrets:     inf.Spec.Secrets,\n\t\t\tConstraints: inf.Spec.Constraints,\n\t\t\tLabels:      inf.Spec.Labels,\n\t\t\tAnnotations: inf.Spec.Annotations,\n\t\t},\n\t\tStatus: types.InferenceDeploymentStatus{\n\t\t\tPhase: types.PhaseNoReplicas,\n\t\t},\n\t}\n\n\tif inf.Spec.Scaling != nil {\n\t\tres.Spec.Scaling = &types.ScalingConfig{\n\t\t\tMinReplicas:     inf.Spec.Scaling.MinReplicas,\n\t\t\tMaxReplicas:     inf.Spec.Scaling.MaxReplicas,\n\t\t\tTargetLoad:      inf.Spec.Scaling.TargetLoad,\n\t\t\tZeroDuration:    inf.Spec.Scaling.ZeroDuration,\n\t\t\tStartupDuration: inf.Spec.Scaling.StartupDuration,\n\t\t}\n\t\tif inf.Spec.Scaling.Type != nil {\n\t\t\ttyp := types.ScalingType(*inf.Spec.Scaling.Type)\n\t\t\tres.Spec.Scaling.Type = &typ\n\t\t}\n\t}\n\n\tif inf.Spec.Port != nil {\n\t\tres.Spec.Port = inf.Spec.Port\n\t}\n\n\tvar replicas int32 = 0\n\t// Get status according to the deployment.\n\tif item != nil {\n\t\tif item.Spec.Replicas != nil {\n\t\t\treplicas = *item.Spec.Replicas\n\t\t}\n\t\tres.Status.Replicas = replicas\n\t\tres.Status.CreatedAt = &item.CreationTimestamp.Time\n\t\tres.Status.InvocationCount = 0\n\t\tres.Status.AvailableReplicas = item.Status.AvailableReplicas\n\t\tres.Status.Phase = AsStatusPhase(item)\n\t}\n\treturn 
res\n}\n\nfunc AsResourceList(resources v1.ResourceList) types.ResourceList {\n\tres := types.ResourceList{}\n\tgpuResource := resources[consts.ResourceNvidiaGPU]\n\tgpuPtr := &gpuResource\n\n\tif !resources.Cpu().IsZero() {\n\t\tres[types.ResourceCPU] = types.Quantity(\n\t\t\tresources.Cpu().String())\n\t}\n\tif !resources.Memory().IsZero() {\n\t\tres[types.ResourceMemory] = types.Quantity(\n\t\t\tresources.Memory().String())\n\t}\n\tif !gpuPtr.IsZero() {\n\t\tres[types.ResourceGPU] = types.Quantity(\n\t\t\tgpuPtr.String())\n\t}\n\treturn res\n}\n\nfunc AsStatusPhase(item *appsv1.Deployment) types.Phase {\n\tphase := types.PhaseNotReady\n\tfor _, c := range item.Status.Conditions {\n\t\tif c.Type == appsv1.DeploymentAvailable && c.Status == v1.ConditionTrue {\n\t\t\tphase = types.PhaseReady\n\t\t} else if c.Type == appsv1.DeploymentProgressing && c.Status == v1.ConditionFalse {\n\t\t\tphase = types.PhaseScaling\n\t\t}\n\t}\n\n\tif item.Spec.Replicas != nil && *item.Spec.Replicas == 0 {\n\t\tphase = types.PhaseNoReplicas\n\t}\n\n\tif item.DeletionTimestamp != nil {\n\t\tphase = types.PhaseTerminating\n\t}\n\treturn phase\n}\n"
  },
  {
    "path": "agent/pkg/k8s/convert_inference_test.go",
    "content": "package k8s\n\nimport (\n\t\"time\"\n\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tv1types \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar _ = Describe(\"agent/pkg/k8s/convert_inference\", func() {\n\tIt(\"function AsResourceList\", func() {\n\t\ttcs := []struct {\n\t\t\tresource v1.ResourceList\n\t\t\texpect   types.ResourceList\n\t\t}{\n\t\t\t{\n\t\t\t\tresource: map[v1types.ResourceName]resource.Quantity{\n\t\t\t\t\tv1types.ResourceCPU:      resource.MustParse(\"0\"),\n\t\t\t\t\tv1types.ResourceMemory:   resource.MustParse(\"0\"),\n\t\t\t\t\tconsts.ResourceNvidiaGPU: resource.MustParse(\"0\"),\n\t\t\t\t},\n\t\t\t\texpect: types.ResourceList{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tresource: map[v1types.ResourceName]resource.Quantity{\n\t\t\t\t\tv1types.ResourceCPU:      resource.MustParse(\"0\"),\n\t\t\t\t\tv1types.ResourceMemory:   resource.MustParse(\"500m\"),\n\t\t\t\t\tconsts.ResourceNvidiaGPU: resource.MustParse(\"0\"),\n\t\t\t\t},\n\t\t\t\texpect: types.ResourceList{\n\t\t\t\t\ttypes.ResourceMemory: types.Quantity(\"500m\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tresource: map[v1types.ResourceName]resource.Quantity{\n\t\t\t\t\tv1types.ResourceCPU:      resource.MustParse(\"0\"),\n\t\t\t\t\tv1types.ResourceMemory:   resource.MustParse(\"0\"),\n\t\t\t\t\tconsts.ResourceNvidiaGPU: resource.MustParse(\"0.5\"),\n\t\t\t\t},\n\t\t\t\texpect: types.ResourceList{\n\t\t\t\t\ttypes.ResourceGPU: types.Quantity(\"500m\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tresource: 
map[v1types.ResourceName]resource.Quantity{\n\t\t\t\t\tv1types.ResourceCPU:      resource.MustParse(\"0.1\"),\n\t\t\t\t\tv1types.ResourceMemory:   resource.MustParse(\"0\"),\n\t\t\t\t\tconsts.ResourceNvidiaGPU: resource.MustParse(\"0\"),\n\t\t\t\t},\n\t\t\t\texpect: types.ResourceList{\n\t\t\t\t\ttypes.ResourceCPU: types.Quantity(\"100m\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tfor _, tc := range tcs {\n\t\t\tvalue := AsResourceList(tc.resource)\n\t\t\tExpect(value).To(Equal(tc.expect))\n\t\t}\n\t})\n\tIt(\"function AsInferenceDeployment\", func() {\n\t\tmockTime, _ := time.Parse(\"2006-01-02\", \"2023-09-07\")\n\t\ttcs := []struct {\n\t\t\tinf        *v2alpha1.Inference\n\t\t\tdeployment *appsv1.Deployment\n\t\t\texpect     *types.InferenceDeployment\n\t\t}{\n\t\t\t{\n\t\t\t\tinf:        nil,\n\t\t\t\tdeployment: nil,\n\t\t\t\texpect:     nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinf: Ptr(v2alpha1.Inference{}),\n\t\t\t\tdeployment: Ptr(appsv1.Deployment{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tCreationTimestamp: metav1.Time{\n\t\t\t\t\t\t\tTime: mockTime,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t\texpect: Ptr(types.InferenceDeployment{\n\t\t\t\t\tStatus: types.InferenceDeploymentStatus{\n\t\t\t\t\t\tPhase:     types.PhaseNotReady,\n\t\t\t\t\t\tCreatedAt: Ptr(mockTime),\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinf: Ptr(v2alpha1.Inference{\n\t\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\t\tScaling: Ptr(v2alpha1.ScalingConfig{\n\t\t\t\t\t\t\tType: Ptr(v2alpha1.ScalingTypeCapacity),\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t\tdeployment: Ptr(appsv1.Deployment{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tCreationTimestamp: metav1.Time{\n\t\t\t\t\t\t\tTime: mockTime,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t\texpect: Ptr(types.InferenceDeployment{\n\t\t\t\t\tSpec: types.InferenceDeploymentSpec{\n\t\t\t\t\t\tScaling: Ptr(types.ScalingConfig{\n\t\t\t\t\t\t\tType: 
Ptr(types.ScalingTypeCapacity),\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t\tStatus: types.InferenceDeploymentStatus{\n\t\t\t\t\t\tPhase:     types.PhaseNotReady,\n\t\t\t\t\t\tCreatedAt: Ptr(mockTime),\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t},\n\t\t}\n\t\tfor _, tc := range tcs {\n\t\t\tvalue := AsInferenceDeployment(tc.inf, tc.deployment)\n\t\t\tExpect(value).To(Equal(tc.expect))\n\t\t}\n\t})\n})\n"
  },
  {
    "path": "agent/pkg/k8s/convert_job.go",
    "content": "package k8s\n\nimport (\n\tv1 \"k8s.io/api/batch/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc AsBuild(job v1.Job) (types.Build, error) {\n\tbuild := types.Build{\n\t\tSpec: types.BuildSpec{\n\t\t\tName:      job.Name,\n\t\t\tNamespace: job.Namespace,\n\t\t},\n\t}\n\n\tif job.Status.Succeeded > 0 {\n\t\tbuild.Status.Phase = types.BuildPhaseSucceeded\n\t} else if job.Status.Failed > 0 {\n\t\tbuild.Status.Phase = types.BuildPhaseFailed\n\t} else if job.Status.Active > 0 {\n\t\tbuild.Status.Phase = types.BuildPhaseRunning\n\t} else {\n\t\tbuild.Status.Phase = types.BuildPhasePending\n\t}\n\n\treturn build, nil\n}\n"
  },
  {
    "path": "agent/pkg/k8s/convert_pod.go",
    "content": "package k8s\n\nimport (\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tv1 \"k8s.io/api/core/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc MakeLabelSelector(name string) map[string]string {\n\treturn map[string]string{\n\t\t\"app\": name,\n\t}\n}\n\nfunc InstanceFromPod(pod v1.Pod) *types.InferenceDeploymentInstance {\n\ti := &types.InferenceDeploymentInstance{\n\t\tSpec: types.InferenceDeploymentInstanceSpec{\n\t\t\tNamespace:      pod.Namespace,\n\t\t\tName:           pod.Name,\n\t\t\tOwnerReference: pod.Labels[consts.LabelInferenceName],\n\t\t},\n\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\tReason:  pod.Status.Reason,\n\t\t\tMessage: pod.Status.Message,\n\t\t},\n\t}\n\n\tif pod.Status.StartTime != nil {\n\t\ti.Status.StartTime = pod.Status.StartTime.Time\n\t}\n\n\tswitch pod.Status.Phase {\n\tcase v1.PodRunning:\n\t\ti.Status.Phase = types.InstancePhaseRunning\n\tcase v1.PodPending:\n\t\ti.Status.Phase = types.InstancePhasePending\n\tcase v1.PodFailed:\n\t\ti.Status.Phase = types.InstancePhaseFailed\n\tcase v1.PodSucceeded:\n\t\ti.Status.Phase = types.InstancePhaseSucceeded\n\tcase v1.PodUnknown:\n\t\ti.Status.Phase = types.InstancePhaseUnknown\n\t}\n\n\tif pod.Status.Conditions != nil {\n\t\tfor _, c := range pod.Status.Conditions {\n\t\t\tif c.Type == v1.PodScheduled && c.Status == v1.ConditionFalse {\n\t\t\t\ti.Status.Phase = types.InstancePhaseScheduling\n\t\t\t\ti.Status.Reason = c.Reason\n\t\t\t\ti.Status.Message = c.Message\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(pod.Status.ContainerStatuses) != 0 {\n\t\tif pod.Status.ContainerStatuses[0].Started != nil &&\n\t\t\t!*pod.Status.ContainerStatuses[0].Started {\n\t\t\ti.Status.Phase = types.InstancePhaseCreating\n\t\t\tif pod.Status.ContainerStatuses[0].State.Waiting != nil {\n\t\t\t\ti.Status.Reason = pod.Status.ContainerStatuses[0].State.Waiting.Reason\n\t\t\t\ti.Status.Message = 
pod.Status.ContainerStatuses[0].State.Waiting.Message\n\t\t\t\ti.Status.Phase = types.InstancePhase(\n\t\t\t\t\tpod.Status.ContainerStatuses[0].State.Waiting.Reason)\n\t\t\t} else if pod.Status.ContainerStatuses[0].State.Running != nil {\n\t\t\t\ti.Status.Phase = types.InstancePhaseInitializing\n\t\t\t} else if pod.Status.ContainerStatuses[0].State.Terminated != nil {\n\t\t\t\ti.Status.Phase = types.InstancePhaseFailed\n\t\t\t\ti.Status.Reason = pod.Status.ContainerStatuses[0].State.Terminated.Reason\n\t\t\t\ti.Status.Message = pod.Status.ContainerStatuses[0].State.Terminated.Message\n\t\t\t}\n\t\t}\n\t}\n\treturn i\n}\n"
  },
  {
    "path": "agent/pkg/k8s/convert_pod_test.go",
    "content": "package k8s\n\nimport (\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n\t\"github.com/sirupsen/logrus\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\tv1 \"k8s.io/api/core/v1\"\n)\n\nvar _ = Describe(\"agent/pkg/k8s/convert_pod\", func() {\n\tIt(\"function InstanceFromPod\", func() {\n\t\ttcs := []struct {\n\t\t\tdesc   string\n\t\t\tpod    v1.Pod\n\t\t\texpect *types.InferenceDeploymentInstance\n\t\t}{\n\t\t\t{\n\t\t\t\tdesc: \"empty pod\",\n\t\t\t\tpod:  v1.Pod{},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"running pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tPhase: v1.PodRunning,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhaseRunning,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"pending pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tPhase: v1.PodPending,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhasePending,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"scheduling pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tPhase: v1.PodPending,\n\t\t\t\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tType:   v1.PodScheduled,\n\t\t\t\t\t\t\t\tStatus: v1.ConditionFalse,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: 
types.InstancePhaseScheduling,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"failed pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tPhase: v1.PodFailed,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhaseFailed,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"succeed pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tPhase: v1.PodSucceeded,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhaseSucceeded,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"unknown pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tPhase: v1.PodUnknown,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhaseUnknown,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"creating pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tStarted: Ptr(false),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhaseCreating,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"waiting pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tStarted: Ptr(false),\n\t\t\t\t\t\t\t\tState: 
v1.ContainerState{\n\t\t\t\t\t\t\t\t\tWaiting: Ptr(v1.ContainerStateWaiting{\n\t\t\t\t\t\t\t\t\t\tReason: \"mock-status\",\n\t\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase:  types.InstancePhase(\"mock-status\"),\n\t\t\t\t\t\t\tReason: \"mock-status\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"initializing pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tStarted: Ptr(false),\n\t\t\t\t\t\t\t\tState: v1.ContainerState{\n\t\t\t\t\t\t\t\t\tRunning: Ptr(v1.ContainerStateRunning{}),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhaseInitializing,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tdesc: \"terminated pod\",\n\t\t\t\tpod: v1.Pod{\n\t\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tStarted: Ptr(false),\n\t\t\t\t\t\t\t\tState: v1.ContainerState{\n\t\t\t\t\t\t\t\t\tTerminated: Ptr(v1.ContainerStateTerminated{}),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\texpect: Ptr(\n\t\t\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\t\t\tPhase: types.InstancePhaseFailed,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t}\n\t\tfor _, tc := range tcs {\n\t\t\tlogrus.Info(tc.desc)\n\t\t\tvalue := InstanceFromPod(tc.pod)\n\t\t\tExpect(value).To(Equal(tc.expect))\n\t\t}\n\t})\n})\n"
  },
  {
    "path": "agent/pkg/k8s/generate_image_cache.go",
    "content": "package k8s\n\nimport (\n\t\"time\"\n\n\tkubefledged \"github.com/senthilrch/kube-fledged/pkg/apis/kubefledged/v1alpha3\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\tmodelzetes \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n)\n\nfunc MakeImageCache(req types.ImageCache, inference *modelzetes.Inference) *kubefledged.ImageCache {\n\tnodeSlector := map[string]string{\n\t\tconsts.LabelServerResource: string(req.NodeSelector),\n\t}\n\tcache := &kubefledged.ImageCache{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName:      req.Name,\n\t\t\tNamespace: req.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(inference, schema.GroupVersionKind{\n\t\t\t\t\tGroup:   modelzetes.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: modelzetes.SchemeGroupVersion.Version,\n\t\t\t\t\tKind:    modelzetes.Kind,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: kubefledged.ImageCacheSpec{\n\t\t\tCacheSpec: []kubefledged.CacheSpecImages{\n\t\t\t\t{\n\t\t\t\t\tImages: []kubefledged.Image{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:           req.Image,\n\t\t\t\t\t\t\tForceFullCache: req.ForceFullCache,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tNodeSelector: nodeSlector,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tStatus: kubefledged.ImageCacheStatus{\n\t\t\tStartTime: &metav1.Time{Time: time.Now()},\n\t\t},\n\t}\n\treturn cache\n}\n"
  },
  {
    "path": "agent/pkg/k8s/generate_job.go",
    "content": "package k8s\n\nimport (\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\tbatchv1 \"k8s.io/api/batch/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\nfunc MakeBuild(req types.Build, inference *v2alpha1.Inference, builderImage, buildkitdAddr, buildctlBin, secret string) (*batchv1.Job, error) {\n\tjob := &batchv1.Job{}\n\tduration, err := time.ParseDuration(req.Spec.BuildTarget.Duration)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse duration\")\n\t}\n\tseconds := int64(duration.Seconds())\n\tdefaultBackoffLimit := int32(0)\n\tdefaultTTLSecondsAfterFinished := int32(60 * 60 * 24 * 7) // 7 days\n\n\tenvs := []corev1.EnvVar{\n\t\t{\n\t\t\tName:  \"MODELZ_BUILD_NAME\",\n\t\t\tValue: req.Spec.Name,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILDER\",\n\t\t\tValue: string(req.Spec.BuildTarget.Builder),\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILD_ARTIFACT_IMAGE\",\n\t\t\tValue: req.Spec.BuildTarget.ArtifactImage,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILD_ARTIFACT_IMAGE_TAG\",\n\t\t\tValue: req.Spec.BuildTarget.ArtifactImageTag,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_REGISTRY\",\n\t\t\tValue: req.Spec.BuildTarget.Registry,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_REGISTRY_TOKEN\",\n\t\t\tValue: req.Spec.BuildTarget.RegistryToken,\n\t\t},\n\t}\n\tif req.Spec.BuildTarget.Builder != types.BuilderTypeImage {\n\t\tenvs = append(envs, buildEnvsForDockerfileOrEnvd(req, buildkitdAddr, buildctlBin)...)\n\t} else {\n\t\tenvs = append(envs, buildEnvsForImage(req)...)\n\t}\n\n\townerReference := []metav1.OwnerReference{\n\t\t*metav1.NewControllerRef(inference, schema.GroupVersionKind{\n\t\t\tGroup:   v2alpha1.SchemeGroupVersion.Group,\n\t\t\tVersion: 
v2alpha1.SchemeGroupVersion.Version,\n\t\t\tKind:    v2alpha1.Kind,\n\t\t}),\n\t}\n\tjob = &batchv1.Job{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:            req.Spec.Name,\n\t\t\tNamespace:       req.Spec.Namespace,\n\t\t\tOwnerReferences: ownerReference,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconsts.LabelBuildName:     req.Spec.Name,\n\t\t\t\tconsts.AnnotationBuilding: \"true\",\n\t\t\t},\n\t\t},\n\t\tSpec: batchv1.JobSpec{\n\t\t\tActiveDeadlineSeconds:   &seconds,\n\t\t\tBackoffLimit:            &defaultBackoffLimit,\n\t\t\tTTLSecondsAfterFinished: &defaultTTLSecondsAfterFinished,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tconsts.LabelBuildName: req.Spec.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tImagePullSecrets: []corev1.LocalObjectReference{\n\t\t\t\t\t\t{Name: secret},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"workspace\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyNever,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:            req.Spec.Name,\n\t\t\t\t\t\t\tImage:           builderImage,\n\t\t\t\t\t\t\tImagePullPolicy: corev1.PullAlways,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"workspace\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/workspace\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: envs,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn job, nil\n}\n\nfunc buildEnvsForImage(req types.Build) []corev1.EnvVar {\n\tenvs := []corev1.EnvVar{}\n\tif req.Spec.DockerSource.AuthN.Username != \"\" {\n\t\tenvs = append(envs, corev1.EnvVar{\n\t\t\tName:  \"MODELZ_SOURCE_REGISTRY_USERNAME\",\n\t\t\tValue: 
req.Spec.DockerSource.AuthN.Username,\n\t\t})\n\t}\n\n\tif req.Spec.DockerSource.AuthN.Password != \"\" {\n\t\tenvs = append(envs, corev1.EnvVar{\n\t\t\tName:  \"MODELZ_SOURCE_REGISTRY_PASSWORD\",\n\t\t\tValue: req.Spec.DockerSource.AuthN.Password,\n\t\t})\n\t}\n\n\tif req.Spec.DockerSource.AuthN.Token != \"\" {\n\t\tenvs = append(envs, corev1.EnvVar{\n\t\t\tName:  \"MODELZ_SOURCE_REGISTRY_TOKEN\",\n\t\t\tValue: req.Spec.DockerSource.AuthN.Token,\n\t\t})\n\t}\n\n\tif req.Spec.DockerSource.ArtifactImage != \"\" {\n\t\tenvs = append(envs, corev1.EnvVar{\n\t\t\tName:  \"MODELZ_SOURCE_REGISTRY_IMAGE\",\n\t\t\tValue: req.Spec.DockerSource.ArtifactImage,\n\t\t})\n\t}\n\n\tif req.Spec.DockerSource.ArtifactImageTag != \"\" {\n\t\tenvs = append(envs, corev1.EnvVar{\n\t\t\tName:  \"MODELZ_SOURCE_REGISTRY_IMAGE_TAG\",\n\t\t\tValue: req.Spec.DockerSource.ArtifactImageTag,\n\t\t})\n\t}\n\n\treturn envs\n}\n\nfunc buildEnvsForDockerfileOrEnvd(req types.Build, buildkitdAddr, buildctlBin string) []corev1.EnvVar {\n\treturn []corev1.EnvVar{\n\t\t{\n\t\t\tName:  \"MODELZ_BUILD_GIT_URL\",\n\t\t\tValue: req.Spec.Repository,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILD_GIT_BRANCH\",\n\t\t\tValue: req.Spec.Branch,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILD_GIT_COMMIT\",\n\t\t\tValue: req.Spec.Revision,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILD_BASE_DIR\",\n\t\t\tValue: req.Spec.BuildTarget.Directory,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_WORKSPACE\",\n\t\t\tValue: \"/workspace\",\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILDKITD_ADDRESS\",\n\t\t\tValue: buildkitdAddr,\n\t\t},\n\t\t{\n\t\t\tName:  \"MODELZ_BUILDER_BIN\",\n\t\t\tValue: buildctlBin,\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "agent/pkg/k8s/managed_cluster.go",
    "content": "package k8s\n\nimport (\n\t\"k8s.io/apimachinery/pkg/version\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nfunc GetKubernetesVersion(client kubernetes.Interface) (*version.Info, error) {\n\treturn client.Discovery().ServerVersion()\n}\n"
  },
  {
    "path": "agent/pkg/k8s/resolver.go",
    "content": "package k8s\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"net/url\"\n\t\"strconv\"\n\n\t\"github.com/anthhub/forwarder\"\n\t\"github.com/phayes/freeport\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\tcorelister \"k8s.io/client-go/listers/core/v1\"\n\t\"k8s.io/client-go/rest\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\ntype Resolver interface {\n\tResolve(namespace, name string) (url.URL, error)\n\tClose(url url.URL)\n}\n\nfunc NewPortForwardingResolver(cfg *rest.Config, cli kubernetes.Interface) Resolver {\n\treturn &PortForwardingResolver{\n\t\tconfig:  cfg,\n\t\tcli:     cli,\n\t\tresults: make(map[int]*forwarder.Result),\n\t}\n}\n\nfunc NewEndpointResolver(lister corelister.EndpointsLister) Resolver {\n\treturn &EndpointResolver{\n\t\tEndpointLister: lister,\n\t}\n}\n\ntype PortForwardingResolver struct {\n\tconfig  *rest.Config\n\tcli     kubernetes.Interface\n\tresults map[int]*forwarder.Result\n}\n\nfunc (e *PortForwardingResolver) Resolve(namespace, name string) (url.URL, error) {\n\tport, err := freeport.GetFreePort()\n\tif err != nil {\n\t\treturn url.URL{}, err\n\t}\n\n\tsvc, err := e.cli.CoreV1().Services(namespace).Get(context.Background(), \"mdz-\"+name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn url.URL{}, err\n\t}\n\tif svc.Spec.Ports == nil || len(svc.Spec.Ports) == 0 {\n\t\treturn url.URL{}, errdefs.System(fmt.Errorf(\"no ports found in service %s\", svc.Name))\n\t}\n\n\toptions := []*forwarder.Option{\n\t\t{\n\t\t\t// the local port for forwarding\n\t\t\tLocalPort: port,\n\t\t\t// the k8s pod port\n\t\t\tRemotePort: svc.Spec.Ports[0].TargetPort.IntValue(),\n\t\t\t// the forwarding service name\n\t\t\tServiceName: \"mdz-\" + name,\n\t\t\t// namespace default is \"default\"\n\t\t\tNamespace: 
namespace,\n\t\t},\n\t}\n\n\tret, err := forwarder.WithRestConfig(context.Background(), options, e.config)\n\tif err != nil {\n\t\treturn url.URL{}, err\n\t}\n\te.results[port] = ret\n\t// wait forwarding ready\n\t// the remote and local ports are listed\n\t_, err = ret.Ready()\n\tif err != nil {\n\t\treturn url.URL{}, err\n\t}\n\n\t// the ports are ready\n\tres, err := url.Parse(\"http://localhost:\" + strconv.Itoa(port))\n\treturn *res, err\n}\n\nfunc (e *PortForwardingResolver) Close(url url.URL) {\n\tport, err := strconv.Atoi(url.Port())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlogrus.Infof(\"close port forwarding %d\\n\", port)\n\tif e.results[port] == nil {\n\t\tlogrus.Infof(\"port forwarding %d not found\\n\", port)\n\t\treturn\n\t}\n\tlogrus.Infof(\"pointer: %v\", e.results[port])\n\te.results[port].Close()\n}\n\ntype EndpointResolver struct {\n\tEndpointLister corelister.EndpointsLister\n}\n\nfunc (e EndpointResolver) Resolve(namespace, name string) (url.URL, error) {\n\tsvcName := consts.DefaultServicePrefix + name\n\n\tsvc, err := e.EndpointLister.Endpoints(namespace).Get(svcName)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn url.URL{}, errdefs.NotFound(err)\n\t\t}\n\t\treturn url.URL{}, errdefs.System(err)\n\t}\n\n\tif len(svc.Subsets) == 0 {\n\t\treturn url.URL{}, errdefs.NotFound(\n\t\t\tfmt.Errorf(\"no subsets for \\\"%s.%s\\\"\", svcName, namespace))\n\t}\n\n\tall := len(svc.Subsets[0].Addresses)\n\tif len(svc.Subsets[0].Addresses) == 0 {\n\t\treturn url.URL{}, errdefs.NotFound(\n\t\t\tfmt.Errorf(\"no addresses for \\\"%s.%s\\\"\", svcName, namespace))\n\t}\n\n\ttarget := rand.Intn(all)\n\n\tserviceIP := svc.Subsets[0].Addresses[target].IP\n\tservicePort := svc.Subsets[0].Ports[0].Port\n\n\turlStr := fmt.Sprintf(\"http://%s:%d\", serviceIP, servicePort)\n\n\turlRes, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn url.URL{}, errdefs.System(err)\n\t}\n\n\treturn *urlRes, nil\n}\n\nfunc (e EndpointResolver) 
Close(url.URL) {\n\t// do nothing\n}\n"
  },
  {
    "path": "agent/pkg/k8s/suite_test.go",
    "content": "package k8s\n\nimport (\n\t\"testing\"\n\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n)\n\nfunc TestBuilder(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"k8s\")\n}\n"
  },
  {
    "path": "agent/pkg/log/factory.go",
    "content": "package log\n\nimport (\n\t\"context\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// Requester submits queries the logging system.\ntype Requester interface {\n\t// Query submits a log request to the actual logging system.\n\tQuery(ctx context.Context, req types.LogRequest) (<-chan types.Message, error)\n}\n"
  },
  {
    "path": "agent/pkg/log/k8s.go",
    "content": "package log\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/informers/internalinterfaces\"\n\t\"k8s.io/client-go/kubernetes\"\n\tv1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n\t\"k8s.io/client-go/tools/cache\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\nconst (\n\t// podInformerResync is the period between cache syncs in the pod informer\n\tpodInformerResync = 5 * time.Second\n\n\t// defaultLogSince is the fallback log stream history\n\tdefaultLogSince = 5 * time.Minute\n\n\t// LogBufferSize number of log messages that may be buffered\n\tLogBufferSize = 500 * 2\n)\n\n// K8sAPIRequestor implements the Requestor interface for k8s\ntype K8sAPIRequestor struct {\n\tclient kubernetes.Interface\n}\n\nfunc NewK8sAPIRequestor(client kubernetes.Interface) Requester {\n\treturn &K8sAPIRequestor{\n\t\tclient: client,\n\t}\n}\n\nfunc (k *K8sAPIRequestor) Query(ctx context.Context,\n\tr types.LogRequest) (<-chan types.Message, error) {\n\tvar sinceTime, endTime time.Time\n\tif r.Since != \"\" {\n\t\tvar err error\n\t\tsinceTime, err = time.Parse(time.RFC3339, r.Since)\n\t\tif err != nil {\n\t\t\treturn nil, errdefs.InvalidParameter(err)\n\t\t}\n\t}\n\n\tif r.End != \"\" {\n\t\tvar err error\n\t\tendTime, err = time.Parse(time.RFC3339, r.End)\n\t\tif err != nil {\n\t\t\treturn nil, errdefs.InvalidParameter(err)\n\t\t}\n\t} else if r.Follow {\n\t\t// avoid truncate\n\t\tendTime = time.Now().Add(time.Hour)\n\t} else {\n\t\tendTime = time.Now()\n\t}\n\n\tlogStream, err := getLogs(ctx,\n\t\tk.client, r.Name, r.Namespace, int64(r.Tail), &sinceTime, r.Follow)\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsgStream := make(chan types.Message, LogBufferSize)\n\tgo func() {\n\t\tdefer close(msgStream)\n\t\t// here we depend on the fact that logStream will close when the context is cancelled,\n\t\t// this ensures that the go routine will resolve\n\t\tfor msg := range logStream {\n\t\t\t// if we have an end time, we should stop streaming logs after that time\n\t\t\tif endTime.After(msg.Timestamp) {\n\t\t\t\tmsgStream <- types.Message{\n\t\t\t\t\tTimestamp: msg.Timestamp,\n\t\t\t\t\tText:      msg.Text,\n\t\t\t\t\tName:      msg.Name,\n\t\t\t\t\tInstance:  msg.Instance,\n\t\t\t\t\tNamespace: msg.Namespace,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn msgStream, nil\n}\n\n// getLogs returns a channel of logs for the given function\nfunc getLogs(ctx context.Context, client kubernetes.Interface, functionName,\n\tnamespace string, tail int64, since *time.Time, follow bool) (\n\t<-chan types.Message, error) {\n\tadded, err := startFunctionPodInformer(ctx, client, functionName, namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogs := make(chan types.Message, LogBufferSize)\n\n\tgo func() {\n\t\tvar watching uint\n\t\tdefer close(logs)\n\n\t\tfinished := make(chan error)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-finished:\n\t\t\t\twatching--\n\t\t\t\tif watching == 0 && !follow {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase p := <-added:\n\t\t\t\twatching++\n\t\t\t\tgo func() {\n\t\t\t\t\tfinished <- podLogs(ctx, client.CoreV1().Pods(namespace),\n\t\t\t\t\t\tp, functionName, namespace, tail, since, follow, logs)\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn logs, nil\n}\n\n// podLogs returns a stream of logs lines from the specified pod\nfunc podLogs(ctx context.Context, i v1.PodInterface, pod, container,\n\tnamespace string, tail int64, since *time.Time, follow bool,\n\tdst chan<- types.Message) error {\n\topts := &corev1.PodLogOptions{\n\t\tFollow:     follow,\n\t\tTimestamps: 
true,\n\t\tContainer:  container,\n\t}\n\n\tif tail > 0 {\n\t\topts.TailLines = &tail\n\t}\n\n\tif opts.TailLines == nil || since != nil {\n\t\topts.SinceSeconds = parseSince(since)\n\t}\n\n\tstream, err := i.GetLogs(pod, opts).Stream(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stream.Close()\n\n\tdone := make(chan error)\n\tgo func() {\n\t\tscanner := bufio.NewScanner(stream)\n\t\tfor scanner.Scan() {\n\t\t\tmsg, ts := extractTimestampAndMsg(scanner.Text())\n\t\t\tdst <- types.Message{\n\t\t\t\tTimestamp: ts,\n\t\t\t\tText:      msg,\n\t\t\t\tInstance:  pod,\n\t\t\t\tName:      container,\n\t\t\t\tNamespace: namespace,\n\t\t\t}\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tdone <- err\n\t\t\treturn\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tlogrus.Debug(\"get-log context cancelled\")\n\t\treturn ctx.Err()\n\tcase err := <-done:\n\t\tif err != io.EOF {\n\t\t\tlogrus.Debugf(\"failed to read from pod log: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// startFunctionPodInformer will gather the list of existing Pods for the function, it will\n// watch for newly added or deleted function instances.\nfunc startFunctionPodInformer(ctx context.Context, client kubernetes.Interface, functionName, namespace string) (<-chan string, error) {\n\tfunctionSelector := &metav1.LabelSelector{\n\t\tMatchLabels: map[string]string{consts.LabelInferenceName: functionName},\n\t}\n\tselector, err := metav1.LabelSelectorAsSelector(functionSelector)\n\tif err != nil {\n\t\treturn nil, errdefs.InvalidParameter(err)\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"selector\":  selector.String(),\n\t\t\"namespace\": namespace,\n\t}).Debugf(\"starting log pod informer\")\n\tfactory := informers.NewFilteredSharedInformerFactory(\n\t\tclient,\n\t\tpodInformerResync,\n\t\tnamespace,\n\t\twithLabels(selector.String()),\n\t)\n\n\tpodInformer := factory.Core().V1().Pods()\n\tpodsResp, err := client.CoreV1().Pods(namespace).List(context.TODO(), 
metav1.ListOptions{LabelSelector: selector.String()})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, errdefs.NotFound(err)\n\t\t} else {\n\t\t\treturn nil, errdefs.System(err)\n\t\t}\n\t}\n\n\tpods := podsResp.Items\n\tif len(pods) == 0 {\n\t\treturn nil, errdefs.NotFound(\n\t\t\tfmt.Errorf(\"no pods found for inference: %s\", functionName))\n\t}\n\n\t// prepare channel with enough space for the current instance set\n\tadded := make(chan string, len(pods))\n\tpodInformer.Informer().AddEventHandler(&podLoggerEventHandler{\n\t\tadded: added,\n\t})\n\n\t// will add existing pods to the chan and then listen for any new pods\n\tgo podInformer.Informer().Run(ctx.Done())\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tclose(added)\n\t}()\n\n\treturn added, nil\n}\n\n// parseSince returns the time.Duration of the requested Since value _or_ 5 minutes\nfunc parseSince(r *time.Time) *int64 {\n\tvar since int64\n\tif r == nil || r.IsZero() {\n\t\tsince = int64(defaultLogSince.Seconds())\n\t\treturn &since\n\t}\n\tsince = int64(time.Since(*r).Seconds())\n\treturn &since\n}\n\nfunc extractTimestampAndMsg(logText string) (string, time.Time) {\n\t// first 32 characters is the k8s timestamp\n\tparts := strings.SplitN(logText, \" \", 2)\n\tts, err := time.Parse(time.RFC3339Nano, parts[0])\n\tif err != nil {\n\t\tlogrus.WithField(\"logText\", logText).\n\t\t\tErrorf(\"error parsing timestamp: %s\", err)\n\t\treturn \"\", time.Time{}\n\t}\n\n\tif len(parts) == 2 {\n\t\treturn parts[1], ts\n\t}\n\n\treturn \"\", ts\n}\n\nfunc withLabels(selector string) internalinterfaces.TweakListOptionsFunc {\n\treturn func(opts *metav1.ListOptions) {\n\t\topts.LabelSelector = selector\n\t}\n}\n\ntype podLoggerEventHandler struct {\n\tcache.ResourceEventHandler\n\tadded   chan<- string\n\tdeleted chan<- string\n}\n\nfunc (h *podLoggerEventHandler) OnAdd(obj interface{}, isInitialList bool) {\n\tpod := obj.(*corev1.Pod)\n\tlogrus.WithField(\"pod\", pod.Name).Debugf(\"log pod informer 
added a pod\")\n\th.added <- pod.Name\n}\n\nfunc (h *podLoggerEventHandler) OnUpdate(oldObj, newObj interface{}) {\n\t// purposefully empty, we don't need to do anything for logs on update\n}\n\nfunc (h *podLoggerEventHandler) OnDelete(obj interface{}) {\n\t// this may not be needed, the log stream Reader _should_ close on its own without\n\t// us needing to watch and close it\n\t// pod := obj.(*corev1.Pod)\n\t// h.deleted <- pod.Name\n}\n"
  },
  {
    "path": "agent/pkg/log/loki.go",
    "content": "package log\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\nconst (\n\t// refer to https://grafana.com/docs/loki/latest/api/#query-loki-over-a-range-of-time\n\tlokiQueryRangePath = \"/loki/api/v1/query_range\"\n)\n\ntype RangeQueryResponse struct {\n\tData struct {\n\t\tResult []struct {\n\t\t\tStream struct {\n\t\t\t\tCluster   string `json:\"cluster,omitempty\"`\n\t\t\t\tContainer string `json:\"container,omitempty\"`\n\t\t\t\tNamespace string `json:\"namespace,omitempty\"`\n\t\t\t\tPod       string `json:\"pod,omitempty\"`\n\t\t\t\tJob       string `json:\"job,omitempty\"`\n\t\t\t}\n\t\t\tValues [][]string `json:\"values,omitempty\"`\n\t\t}\n\t\tResultType string `json:\"resultType,omitempty\"`\n\t}\n\tStatus string `json:\"status,omitempty\"`\n}\n\ntype LokiAPIRequestor struct {\n\tclient http.Client\n\turl    string\n\tuser   string\n\ttoken  string\n}\n\nfunc NewLokiAPIRequestor(url, user, token string) Requester {\n\tloki := LokiAPIRequestor{\n\t\turl:    url,\n\t\tuser:   user,\n\t\ttoken:  token,\n\t\tclient: http.Client{},\n\t}\n\treturn &loki\n}\n\nfunc (l *LokiAPIRequestor) Query(ctx context.Context, r types.LogRequest) (<-chan types.Message, error) {\n\tvar sinceTime time.Time\n\tif r.Since != \"\" {\n\t\tvar err error\n\t\tsinceTime, err = time.Parse(time.RFC3339, r.Since)\n\t\tif err != nil {\n\t\t\treturn nil, errdefs.InvalidParameter(err)\n\t\t}\n\t}\n\n\tlogs, err := l.getLogs(ctx, &sinceTime, r.Namespace, r.Name)\n\treturn logs, err\n}\n\nfunc (l *LokiAPIRequestor) getLogs(ctx context.Context, since *time.Time,\n\tnamespace, name string) (<-chan types.Message, error) {\n\tendpoint, err := url.JoinPath(l.url, lokiQueryRangePath)\n\tif err != nil {\n\t\treturn nil, 
errors.Wrap(err, \"failed to construct the query URL\")\n\t}\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to construct the Loki request\")\n\t}\n\treq.SetBasicAuth(l.user, l.token)\n\tquery := url.Values{}\n\tif since != nil {\n\t\tquery.Add(\"start\", since.String())\n\t\tif time.Since(*since) > time.Hour*24*30 {\n\t\t\t// max query range is 30 days\n\t\t\tquery.Add(\"end\", strconv.Itoa(int(since.Add(time.Hour*24*30).UnixNano())))\n\t\t}\n\t}\n\tquery.Add(\"query\", fmt.Sprintf(`{namespace=\"%s\",pod=\"%s\"}`, namespace, name))\n\treq.URL.RawQuery = query.Encode()\n\tlogrus.Debugf(\"get log from %s\", req.URL.String())\n\n\tresp, err := l.client.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to request the Loki service\")\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.Newf(\"failed to request the Loki, err[%s]\", resp.Status)\n\t}\n\n\tvar queryResp RangeQueryResponse\n\terr = json.NewDecoder(resp.Body).Decode(&queryResp)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal json\")\n\t}\n\n\tif len(queryResp.Data.Result) == 0 {\n\t\treturn nil, errors.New(\"result contains \")\n\t}\n\n\tmsgStream := make(chan types.Message, LogBufferSize)\n\tgo func() {\n\t\tdefer close(msgStream)\n\t\tfor _, value := range queryResp.Data.Result[0].Values {\n\t\t\ttimestamp, err := time.Parse(time.RFC3339, value[0])\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Infof(\"failed to parse timestamp %s during parse log from %s:%s\\n\",\n\t\t\t\t\tvalue[0], namespace, name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmsgStream <- types.Message{\n\t\t\t\tTimestamp: timestamp,\n\t\t\t\tText:      value[1],\n\t\t\t\tName:      name,\n\t\t\t\tNamespace: namespace,\n\t\t\t\tInstance:  name,\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn msgStream, nil\n}\n"
  },
  {
    "path": "agent/pkg/metrics/exporter.go",
    "content": "// Copyright (c) Alex Ellis 2017\n// Copyright (c) 2018 OpenFaaS Author(s)\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage metrics\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/runtime\"\n)\n\n// Exporter is a prometheus metrics collector.\n// It is an implementation of https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#Collector.\ntype Exporter struct {\n\tmetricOptions MetricOptions\n\truntime       runtime.Runtime\n\tservices      []types.InferenceDeployment\n\tlogger        *logrus.Entry\n}\n\n// NewExporter creates a new exporter for the OpenFaaS gateway metrics\nfunc NewExporter(options MetricOptions, r runtime.Runtime) *Exporter {\n\treturn &Exporter{\n\t\tmetricOptions: options,\n\t\truntime:       r,\n\t\tservices:      []types.InferenceDeployment{},\n\t\tlogger:        logrus.WithField(\"component\", \"exporter\"),\n\t}\n}\n\n// Describe is to describe the metrics for Prometheus\nfunc (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.metricOptions.GatewayInferenceInvocation.Describe(ch)\n\te.metricOptions.GatewayInferencesHistogram.Describe(ch)\n\te.metricOptions.ServiceReplicasGauge.Describe(ch)\n\te.metricOptions.ServiceAvailableReplicasGauge.Describe(ch)\n\te.metricOptions.ServiceTargetLoad.Describe(ch)\n\te.metricOptions.GatewayInferenceInvocationStarted.Describe(ch)\n\te.metricOptions.GatewayInferenceInvocationInflight.Describe(ch)\n}\n\n// Collect collects data to be consumed by prometheus\nfunc (e *Exporter) Collect(ch chan<- prometheus.Metric) 
{\n\te.metricOptions.GatewayInferenceInvocation.Collect(ch)\n\te.metricOptions.GatewayInferencesHistogram.Collect(ch)\n\n\te.metricOptions.ServiceReplicasGauge.Reset()\n\te.metricOptions.ServiceAvailableReplicasGauge.Reset()\n\te.metricOptions.ServiceTargetLoad.Reset()\n\n\te.metricOptions.PodStartHistogram.Collect(ch)\n\n\tfor _, service := range e.services {\n\t\tvar serviceName string\n\t\tif len(service.Spec.Namespace) > 0 {\n\t\t\tserviceName = fmt.Sprintf(\"%s.%s\", service.Spec.Name,\n\t\t\t\tservice.Spec.Namespace)\n\t\t} else {\n\t\t\tserviceName = service.Spec.Name\n\t\t}\n\t\t// Initial services information if nil after recent deployment\n\t\te.metricOptions.GatewayInferenceInvocationStarted.WithLabelValues(serviceName)\n\t\te.metricOptions.GatewayInferenceInvocationInflight.WithLabelValues(serviceName)\n\t\t// Set current replica count\n\t\te.metricOptions.ServiceReplicasGauge.WithLabelValues(serviceName).\n\t\t\tSet(float64(service.Status.Replicas))\n\t\t// Set available replica count\n\t\te.metricOptions.ServiceAvailableReplicasGauge.WithLabelValues(serviceName).\n\t\t\tSet(float64(service.Status.AvailableReplicas))\n\n\t\t// Set target load\n\t\tif service.Spec.Scaling != nil {\n\t\t\te.metricOptions.ServiceTargetLoad.WithLabelValues(\n\t\t\t\tserviceName, string(*service.Spec.Scaling.Type)).\n\t\t\t\tSet(float64(*service.Spec.Scaling.TargetLoad))\n\t\t}\n\t}\n\n\te.metricOptions.GatewayInferenceInvocationStarted.Collect(ch)\n\te.metricOptions.GatewayInferenceInvocationInflight.Collect(ch)\n\te.metricOptions.ServiceReplicasGauge.Collect(ch)\n\te.metricOptions.ServiceAvailableReplicasGauge.Collect(ch)\n\te.metricOptions.ServiceTargetLoad.Collect(ch)\n}\n\n// StartServiceWatcher starts a ticker and collects service replica counts to expose to prometheus\nfunc (e *Exporter) StartServiceWatcher(\n\tctx context.Context, interval time.Duration) {\n\tticker := time.NewTicker(interval)\n\tquit := make(chan struct{})\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect 
{\n\t\t\tcase <-ticker.C:\n\n\t\t\t\tnamespaces, err := e.runtime.NamespaceList(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\te.logger.Debug(\"unable to list namespaces: \", err)\n\t\t\t\t}\n\n\t\t\t\tservices := []types.InferenceDeployment{}\n\n\t\t\t\tfor _, namespace := range namespaces {\n\t\t\t\t\tnsServices, err := e.runtime.InferenceList(namespace)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\te.logger.Debug(\"unable to list services: \", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tservices = append(services, nsServices...)\n\t\t\t\t}\n\n\t\t\t\te.services = services\n\t\t\t\tbreak\n\t\t\tcase <-quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n"
  },
  {
    "path": "agent/pkg/metrics/metrics.go",
    "content": "// Copyright (c) Alex Ellis 2017. All rights reserved.\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage metrics\n\nimport (\n\t\"net/http\"\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\n// MetricOptions to be used by web handlers\ntype MetricOptions struct {\n\tGatewayInferenceInvocation         *prometheus.CounterVec\n\tGatewayInferencesHistogram         *prometheus.HistogramVec\n\tGatewayInferenceInvocationStarted  *prometheus.CounterVec\n\tGatewayInferenceInvocationInflight *prometheus.GaugeVec\n\n\tServiceReplicasGauge          *prometheus.GaugeVec\n\tServiceAvailableReplicasGauge *prometheus.GaugeVec\n\tServiceTargetLoad             *prometheus.GaugeVec\n\n\tPodStartHistogram *prometheus.HistogramVec\n}\n\n// ServiceMetricOptions provides RED metrics\ntype ServiceMetricOptions struct {\n\tHistogram *prometheus.HistogramVec\n\tCounter   *prometheus.CounterVec\n}\n\n// Synchronize to make sure MustRegister only called once\nvar once = sync.Once{}\n\n// RegisterExporter registers with Prometheus for tracking\nfunc RegisterExporter(exporter *Exporter) {\n\tonce.Do(func() {\n\t\tprometheus.MustRegister(exporter)\n\t})\n}\n\n// PrometheusHandler Bootstraps prometheus for metrics collection\nfunc PrometheusHandler() http.Handler {\n\treturn promhttp.Handler()\n}\n\n// BuildMetricsOptions builds metrics for tracking inferences in the API gateway\nfunc BuildMetricsOptions() MetricOptions {\n\tgatewayInferencesHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tName: \"gateway_inferences_seconds\",\n\t\tHelp: \"Inference time taken\",\n\t}, []string{\"inference_name\", \"code\"})\n\n\tgatewayInferenceInvocation := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tNamespace: \"gateway\",\n\t\t\tSubsystem: \"inference\",\n\t\t\tName:      \"invocation_total\",\n\t\t\tHelp: 
     \"Inference metrics\",\n\t\t},\n\t\t[]string{\"inference_name\", \"code\"},\n\t)\n\n\tserviceReplicas := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"gateway\",\n\t\t\tName:      \"service_count\",\n\t\t\tHelp:      \"Current count of replicas for inference\",\n\t\t},\n\t\t[]string{\"inference_name\"},\n\t)\n\n\tserviceAvailableReplicas := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"gateway\",\n\t\t\tName:      \"service_available_count\",\n\t\t\tHelp:      \"Current count of available replicas for inference\",\n\t\t},\n\t\t[]string{\"inference_name\"},\n\t)\n\n\tserviceTargetLoad := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"gateway\",\n\t\t\tName:      \"service_target_load\",\n\t\t\tHelp:      \"Target load for inference\",\n\t\t},\n\t\t[]string{\"inference_name\", \"scaling_type\"},\n\t)\n\n\tgatewayInferenceInvocationStarted := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tNamespace: \"gateway\",\n\t\t\tSubsystem: \"inference\",\n\t\t\tName:      \"invocation_started\",\n\t\t\tHelp:      \"The total number of inference HTTP requests started.\",\n\t\t},\n\t\t[]string{\"inference_name\"},\n\t)\n\n\tgatewayInferenceInvocationInflight := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"gateway\",\n\t\t\tSubsystem: \"inference\",\n\t\t\tName:      \"invocation_inflight\",\n\t\t\tHelp:      \"The number of inference HTTP inflight requests.\",\n\t\t},\n\t\t[]string{\"inference_name\"},\n\t)\n\n\tpodStartHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tName:    \"pod_start_seconds\",\n\t\tHelp:    \"Pod start time taken\",\n\t\tBuckets: prometheus.ExponentialBuckets(8, 1.5, 10),\n\t}, []string{\"inference_name\", \"source_image\"})\n\n\tmetricsOptions := MetricOptions{\n\t\tGatewayInferencesHistogram:         gatewayInferencesHistogram,\n\t\tGatewayInferenceInvocation:         
gatewayInferenceInvocation,\n\t\tServiceReplicasGauge:               serviceReplicas,\n\t\tServiceAvailableReplicasGauge:      serviceAvailableReplicas,\n\t\tServiceTargetLoad:                  serviceTargetLoad,\n\t\tGatewayInferenceInvocationStarted:  gatewayInferenceInvocationStarted,\n\t\tGatewayInferenceInvocationInflight: gatewayInferenceInvocationInflight,\n\t\tPodStartHistogram:                  podStartHistogram,\n\t}\n\n\treturn metricsOptions\n}\n"
  },
  {
    "path": "agent/pkg/prom/prometheus_query.go",
    "content": "package prom\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// PrometheusQuery represents parameters for querying Prometheus\ntype PrometheusQuery struct {\n\tPort   int\n\tHost   string\n\tClient *http.Client\n}\n\ntype PrometheusQueryFetcher interface {\n\tFetch(query string) (*VectorQueryResponse, error)\n}\n\n// NewPrometheusQuery create a NewPrometheusQuery\nfunc NewPrometheusQuery(host string, port int, client *http.Client) PrometheusQuery {\n\treturn PrometheusQuery{\n\t\tClient: client,\n\t\tHost:   host,\n\t\tPort:   port,\n\t}\n}\n\nfunc (p PrometheusQuery) AddMetrics(inferences []types.InferenceDeployment) {\n\tif len(inferences) > 0 {\n\t\tns := inferences[0].Spec.Namespace\n\t\tq := fmt.Sprintf(`sum(gateway_inference_invocation_total{inference_name=~\".*.%s\"}) by (inference_name)`, ns)\n\t\t// Restrict query results to only inference names matching namespace suffix.\n\n\t\tresults, err := p.Fetch(url.QueryEscape(q))\n\t\tif err != nil {\n\t\t\t// log the error but continue, the mixIn will correctly handle the empty results.\n\t\t\tlogrus.Debugf(\"Error querying Prometheus: %s\\n\", err.Error())\n\t\t}\n\t\tmixIn(inferences, results)\n\t}\n}\n\n// Fetch queries aggregated stats\nfunc (q PrometheusQuery) Fetch(query string) (*VectorQueryResponse, error) {\n\n\treq, reqErr := http.NewRequest(http.MethodGet, fmt.Sprintf(\"http://%s:%d/api/v1/query?query=%s\", q.Host, q.Port, query), nil)\n\tif reqErr != nil {\n\t\treturn nil, reqErr\n\t}\n\n\tres, getErr := q.Client.Do(req)\n\tif getErr != nil {\n\t\treturn nil, getErr\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tbytesOut, readErr := ioutil.ReadAll(res.Body)\n\tif readErr != nil {\n\t\treturn nil, readErr\n\t}\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"unexpected status code from 
Prometheus want: %d, got: %d, body: %s\", http.StatusOK, res.StatusCode, string(bytesOut))\n\t}\n\n\tvar values VectorQueryResponse\n\n\tunmarshalErr := json.Unmarshal(bytesOut, &values)\n\tif unmarshalErr != nil {\n\t\treturn nil, fmt.Errorf(\"error unmarshalling result: %s, '%s'\", unmarshalErr, string(bytesOut))\n\t}\n\n\treturn &values, nil\n}\n\ntype VectorQueryResponse struct {\n\tData struct {\n\t\tResult []struct {\n\t\t\tMetric struct {\n\t\t\t\tCode          string `json:\"code\"`\n\t\t\t\tScalingType   string `json:\"scaling_type\"`\n\t\t\t\tInferenceName string `json:\"inference_name\"`\n\t\t\t}\n\t\t\tValue []interface{} `json:\"value\"`\n\t\t}\n\t}\n}\n\nfunc mixIn(inferences []types.InferenceDeployment, metrics *VectorQueryResponse) {\n\n\tif inferences == nil || metrics == nil {\n\t\treturn\n\t}\n\n\tfor i, inference := range inferences {\n\t\tfor _, v := range metrics.Data.Result {\n\n\t\t\tif v.Metric.InferenceName == fmt.Sprintf(\"%s.%s\",\n\t\t\t\tinference.Spec.Name, inference.Spec.Namespace) {\n\t\t\t\tmetricValue := v.Value[1]\n\t\t\t\tswitch value := metricValue.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tf, err := strconv.ParseFloat(value, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogrus.Debugf(\"add_metrics: unable to convert value %q for metric: %s\", value, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tinferences[i].Status.InvocationCount += int32(f)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "agent/pkg/runtime/build.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\nfunc (r generalRuntime) BuildList(ctx context.Context, namespace string) (\n\t[]types.Build, error) {\n\tres := []types.Build{}\n\tjobs, err := r.kubeClient.BatchV1().Jobs(namespace).\n\t\tList(ctx, metav1.ListOptions{\n\t\t\tLabelSelector: fmt.Sprintf(\"%s=true\", consts.AnnotationBuilding),\n\t\t})\n\tif err != nil {\n\t\tif !k8serrors.IsNotFound(err) {\n\t\t\treturn nil, errdefs.System(err)\n\t\t}\n\t}\n\n\tif jobs != nil {\n\t\tfor _, job := range jobs.Items {\n\t\t\tbuild, err := k8s.AsBuild(job)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errdefs.System(err)\n\t\t\t}\n\n\t\t\tres = append(res, build)\n\t\t}\n\t}\n\treturn res, nil\n}\n\nfunc (r generalRuntime) BuildCreate(ctx context.Context,\n\treq types.Build, inference *v2alpha1.Inference, builderImage, buildkitdAddress, buildCtlBin, secret string) error {\n\tbuildJob, err := k8s.MakeBuild(req, inference, builderImage,\n\t\tbuildkitdAddress, buildCtlBin, secret)\n\tif err != nil {\n\t\treturn errdefs.System(err)\n\t}\n\n\tif _, err := r.kubeClient.BatchV1().Jobs(req.Spec.Namespace).\n\t\tCreate(ctx, buildJob, metav1.CreateOptions{}); err != nil {\n\t\treturn errdefs.System(err)\n\t}\n\n\treturn nil\n}\n\nfunc (r generalRuntime) BuildGet(ctx context.Context, namespace, buildName string) (types.Build, error) {\n\tjob, err := r.kubeClient.BatchV1().Jobs(namespace).Get(ctx,\n\t\tbuildName, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn types.Build{}, 
errdefs.NotFound(err)\n\t\t}\n\t\treturn types.Build{}, errdefs.System(err)\n\t}\n\n\tres, err := k8s.AsBuild(*job)\n\tif err != nil {\n\t\treturn types.Build{}, errdefs.System(err)\n\t}\n\treturn res, nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/cluster_info_get.go",
    "content": "package runtime\n\nimport (\n\t\"strings\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/version\"\n)\n\nfunc (r generalRuntime) GetClusterInfo(cluster *types.ManagedCluster) error {\n\tinfo, err := k8s.GetKubernetesVersion(r.kubeClient)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcluster.KubernetesVersion = info.GitVersion\n\tcluster.Platform = info.Platform\n\n\tv := version.GetVersion()\n\tcluster.Version = v.Version\n\n\tresources, err := r.ListServerResource()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcluster.ServerResources = strings.Join(resources, \";\")\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/image_cache.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n\tmodelzetes \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc (r generalRuntime) ImageCacheCreate(ctx context.Context, req types.ImageCache, inference *modelzetes.Inference) error {\n\timageCache := k8s.MakeImageCache(req, inference)\n\tlogrus.Infof(\"%v\", imageCache)\n\n\tif _, err := r.kubefledgedClient.KubefledgedV1alpha3().\n\t\tImageCaches(req.Namespace).\n\t\tCreate(ctx, imageCache, metav1.CreateOptions{}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_create.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tv1 \"k8s.io/api/core/v1\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/config\"\n\tlocalconsts \"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n\tingressv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\nfunc (r generalRuntime) InferenceCreate(ctx context.Context,\n\treq types.InferenceDeployment, cfg config.IngressConfig, event string, serverPort int) error {\n\n\tnamespace := req.Spec.Namespace\n\n\tif r.eventEnabled {\n\t\terr := r.eventRecorder.CreateDeploymentEvent(namespace, req.Spec.Name, event, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tinf, err := makeInference(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create the ingress\n\t// TODO(gaocegege): Check if the domain is already used.\n\tif r.ingressEnabled {\n\t\tname := req.Spec.Labels[consts.LabelName]\n\n\t\tif r.ingressAnyIPToDomain {\n\t\t\t// Get the service with type=loadbalancer.\n\t\t\tsvcs, err := r.kubeClient.CoreV1().Services(\"\").List(ctx, metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn errdefs.System(fmt.Errorf(\"failed to list services: %v\", err))\n\t\t\t}\n\n\t\t\tif len(svcs.Items) == 0 {\n\t\t\t\treturn errdefs.System(fmt.Errorf(\"no service with type=LoadBalancer\"))\n\t\t\t}\n\t\t\tvar externalIP string\n\t\t\tfor _, s := range svcs.Items {\n\t\t\t\tif s.Spec.Type == v1.ServiceTypeLoadBalancer {\n\t\t\t\t\tif len(s.Status.LoadBalancer.Ingress) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\texternalIP = 
s.Status.LoadBalancer.Ingress[0].IP\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Set the domain to\n\t\t\tingressDomain := fmt.Sprintf(\"%s.%s\", externalIP, localconsts.Domain)\n\t\t\tcfg.Domain = ingressDomain\n\t\t}\n\n\t\tdomain, err := makeDomain(name, cfg.Domain)\n\t\tif err != nil {\n\t\t\treturn errdefs.InvalidParameter(err)\n\t\t}\n\n\t\t// Set the domain.\n\t\t// Create the inference with the ingress domain.\n\t\tif inf.Spec.Annotations == nil {\n\t\t\tinf.Spec.Annotations = make(map[string]string)\n\t\t}\n\t\tif cfg.TLSEnabled {\n\t\t\tinf.Spec.Annotations[AnnotationDomain] = fmt.Sprintf(\"https://%s\", domain)\n\t\t} else {\n\t\t\tinf.Spec.Annotations[AnnotationDomain] = fmt.Sprintf(\"http://%s\", domain)\n\t\t}\n\n\t\t_, err = r.inferenceClient.TensorchordV2alpha1().\n\t\t\tInferences(namespace).Create(\n\t\t\tctx, inf, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tif k8serrors.IsAlreadyExists(err) {\n\t\t\t\treturn errdefs.Conflict(err)\n\t\t\t} else {\n\t\t\t\treturn errdefs.System(err)\n\t\t\t}\n\t\t}\n\n\t\tcfg.Domain = domain\n\t\tingress, err := makeIngress(req, cfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = r.ingressClient.TensorchordV1().\n\t\t\tInferenceIngresses(cfg.Namespace).\n\t\t\tCreate(ctx, ingress, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tif k8serrors.IsAlreadyExists(err) {\n\t\t\t\treturn errdefs.Conflict(err)\n\t\t\t} else {\n\t\t\t\treturn errdefs.System(err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Set the gateway kubernetes service domain.\n\t\tdomain := fmt.Sprintf(\"gateway.default:%d/api/v1/%s/%s/\", serverPort, string(req.Spec.Framework), req.Spec.Name)\n\t\tif inf.Spec.Annotations == nil {\n\t\t\tinf.Spec.Annotations = make(map[string]string)\n\t\t}\n\t\tif cfg.TLSEnabled {\n\t\t\tinf.Spec.Annotations[AnnotationDomain] = fmt.Sprintf(\"https://%s\", domain)\n\t\t} else {\n\t\t\tinf.Spec.Annotations[AnnotationDomain] = fmt.Sprintf(\"http://%s\", domain)\n\t\t}\n\t\t_, err = 
r.inferenceClient.TensorchordV2alpha1().\n\t\t\tInferences(namespace).Create(\n\t\t\tctx, inf, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tif k8serrors.IsAlreadyExists(err) {\n\t\t\t\treturn errdefs.Conflict(err)\n\t\t\t} else {\n\t\t\t\treturn errdefs.System(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc makeInference(request types.InferenceDeployment) (*v2alpha1.Inference, error) {\n\tis := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      request.Spec.Name,\n\t\t\tNamespace: request.Spec.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconsts.LabelInferenceName: request.Spec.Name,\n\t\t\t},\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          request.Spec.Name,\n\t\t\tImage:         request.Spec.Image,\n\t\t\tFramework:     v2alpha1.Framework(request.Spec.Framework),\n\t\t\tPort:          request.Spec.Port,\n\t\t\tCommand:       request.Spec.Command,\n\t\t\tEnvVars:       request.Spec.EnvVars,\n\t\t\tSecrets:       request.Spec.Secrets,\n\t\t\tConstraints:   request.Spec.Constraints,\n\t\t\tLabels:        request.Spec.Labels,\n\t\t\tAnnotations:   request.Spec.Annotations,\n\t\t\tHTTPProbePath: request.Spec.HTTPProbePath,\n\t\t},\n\t}\n\n\tif request.Spec.Scaling != nil {\n\t\tis.Spec.Scaling = &v2alpha1.ScalingConfig{\n\t\t\tMinReplicas:     request.Spec.Scaling.MinReplicas,\n\t\t\tMaxReplicas:     request.Spec.Scaling.MaxReplicas,\n\t\t\tTargetLoad:      request.Spec.Scaling.TargetLoad,\n\t\t\tZeroDuration:    request.Spec.Scaling.ZeroDuration,\n\t\t\tStartupDuration: request.Spec.Scaling.StartupDuration,\n\t\t}\n\t\tif request.Spec.Scaling.Type != nil {\n\t\t\tbuf := v2alpha1.ScalingType(*request.Spec.Scaling.Type)\n\t\t\tis.Spec.Scaling.Type = &buf\n\t\t}\n\t}\n\n\trr, err := createResources(request)\n\tif err != nil {\n\t\treturn nil, errdefs.InvalidParameter(err)\n\t}\n\n\tis.Spec.Resources = &rr\n\treturn is, nil\n}\n\nfunc makeIngress(request types.InferenceDeployment, cfg config.IngressConfig) 
(*ingressv1.InferenceIngress, error) {\n\tlabels := map[string]string{\n\t\tconsts.LabelInferenceName:      request.Spec.Name,\n\t\tconsts.LabelInferenceNamespace: request.Spec.Namespace,\n\t}\n\n\tif request.Spec.Labels == nil {\n\t\treturn nil, errdefs.InvalidParameter(fmt.Errorf(\"labels is required\"))\n\t}\n\n\tingress := &ingressv1.InferenceIngress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      request.Spec.Name,\n\t\t\tNamespace: cfg.Namespace,\n\t\t\tLabels:    labels,\n\t\t},\n\t\tSpec: ingressv1.InferenceIngressSpec{\n\t\t\tDomain:        cfg.Domain,\n\t\t\tFramework:     string(request.Spec.Framework),\n\t\t\tIngressType:   \"nginx\",\n\t\t\tBypassGateway: false,\n\t\t\tFunction:      request.Spec.Name,\n\t\t\tTLS: &ingressv1.InferenceIngressTLS{\n\t\t\t\tEnabled: cfg.TLSEnabled,\n\t\t\t},\n\t\t},\n\t}\n\n\tannotation := map[string]string{}\n\tif value, exist := request.Spec.Annotations[consts.AnnotationControlPlaneKey]; exist {\n\t\tannotation[consts.AnnotationControlPlaneKey] = value\n\t}\n\tingress.Annotations = annotation\n\n\treturn ingress, nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_delete.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\tingressclientset \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\tinferenceclientset \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\nfunc (r generalRuntime) InferenceDelete(ctx context.Context, namespace,\n\tinferenceName, ingressNamespace, event string) error {\n\n\tif r.eventEnabled {\n\t\terr := r.eventRecorder.CreateDeploymentEvent(namespace, inferenceName, event, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgetOpts := metav1.GetOptions{}\n\n\t// This makes sure we don't delete non-labelled deployments\n\t_, err := r.inferenceClient.TensorchordV2alpha1().\n\t\tInferences(namespace).\n\t\tGet(context.TODO(), inferenceName, getOpts)\n\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn errdefs.NotFound(err)\n\t\t} else {\n\t\t\treturn errdefs.System(err)\n\t\t}\n\t}\n\n\tif err := deleteInference(ctx, namespace, r.inferenceClient,\n\t\tr.ingressClient, ingressNamespace,\n\t\tinferenceName, r.ingressEnabled); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc deleteInference(ctx context.Context,\n\tnamespace string,\n\tclientset inferenceclientset.Interface,\n\tingressClient ingressclientset.Interface,\n\tbaseNamespace string,\n\tinferenceName string, ingressEnabled bool) error {\n\tforegroundPolicy := metav1.DeletePropagationForeground\n\topts := &metav1.DeleteOptions{PropagationPolicy: &foregroundPolicy}\n\n\tif deployErr := clientset.TensorchordV2alpha1().Inferences(namespace).\n\t\tDelete(ctx, inferenceName, *opts); deployErr != nil {\n\n\t\tif k8serrors.IsNotFound(deployErr) {\n\t\t\treturn errdefs.NotFound(deployErr)\n\t\t} else {\n\t\t\treturn errdefs.System(deployErr)\n\t\t}\n\t}\n\n\tif ingressEnabled && ingressClient 
!= nil {\n\t\tif err := ingressClient.TensorchordV1().InferenceIngresses(baseNamespace).Delete(ctx, inferenceName, *opts); err != nil {\n\t\t\tif k8serrors.IsNotFound(err) {\n\t\t\t\treturn errdefs.NotFound(err)\n\t\t\t} else {\n\t\t\t\treturn errdefs.System(err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_exec.go",
    "content": "package runtime\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/sirupsen/logrus\"\n\tv1 \"k8s.io/api/core/v1\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/rand\"\n\tclientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n\t\"k8s.io/client-go/tools/remotecommand\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\nconst (\n\t// Time allowed to write a message to the peer.\n\twriteWait = 10 * time.Second\n\t// ctrl+d to close terminal.\n\tendOfTransmission = \"\\u0004\"\n)\n\nfunc (r generalRuntime) InferenceExec(ctx *gin.Context, namespace, instance string,\n\tcommands []string, tty bool) error {\n\tpod, err := r.kubeClient.CoreV1().Pods(namespace).Get(\n\t\tctx.Request.Context(), instance, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn errdefs.NotFound(errors.New(\"inference instance not found\"))\n\t\t}\n\t\treturn errdefs.System(err)\n\t}\n\n\tif pod.Status.Phase != v1.PodRunning {\n\t\treturn errdefs.Unavailable(errors.New(\"inference instance is not running\"))\n\t}\n\n\treq := r.kubeClient.CoreV1().RESTClient().Post().\n\t\tResource(\"pods\").\n\t\tName(instance).\n\t\tNamespace(namespace).\n\t\tSubResource(\"exec\")\n\treq.VersionedParams(&v1.PodExecOptions{\n\t\tCommand: commands,\n\t\tStdin:   tty,\n\t\tStdout:  true,\n\t\tStderr:  true,\n\t\tTTY:     tty,\n\t}, clientsetscheme.ParameterCodec)\n\n\texec, err := remotecommand.NewSPDYExecutor(r.clientConfig, http.MethodPost, req.URL())\n\tif err != nil {\n\t\treturn errdefs.System(err)\n\t}\n\n\tif tty {\n\t\tt, err := newTerminalSession(fmt.Sprintf(\"exec/%s/%s/%s\", namespace, instance, rand.String(5)),\n\t\t\tctx.Request, ctx.Writer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer t.Close()\n\n\t\tlogrus.WithField(\"exec\", 
exec).Debugf(\"executing command\")\n\t\tif err = exec.StreamWithContext(ctx.Request.Context(), remotecommand.StreamOptions{\n\t\t\tStdin:             t,\n\t\t\tStdout:            t,\n\t\t\tStderr:            t,\n\t\t\tTerminalSizeQueue: t,\n\t\t\tTty:               true,\n\t\t}); err != nil {\n\t\t\t// The response is already hijacked, so we can't return an error.\n\t\t\tlogrus.Warnf(\"exec stream failed: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\tlogrus.Debugf(\"running without tty\")\n\t\tif err := exec.StreamWithContext(ctx.Request.Context(),\n\t\t\tremotecommand.StreamOptions{\n\t\t\t\tStdout: ctx.Writer,\n\t\t\t\tStderr: ctx.Writer,\n\t\t\t\tTty:    tty,\n\t\t\t}); err != nil {\n\t\t\treturn errdefs.System(err)\n\t\t}\n\t}\n\treturn nil\n}\n\ntype PtyHandler interface {\n\tio.Reader\n\tio.Writer\n\tremotecommand.TerminalSizeQueue\n}\n\n// TerminalMessage is the messaging protocol between ShellController and TerminalSession.\n//\n// OP      DIRECTION  FIELD(S) USED  DESCRIPTION\n// ---------------------------------------------------------------------\n// bind    fe->be     SessionID      Id sent back from TerminalResponse\n// stdin   fe->be     Data           Keystrokes/paste buffer\n// resize  fe->be     Rows, Cols     New terminal size\n// stdout  be->fe     Data           Output from the process\n// toast   be->fe     Data           OOB message to be shown to the user\ntype TerminalMessage struct {\n\tID   string `json:\"id,omitempty\"`\n\tOp   string `json:\"op,omitempty\"`\n\tData string `json:\"data,omitempty\"`\n\tRows uint16 `json:\"rows,omitempty\"`\n\tCols uint16 `json:\"cols,omitempty\"`\n}\n\n// TerminalSession\ntype TerminalSession struct {\n\tID       string\n\twsConn   *websocket.Conn\n\tsizeChan chan remotecommand.TerminalSize\n\tdoneChan chan struct{}\n}\n\n// TerminalSize handles pty->process resize events\n// Called in a loop from remotecommand as long as the process is running\nfunc (t *TerminalSession) Next() 
*remotecommand.TerminalSize {\n\tselect {\n\tcase size := <-t.sizeChan:\n\t\treturn &size\n\tcase <-t.doneChan:\n\t\treturn nil\n\t}\n}\n\n// Read handles pty->process messages (stdin, resize)\n// Called in a loop from remotecommand as long as the process is running\nfunc (t *TerminalSession) Read(p []byte) (int, error) {\n\tvar msg TerminalMessage\n\tif err := t.wsConn.ReadJSON(&msg); err != nil {\n\t\tlogrus.Debugf(\"%s: read json failed: %v\", t.ID, err)\n\t\treturn copy(p, endOfTransmission), err\n\t}\n\tlogrus.Debugf(\"%s: read json: %v\", t.ID, msg)\n\tswitch msg.Op {\n\tcase \"stdin\":\n\t\tlogrus.WithField(\"remote\", t.wsConn.RemoteAddr()).Debugf(\"%s: read %d bytes: %s\", t.ID, len(msg.Data), msg.Data)\n\t\tsize := copy(p, msg.Data)\n\t\tlogrus.WithField(\"remote\", t.wsConn.RemoteAddr()).Debugf(\"%s: copied %d bytes: %s\", t.ID, size, p)\n\t\treturn size, nil\n\tcase \"resize\":\n\t\tt.sizeChan <- remotecommand.TerminalSize{Width: msg.Cols, Height: msg.Rows}\n\t\treturn 0, nil\n\tdefault:\n\t\tlogrus.WithField(\"remote\", t.wsConn.RemoteAddr()).Debugf(\"%s: unknown message type '%s'\", t.ID, msg.Op)\n\t\treturn copy(p, endOfTransmission), fmt.Errorf(\"unknown message type '%s'\", msg.Op)\n\t}\n}\n\n// Write handles process->pty stdout\n// Called from remotecommand whenever there is any output\nfunc (t *TerminalSession) Write(p []byte) (int, error) {\n\tmsg := TerminalMessage{\n\t\tOp:   \"stdout\",\n\t\tData: string(p),\n\t}\n\n\tlogrus.WithField(\"remote\", t.wsConn.RemoteAddr()).Debugf(\"%s: write %d bytes: %s\", t.ID, len(p), string(p))\n\n\tif err := t.wsConn.WriteJSON(msg); err != nil {\n\t\tlogrus.WithField(\"remote\", t.wsConn.RemoteAddr()).Debugf(\"write message failed: %v\", err)\n\t\treturn 0, err\n\t}\n\treturn len(p), nil\n}\n\nfunc (t *TerminalSession) Close() error {\n\tclose(t.doneChan)\n\treturn t.wsConn.Close()\n}\n\nfunc newTerminalSession(id string, r *http.Request, w http.ResponseWriter) (*TerminalSession, error) {\n\tupgrader := 
websocket.Upgrader{\n\t\tReadBufferSize:  1024,\n\t\tWriteBufferSize: 1024,\n\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\treturn true\n\t\t},\n\t}\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TerminalSession{\n\t\tID:       id,\n\t\twsConn:   conn,\n\t\tsizeChan: make(chan remotecommand.TerminalSize),\n\t\tdoneChan: make(chan struct{}),\n\t}, nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_get.go",
    "content": "package runtime\n\nimport (\n\t\"fmt\"\n\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tv1 \"k8s.io/client-go/listers/apps/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n\tapis \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/client/listers/modelzetes/v2alpha1\"\n)\n\nfunc (r generalRuntime) InferenceGet(namespace, inferenceName string) (\n\t*types.InferenceDeployment, error) {\n\treturn inferenceGet(namespace, inferenceName,\n\t\tr.inferenceInformer.Lister(), r.deploymentInformer.Lister())\n}\n\nfunc (r generalRuntime) InferenceGetCRD(namespace, name string) (*apis.Inference, error) {\n\tinference, err := r.inferenceInformer.Lister().Inferences(namespace).Get(name)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, errdefs.NotFound(err)\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn inference, nil\n}\n\n// inferenceGet returns a inference or nil if not found\nfunc inferenceGet(namespace string, inferenceName string,\n\tinfLister v2alpha1.InferenceLister,\n\tlister v1.DeploymentLister) (*types.InferenceDeployment, error) {\n\n\tinference, err := infLister.Inferences(namespace).Get(inferenceName)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, errdefs.NotFound(err)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\titem, err := lister.Deployments(namespace).\n\t\tGet(inferenceName)\n\tif err != nil {\n\t\tif !k8serrors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tinf := k8s.AsInferenceDeployment(inference, item)\n\tif inf != nil {\n\t\treturn inf, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"inference: %s not found\", inferenceName)\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_instance.go",
    "content": "package runtime\n\nimport (\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\tv1 \"k8s.io/client-go/listers/core/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n)\n\nfunc (r generalRuntime) InferenceInstanceList(namespace, inferenceName string) (\n\t[]types.InferenceDeploymentInstance, error) {\n\treturn getInstances(namespace, inferenceName, r.podInformer.Lister())\n}\n\nfunc getInstances(functionNamespace string, functionName string,\n\tlister v1.PodLister) ([]types.InferenceDeploymentInstance, error) {\n\tinstances := make([]types.InferenceDeploymentInstance, 0)\n\n\titems, err := lister.List(\n\t\tlabels.SelectorFromSet(k8s.MakeLabelSelector(functionName)))\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, errdefs.System(err)\n\t}\n\n\tfor _, item := range items {\n\t\tif item != nil {\n\t\t\tinstance := k8s.InstanceFromPod(*item)\n\t\t\tif instance != nil {\n\t\t\t\tinstances = append(instances, *instance)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn instances, nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_list.go",
    "content": "package runtime\n\nimport (\n\t\"sort\"\n\n\tmv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tmodelzetesv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/listers/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/apimachinery/pkg/selection\"\n\tv1 \"k8s.io/client-go/listers/apps/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n)\n\nfunc (r generalRuntime) InferenceList(namespace string) ([]types.InferenceDeployment, error) {\n\tinfLister := r.inferenceInformer.Lister()\n\tdeploymentLister := r.deploymentInformer.Lister()\n\n\tfunctions, err := inferenceList(namespace, infLister,\n\t\tdeploymentLister)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn functions, nil\n}\n\nfunc inferenceList(functionNamespace string,\n\tinfLister modelzetesv2alpha1.InferenceLister,\n\tdeploymentLister v1.DeploymentLister) ([]types.InferenceDeployment, error) {\n\tfunctions := []types.InferenceDeployment{}\n\n\tsel := labels.NewSelector()\n\treq, err := labels.NewRequirement(consts.LabelInferenceName, selection.Exists, []string{})\n\tif err != nil {\n\t\treturn functions, errdefs.NotFound(err)\n\t}\n\tonlyFunctions := sel.Add(*req)\n\n\tinferences, err := infLister.Inferences(functionNamespace).\n\t\tList(labels.Everything())\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn functions, nil\n\t\t} else {\n\t\t\treturn functions, errdefs.System(err)\n\t\t}\n\t}\n\n\tdeploys, err := deploymentLister.Deployments(functionNamespace).List(onlyFunctions)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn getInferences(inferences, deploys)\n\t\t} else {\n\t\t\treturn functions, 
errdefs.System(err)\n\t\t}\n\t}\n\n\treturn getInferences(inferences, deploys)\n}\n\nfunc getInferences(inferences []*mv2alpha1.Inference, deploys []*appsv1.Deployment) ([]types.InferenceDeployment, error) {\n\tsort.Slice(inferences, func(i, j int) bool {\n\t\treturn (*inferences[i]).Name < (*inferences[j]).Name\n\t})\n\tsort.Slice(deploys, func(i, j int) bool {\n\t\treturn (*deploys[i]).Name < (*deploys[j]).Name\n\t})\n\n\tres := []types.InferenceDeployment{}\n\tj := 0\n\tfor i := range inferences {\n\t\tif j >= len(deploys) {\n\t\t\tres = append(res, *k8s.AsInferenceDeployment(inferences[i], nil))\n\t\t} else if inferences[i].Name != deploys[j].Name {\n\t\t\tres = append(res, *k8s.AsInferenceDeployment(inferences[i], nil))\n\t\t} else {\n\t\t\tres = append(res, *k8s.AsInferenceDeployment(inferences[i], deploys[j]))\n\t\t\tj++\n\t\t}\n\t}\n\n\treturn res, nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_replicas.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\nfunc (r generalRuntime) InferenceScale(ctx context.Context, namespace string,\n\treq types.ScaleServiceRequest, inf *types.InferenceDeployment) (err error) {\n\toptions := metav1.GetOptions{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind:       \"Deployment\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t}\n\n\tdeployment, err := r.kubeClient.AppsV1().Deployments(namespace).\n\t\tGet(ctx, req.ServiceName, options)\n\tif err != nil {\n\t\treturn errdefs.InvalidParameter(err)\n\t}\n\n\toldReplicas := *deployment.Spec.Replicas\n\treplicas := int32(req.Replicas)\n\n\tif inf.Spec.Scaling != nil {\n\t\tminReplicas := *inf.Spec.Scaling.MinReplicas\n\t\tif replicas < minReplicas {\n\t\t\treplicas = minReplicas\n\t\t}\n\n\t\tmaxReplicas := *inf.Spec.Scaling.MaxReplicas\n\t\tif replicas > maxReplicas {\n\t\t\treplicas = maxReplicas\n\t\t}\n\t}\n\n\tif replicas >= consts.MaxReplicas {\n\t\treplicas = consts.MaxReplicas\n\t}\n\n\tif oldReplicas == replicas {\n\t\treturn nil\n\t}\n\tevent := types.DeploymentScaleDownEvent\n\tif oldReplicas < replicas {\n\t\tevent = types.DeploymentScaleUpEvent\n\t}\n\n\tvar building bool\n\tif r.buildEnabled {\n\t\t_, building = deployment.Annotations[consts.AnnotationBuilding]\n\t}\n\n\tif building {\n\t\tevent = types.DeploymentScaleBlockEvent\n\t\treq.EventMessage = \"Deployment is building image, scale is blocked\"\n\t\treplicas = 0\n\t}\n\n\tif r.eventEnabled {\n\t\t// Only create event when the first time scale up/down\n\t\tif req.Attempt == 0 {\n\t\t\terr = r.eventRecorder.CreateDeploymentEvent(namespace, deployment.Name, event, req.EventMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tdeployment.Spec.Replicas = 
&replicas\n\tr.logger.WithField(\"deployment\", deployment.Name).\n\t\tWithField(\"namespace\", namespace).\n\t\tWithField(\"replicas\", replicas).Debug(\"scaling deployment\")\n\n\tif _, err = r.kubeClient.AppsV1().Deployments(namespace).\n\t\tUpdate(ctx, deployment, metav1.UpdateOptions{}); err != nil {\n\t\treturn errdefs.System(err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/inference_update.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tinferenceclientset \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\nfunc (r generalRuntime) InferenceUpdate(ctx context.Context, namespace string,\n\treq types.InferenceDeployment, event string) (err error) {\n\n\tif r.eventEnabled {\n\t\terr := r.eventRecorder.CreateDeploymentEvent(namespace, req.Spec.Name, event, req.Status.EventMessage)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err = updateInference(ctx, namespace, r.inferenceClient, req); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc updateInference(\n\tctx context.Context,\n\tfunctionNamespace string,\n\tinferenceClient inferenceclientset.Interface,\n\trequest types.InferenceDeployment) (err error) {\n\n\tactual, err := inferenceClient.TensorchordV2alpha1().\n\t\tInferences(functionNamespace).Get(\n\t\tctx, request.Spec.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn errdefs.NotFound(err)\n\t\t} else {\n\t\t\treturn errdefs.System(err)\n\t\t}\n\t}\n\n\texpected := actual.DeepCopy()\n\n\tif request.Spec.Image != \"\" {\n\t\texpected.Spec.Image = request.Spec.Image\n\t}\n\tif request.Spec.Scaling != nil {\n\t\texpected.Spec.Scaling = &v2alpha1.ScalingConfig{\n\t\t\tMinReplicas:     request.Spec.Scaling.MinReplicas,\n\t\t\tMaxReplicas:     request.Spec.Scaling.MaxReplicas,\n\t\t\tTargetLoad:      request.Spec.Scaling.TargetLoad,\n\t\t\tZeroDuration:    request.Spec.Scaling.ZeroDuration,\n\t\t\tStartupDuration: request.Spec.Scaling.StartupDuration,\n\t\t}\n\t\tif request.Spec.Scaling.Type != nil {\n\t\t\texpected.Spec.Scaling.Type = 
new(v2alpha1.ScalingType)\n\t\t\t*expected.Spec.Scaling.Type = v2alpha1.ScalingType(*request.Spec.Scaling.Type)\n\t\t}\n\t}\n\tif request.Spec.EnvVars != nil {\n\t\texpected.Spec.EnvVars = request.Spec.EnvVars\n\t}\n\tif request.Spec.Secrets != nil {\n\t\texpected.Spec.Secrets = request.Spec.Secrets\n\t}\n\tif request.Spec.Constraints != nil {\n\t\texpected.Spec.Constraints = request.Spec.Constraints\n\t}\n\tif request.Spec.Labels != nil {\n\t\texpected.Spec.Labels = request.Spec.Labels\n\t}\n\tif request.Spec.Annotations != nil {\n\t\texpected.Spec.Annotations = request.Spec.Annotations\n\t}\n\tif request.Spec.Resources != nil {\n\t\trr, err := createResources(request)\n\t\tif err != nil {\n\t\t\treturn errdefs.InvalidParameter(err)\n\t\t}\n\t\texpected.Spec.Resources = &rr\n\t}\n\n\tif _, err := inferenceClient.TensorchordV2alpha1().\n\t\tInferences(functionNamespace).Update(\n\t\tctx, expected, metav1.UpdateOptions{}); err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn errdefs.NotFound(err)\n\t\t} else {\n\t\t\treturn errdefs.System(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/mock/mock.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: pkg/runtime/runtime.go\n\n// Package mock is a generated GoMock package.\npackage mock\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgin \"github.com/gin-gonic/gin\"\n\tgomock \"github.com/golang/mock/gomock\"\n\ttypes \"github.com/tensorchord/openmodelz/agent/api/types\"\n\tconfig \"github.com/tensorchord/openmodelz/agent/pkg/config\"\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n)\n\n// MockRuntime is a mock of Runtime interface.\ntype MockRuntime struct {\n\tctrl     *gomock.Controller\n\trecorder *MockRuntimeMockRecorder\n}\n\n// MockRuntimeMockRecorder is the mock recorder for MockRuntime.\ntype MockRuntimeMockRecorder struct {\n\tmock *MockRuntime\n}\n\n// NewMockRuntime creates a new mock instance.\nfunc NewMockRuntime(ctrl *gomock.Controller) *MockRuntime {\n\tmock := &MockRuntime{ctrl: ctrl}\n\tmock.recorder = &MockRuntimeMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockRuntime) EXPECT() *MockRuntimeMockRecorder {\n\treturn m.recorder\n}\n\n// BuildCreate mocks base method.\nfunc (m *MockRuntime) BuildCreate(ctx context.Context, req types.Build, inference *v2alpha1.Inference, builderImage, buildkitdAddress, buildCtlBin, secret string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BuildCreate\", ctx, req, inference, builderImage, buildkitdAddress, buildCtlBin, secret)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// BuildCreate indicates an expected call of BuildCreate.\nfunc (mr *MockRuntimeMockRecorder) BuildCreate(ctx, req, inference, builderImage, buildkitdAddress, buildCtlBin, secret interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"BuildCreate\", reflect.TypeOf((*MockRuntime)(nil).BuildCreate), ctx, req, inference, builderImage, buildkitdAddress, buildCtlBin, 
secret)\n}\n\n// BuildGet mocks base method.\nfunc (m *MockRuntime) BuildGet(ctx context.Context, namespace, buildName string) (types.Build, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BuildGet\", ctx, namespace, buildName)\n\tret0, _ := ret[0].(types.Build)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// BuildGet indicates an expected call of BuildGet.\nfunc (mr *MockRuntimeMockRecorder) BuildGet(ctx, namespace, buildName interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"BuildGet\", reflect.TypeOf((*MockRuntime)(nil).BuildGet), ctx, namespace, buildName)\n}\n\n// BuildList mocks base method.\nfunc (m *MockRuntime) BuildList(ctx context.Context, namespace string) ([]types.Build, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BuildList\", ctx, namespace)\n\tret0, _ := ret[0].([]types.Build)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// BuildList indicates an expected call of BuildList.\nfunc (mr *MockRuntimeMockRecorder) BuildList(ctx, namespace interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"BuildList\", reflect.TypeOf((*MockRuntime)(nil).BuildList), ctx, namespace)\n}\n\n// GetClusterInfo mocks base method.\nfunc (m *MockRuntime) GetClusterInfo(cluster *types.ManagedCluster) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetClusterInfo\", cluster)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// GetClusterInfo indicates an expected call of GetClusterInfo.\nfunc (mr *MockRuntimeMockRecorder) GetClusterInfo(cluster interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetClusterInfo\", reflect.TypeOf((*MockRuntime)(nil).GetClusterInfo), cluster)\n}\n\n// ImageCacheCreate mocks base method.\nfunc (m *MockRuntime) ImageCacheCreate(ctx context.Context, req types.ImageCache, inference *v2alpha1.Inference) error 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageCacheCreate\", ctx, req, inference)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// ImageCacheCreate indicates an expected call of ImageCacheCreate.\nfunc (mr *MockRuntimeMockRecorder) ImageCacheCreate(ctx, req, inference interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ImageCacheCreate\", reflect.TypeOf((*MockRuntime)(nil).ImageCacheCreate), ctx, req, inference)\n}\n\n// InferenceCreate mocks base method.\nfunc (m *MockRuntime) InferenceCreate(ctx context.Context, req types.InferenceDeployment, cfg config.IngressConfig, event string, serverPort int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceCreate\", ctx, req, cfg, event, serverPort)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// InferenceCreate indicates an expected call of InferenceCreate.\nfunc (mr *MockRuntimeMockRecorder) InferenceCreate(ctx, req, cfg, event, serverPort interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceCreate\", reflect.TypeOf((*MockRuntime)(nil).InferenceCreate), ctx, req, cfg, event, serverPort)\n}\n\n// InferenceDelete mocks base method.\nfunc (m *MockRuntime) InferenceDelete(ctx context.Context, namespace, inferenceName, ingressNamespace, event string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceDelete\", ctx, namespace, inferenceName, ingressNamespace, event)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// InferenceDelete indicates an expected call of InferenceDelete.\nfunc (mr *MockRuntimeMockRecorder) InferenceDelete(ctx, namespace, inferenceName, ingressNamespace, event interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceDelete\", reflect.TypeOf((*MockRuntime)(nil).InferenceDelete), ctx, namespace, inferenceName, ingressNamespace, event)\n}\n\n// InferenceExec mocks 
base method.\nfunc (m *MockRuntime) InferenceExec(ctx *gin.Context, namespace, instance string, commands []string, tty bool) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceExec\", ctx, namespace, instance, commands, tty)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// InferenceExec indicates an expected call of InferenceExec.\nfunc (mr *MockRuntimeMockRecorder) InferenceExec(ctx, namespace, instance, commands, tty interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceExec\", reflect.TypeOf((*MockRuntime)(nil).InferenceExec), ctx, namespace, instance, commands, tty)\n}\n\n// InferenceGet mocks base method.\nfunc (m *MockRuntime) InferenceGet(namespace, inferenceName string) (*types.InferenceDeployment, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceGet\", namespace, inferenceName)\n\tret0, _ := ret[0].(*types.InferenceDeployment)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// InferenceGet indicates an expected call of InferenceGet.\nfunc (mr *MockRuntimeMockRecorder) InferenceGet(namespace, inferenceName interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceGet\", reflect.TypeOf((*MockRuntime)(nil).InferenceGet), namespace, inferenceName)\n}\n\n// InferenceGetCRD mocks base method.\nfunc (m *MockRuntime) InferenceGetCRD(namespace, name string) (*v2alpha1.Inference, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceGetCRD\", namespace, name)\n\tret0, _ := ret[0].(*v2alpha1.Inference)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// InferenceGetCRD indicates an expected call of InferenceGetCRD.\nfunc (mr *MockRuntimeMockRecorder) InferenceGetCRD(namespace, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceGetCRD\", reflect.TypeOf((*MockRuntime)(nil).InferenceGetCRD), namespace, 
name)\n}\n\n// InferenceInstanceList mocks base method.\nfunc (m *MockRuntime) InferenceInstanceList(namespace, inferenceName string) ([]types.InferenceDeploymentInstance, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceInstanceList\", namespace, inferenceName)\n\tret0, _ := ret[0].([]types.InferenceDeploymentInstance)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// InferenceInstanceList indicates an expected call of InferenceInstanceList.\nfunc (mr *MockRuntimeMockRecorder) InferenceInstanceList(namespace, inferenceName interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceInstanceList\", reflect.TypeOf((*MockRuntime)(nil).InferenceInstanceList), namespace, inferenceName)\n}\n\n// InferenceList mocks base method.\nfunc (m *MockRuntime) InferenceList(namespace string) ([]types.InferenceDeployment, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceList\", namespace)\n\tret0, _ := ret[0].([]types.InferenceDeployment)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// InferenceList indicates an expected call of InferenceList.\nfunc (mr *MockRuntimeMockRecorder) InferenceList(namespace interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceList\", reflect.TypeOf((*MockRuntime)(nil).InferenceList), namespace)\n}\n\n// InferenceScale mocks base method.\nfunc (m *MockRuntime) InferenceScale(ctx context.Context, namespace string, req types.ScaleServiceRequest, inf *types.InferenceDeployment) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceScale\", ctx, namespace, req, inf)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// InferenceScale indicates an expected call of InferenceScale.\nfunc (mr *MockRuntimeMockRecorder) InferenceScale(ctx, namespace, req, inf interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
\"InferenceScale\", reflect.TypeOf((*MockRuntime)(nil).InferenceScale), ctx, namespace, req, inf)\n}\n\n// InferenceUpdate mocks base method.\nfunc (m *MockRuntime) InferenceUpdate(ctx context.Context, namespace string, req types.InferenceDeployment, event string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InferenceUpdate\", ctx, namespace, req, event)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// InferenceUpdate indicates an expected call of InferenceUpdate.\nfunc (mr *MockRuntimeMockRecorder) InferenceUpdate(ctx, namespace, req, event interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"InferenceUpdate\", reflect.TypeOf((*MockRuntime)(nil).InferenceUpdate), ctx, namespace, req, event)\n}\n\n// NamespaceCreate mocks base method.\nfunc (m *MockRuntime) NamespaceCreate(ctx context.Context, name string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceCreate\", ctx, name)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// NamespaceCreate indicates an expected call of NamespaceCreate.\nfunc (mr *MockRuntimeMockRecorder) NamespaceCreate(ctx, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NamespaceCreate\", reflect.TypeOf((*MockRuntime)(nil).NamespaceCreate), ctx, name)\n}\n\n// NamespaceDelete mocks base method.\nfunc (m *MockRuntime) NamespaceDelete(ctx context.Context, name string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceDelete\", ctx, name)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// NamespaceDelete indicates an expected call of NamespaceDelete.\nfunc (mr *MockRuntimeMockRecorder) NamespaceDelete(ctx, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NamespaceDelete\", reflect.TypeOf((*MockRuntime)(nil).NamespaceDelete), ctx, name)\n}\n\n// NamespaceGet mocks base method.\nfunc (m *MockRuntime) 
NamespaceGet(ctx context.Context, name string) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceGet\", ctx, name)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}\n\n// NamespaceGet indicates an expected call of NamespaceGet.\nfunc (mr *MockRuntimeMockRecorder) NamespaceGet(ctx, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NamespaceGet\", reflect.TypeOf((*MockRuntime)(nil).NamespaceGet), ctx, name)\n}\n\n// NamespaceList mocks base method.\nfunc (m *MockRuntime) NamespaceList(ctx context.Context) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NamespaceList\", ctx)\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// NamespaceList indicates an expected call of NamespaceList.\nfunc (mr *MockRuntimeMockRecorder) NamespaceList(ctx interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NamespaceList\", reflect.TypeOf((*MockRuntime)(nil).NamespaceList), ctx)\n}\n\n// ServerDeleteNode mocks base method.\nfunc (m *MockRuntime) ServerDeleteNode(ctx context.Context, name string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ServerDeleteNode\", ctx, name)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// ServerDeleteNode indicates an expected call of ServerDeleteNode.\nfunc (mr *MockRuntimeMockRecorder) ServerDeleteNode(ctx, name interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ServerDeleteNode\", reflect.TypeOf((*MockRuntime)(nil).ServerDeleteNode), ctx, name)\n}\n\n// ServerLabelCreate mocks base method.\nfunc (m *MockRuntime) ServerLabelCreate(ctx context.Context, name string, spec types.ServerSpec) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ServerLabelCreate\", ctx, name, spec)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// ServerLabelCreate indicates an expected call of 
ServerLabelCreate.\nfunc (mr *MockRuntimeMockRecorder) ServerLabelCreate(ctx, name, spec interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ServerLabelCreate\", reflect.TypeOf((*MockRuntime)(nil).ServerLabelCreate), ctx, name, spec)\n}\n\n// ServerList mocks base method.\nfunc (m *MockRuntime) ServerList(ctx context.Context) ([]types.Server, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ServerList\", ctx)\n\tret0, _ := ret[0].([]types.Server)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ServerList indicates an expected call of ServerList.\nfunc (mr *MockRuntimeMockRecorder) ServerList(ctx interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ServerList\", reflect.TypeOf((*MockRuntime)(nil).ServerList), ctx)\n}\n"
  },
  {
    "path": "agent/pkg/runtime/namespace.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tv1 \"k8s.io/api/core/v1\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\nfunc (r generalRuntime) NamespaceList(ctx context.Context) ([]string, error) {\n\tns, err := r.kubeClient.CoreV1().Namespaces().List(ctx,\n\t\tmetav1.ListOptions{\n\t\t\tLabelSelector: fmt.Sprintf(\"%s=true\", consts.LabelNamespace),\n\t\t})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, nil\n\t\t} else {\n\t\t\treturn nil, errdefs.System(err)\n\t\t}\n\t}\n\n\tres := make([]string, len(ns.Items))\n\tfor i, n := range ns.Items {\n\t\tres[i] = n.Name\n\t}\n\treturn res, nil\n}\n\nfunc (r generalRuntime) NamespaceCreate(ctx context.Context, name string) error {\n\tns := &v1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconsts.LabelNamespace: \"true\",\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := r.kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsAlreadyExists(err) {\n\t\t\treturn errdefs.Conflict(err)\n\t\t} else {\n\t\t\treturn errdefs.System(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (r generalRuntime) NamespaceGet(ctx context.Context, name string) bool {\n\t_, err := r.kubeClient.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (r generalRuntime) NamespaceDelete(ctx context.Context, name string) error {\n\terr := r.kubeClient.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})\n\treturn err\n}\n"
  },
  {
    "path": "agent/pkg/runtime/node.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc (r generalRuntime) ListServerResource() ([]string, error) {\n\tresources := []string{}\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: consts.LabelServerResource,\n\t}\n\n\tnodes, err := r.kubeClient.CoreV1().Nodes().List(context.Background(), listOptions)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to list nodes: %v\", err)\n\t\treturn resources, err\n\t}\n\tfor _, node := range nodes.Items {\n\t\tresources = append(resources, node.Labels[consts.LabelServerResource])\n\t}\n\treturn resources, nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/runtime.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/sirupsen/logrus\"\n\tapicorev1 \"k8s.io/api/core/v1\"\n\tappsv1 \"k8s.io/client-go/informers/apps/v1\"\n\tcorev1 \"k8s.io/client-go/informers/core/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\tclientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n\t\"k8s.io/client-go/rest\"\n\n\tkubefledged \"github.com/senthilrch/kube-fledged/pkg/client/clientset/versioned\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/config\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/event\"\n\tingressclient \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tapis \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tmodelzetes \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tclientset \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tmodelzv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions/modelzetes/v2alpha1\"\n)\n\ntype Runtime interface {\n\t// build\n\tBuildList(ctx context.Context, namespace string) ([]types.Build, error)\n\tBuildCreate(ctx context.Context, req types.Build, inference *v2alpha1.Inference, builderImage,\n\t\tbuildkitdAddress, buildCtlBin, secret string) error\n\tBuildGet(ctx context.Context, namespace, buildName string) (types.Build, error)\n\t// cache\n\tImageCacheCreate(ctx context.Context, req types.ImageCache, inference *modelzetes.Inference) error\n\t// inference\n\tInferenceCreate(ctx context.Context,\n\t\treq types.InferenceDeployment, cfg config.IngressConfig, event string, serverPort int) error\n\tInferenceDelete(ctx context.Context, namespace, inferenceName, ingressNamespace, event string) error\n\tInferenceExec(ctx *gin.Context, namespace, 
instance string, commands []string, tty bool) error\n\tInferenceGet(namespace, inferenceName string) (*types.InferenceDeployment, error)\n\tInferenceGetCRD(namespace, name string) (*apis.Inference, error)\n\tInferenceInstanceList(namespace, inferenceName string) ([]types.InferenceDeploymentInstance, error)\n\tInferenceList(namespace string) ([]types.InferenceDeployment, error)\n\tInferenceScale(ctx context.Context, namespace string, req types.ScaleServiceRequest, inf *types.InferenceDeployment) error\n\tInferenceUpdate(ctx context.Context, namespace string, req types.InferenceDeployment, event string) (err error)\n\t// namespace\n\tNamespaceList(ctx context.Context) ([]string, error)\n\tNamespaceCreate(ctx context.Context, name string) error\n\tNamespaceGet(ctx context.Context, name string) bool\n\tNamespaceDelete(ctx context.Context, name string) error\n\t// server\n\tServerDeleteNode(ctx context.Context, name string) error\n\tServerLabelCreate(ctx context.Context, name string, spec types.ServerSpec) error\n\tServerList(ctx context.Context) ([]types.Server, error)\n\t// managed cluster\n\tGetClusterInfo(cluster *types.ManagedCluster) error\n}\n\ntype generalRuntime struct {\n\tendpointsInformer  corev1.EndpointsInformer\n\tdeploymentInformer appsv1.DeploymentInformer\n\tinferenceInformer  modelzv2alpha1.InferenceInformer\n\tpodInformer        corev1.PodInformer\n\n\tkubeClient        kubernetes.Interface\n\tclientConfig      *rest.Config\n\trestClient        *rest.RESTClient\n\tingressClient     ingressclient.Interface\n\tinferenceClient   clientset.Interface\n\tkubefledgedClient kubefledged.Interface\n\n\tlogger        *logrus.Entry\n\teventRecorder event.Interface\n\n\tingressEnabled       bool\n\tingressAnyIPToDomain bool\n\teventEnabled         bool\n\tbuildEnabled         bool\n}\n\nfunc New(clientConfig *rest.Config,\n\tendpointsInformer corev1.EndpointsInformer,\n\tdeploymentInformer appsv1.DeploymentInformer,\n\tinferenceInformer 
modelzv2alpha1.InferenceInformer,\n\tpodInformer corev1.PodInformer,\n\tkubeClient kubernetes.Interface,\n\tingressClient ingressclient.Interface,\n\tkubefledgedClient kubefledged.Interface,\n\tinferenceClient clientset.Interface,\n\teventRecorder event.Interface,\n\tingressEnabled bool,\n\teventEnabled bool,\n\tbuildEnabled bool,\n\tingressAnyIPToDomain bool,\n) (Runtime, error) {\n\tr := generalRuntime{\n\t\tendpointsInformer:    endpointsInformer,\n\t\tdeploymentInformer:   deploymentInformer,\n\t\tinferenceInformer:    inferenceInformer,\n\t\tpodInformer:          podInformer,\n\t\tkubeClient:           kubeClient,\n\t\tkubefledgedClient:    kubefledgedClient,\n\t\tclientConfig:         clientConfig,\n\t\tingressClient:        ingressClient,\n\t\tinferenceClient:      inferenceClient,\n\t\tlogger:               logrus.WithField(\"component\", \"runtime\"),\n\t\teventRecorder:        eventRecorder,\n\t\tingressEnabled:       ingressEnabled,\n\t\tingressAnyIPToDomain: ingressAnyIPToDomain,\n\t\teventEnabled:         eventEnabled,\n\t\tbuildEnabled:         buildEnabled,\n\t}\n\t// Ref https://github.com/operator-framework/operator-sdk/issues/1570\n\tclientConfig.APIPath = \"api\"\n\tclientConfig.GroupVersion = &apicorev1.SchemeGroupVersion\n\tclientConfig.NegotiatedSerializer = clientsetscheme.Codecs\n\tr.clientConfig = clientConfig\n\trestClient, err := rest.RESTClientFor(clientConfig)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tr.restClient = restClient\n\treturn r, nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/server_delete.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc (r generalRuntime) ServerDeleteNode(ctx context.Context, name string) error {\n\terr := r.kubeClient.CoreV1().Nodes().Delete(ctx, name, metav1.DeleteOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn errdefs.NotFound(err)\n\t\t}\n\t\treturn errdefs.System(err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/server_label_create.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\t\"path/filepath\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc (r generalRuntime) ServerLabelCreate(ctx context.Context, name string, spec types.ServerSpec) error {\n\tnode, err := r.kubeClient.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn errdefs.NotFound(err)\n\t\t} else {\n\t\t\treturn errdefs.System(err)\n\t\t}\n\t}\n\n\tif len(node.Labels) == 0 {\n\t\tnode.Labels = map[string]string{}\n\t}\n\n\tfor k, v := range spec.Labels {\n\t\tnode.Labels[filepath.Join(\"tensorchord.ai\", k)] = v\n\t}\n\n\t_, err = r.kubeClient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn errdefs.NotFound(err)\n\t\t} else {\n\t\t\treturn errdefs.System(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/server_list.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tv1 \"k8s.io/api/core/v1\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc (r generalRuntime) ServerList(ctx context.Context) ([]types.Server, error) {\n\tnodes, err := r.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, errdefs.NotFound(err)\n\t\t} else {\n\t\t\treturn nil, errdefs.System(err)\n\t\t}\n\t}\n\n\tif len(nodes.Items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn getServers(nodes.Items), nil\n}\n\nfunc getServers(nodes []v1.Node) []types.Server {\n\tres := []types.Server{}\n\tfor _, n := range nodes {\n\t\tres = append(res, getServer(n))\n\t}\n\treturn res\n}\n\nfunc getServer(n v1.Node) types.Server {\n\tnode := types.Server{\n\t\tSpec: types.ServerSpec{\n\t\t\tName:   n.Name,\n\t\t\tLabels: make(map[string]string),\n\t\t},\n\t\tStatus: types.ServerStatus{\n\t\t\tAllocatable: k8s.AsResourceList(n.Status.Allocatable),\n\t\t\tCapacity:    k8s.AsResourceList(n.Status.Capacity),\n\t\t\tSystem: types.NodeSystemInfo{\n\t\t\t\tMachineID:       n.Status.NodeInfo.MachineID,\n\t\t\t\tKernelVersion:   n.Status.NodeInfo.KernelVersion,\n\t\t\t\tOSImage:         n.Status.NodeInfo.OSImage,\n\t\t\t\tOperatingSystem: n.Status.NodeInfo.OperatingSystem,\n\t\t\t\tArchitecture:    n.Status.NodeInfo.Architecture,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor k, v := range n.Labels {\n\t\tif strings.HasPrefix(k, \"ai.tensorchord.\") {\n\t\t\tnode.Spec.Labels[strings.TrimPrefix(k, \"ai.tensorchord.\")] = v\n\t\t}\n\n\t\tif k == consts.LabelServerResource {\n\t\t\tnode.Status.System.ResourceType = v\n\t\t}\n\t}\n\n\tphase := \"Ready\"\n\tfor 
_, c := range n.Status.Conditions {\n\t\tif c.Type == v1.NodeReady && c.Status != v1.ConditionTrue {\n\t\t\tphase = \"NotReady\"\n\t\t} else if c.Type == v1.NodeDiskPressure && c.Status != v1.ConditionFalse {\n\t\t\tphase = \"DiskPressure\"\n\t\t} else if c.Type == v1.NodeMemoryPressure && c.Status != v1.ConditionFalse {\n\t\t\tphase = \"MemoryPressure\"\n\t\t} else if c.Type == v1.NodePIDPressure && c.Status != v1.ConditionFalse {\n\t\t\tphase = \"PIDPressure\"\n\t\t} else if c.Type == v1.NodeNetworkUnavailable && c.Status != v1.ConditionFalse {\n\t\t\tphase = \"NetworkUnavailable\"\n\t\t}\n\t}\n\tnode.Status.Phase = phase\n\treturn node\n}\n"
  },
  {
    "path": "agent/pkg/runtime/util_domain.go",
    "content": "package runtime\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/dchest/uniuri\"\n)\n\nconst (\n\tAnnotationDomain = \"ai.tensorchord.domain\"\n)\n\nconst (\n\t// stdLen is a standard length of uniuri string to achieve ~95 bits of entropy.\n\tstdLen = 16\n)\n\n// StdChars is a set of standard characters allowed in uniuri string.\nvar stdChars = []byte(\"abcdefghijklmnopqrstuvwxyz0123456789\")\n\nfunc makeDomain(name, baseDomain string) (string, error) {\n\tif baseDomain == \"\" {\n\t\treturn \"\", fmt.Errorf(\"base domain is required\")\n\t}\n\n\tif name == \"\" {\n\t\treturn \"\", fmt.Errorf(\"domain name is required\")\n\t}\n\n\thash := uniuri.NewLenChars(stdLen, stdChars)\n\n\treturn fmt.Sprintf(\"%s-%s.%s\",\n\t\tname, hash, baseDomain), nil\n}\n"
  },
  {
    "path": "agent/pkg/runtime/util_resource.go",
    "content": "package runtime\n\nimport (\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc createResources(request types.InferenceDeployment) (corev1.ResourceRequirements, error) {\n\tresources := corev1.ResourceRequirements{\n\t\tLimits:   corev1.ResourceList{},\n\t\tRequests: corev1.ResourceList{},\n\t}\n\n\tif request.Spec.Resources == nil {\n\t\treturn resources, nil\n\t}\n\n\t// Set Memory limits\n\tif request.Spec.Resources.Limits[types.ResourceMemory] != \"\" {\n\t\tqty, err := resource.ParseQuantity(\n\t\t\tstring(request.Spec.Resources.Limits[types.ResourceMemory]))\n\t\tif err != nil {\n\t\t\treturn resources, err\n\t\t}\n\t\tresources.Limits[corev1.ResourceMemory] = qty\n\t}\n\n\tif request.Spec.Resources.Requests[types.ResourceMemory] != \"\" {\n\t\tqty, err := resource.ParseQuantity(\n\t\t\tstring(request.Spec.Resources.Requests[types.ResourceMemory]))\n\t\tif err != nil {\n\t\t\treturn resources, err\n\t\t}\n\t\tresources.Requests[corev1.ResourceMemory] = qty\n\t}\n\n\t// Set CPU limits\n\tif request.Spec.Resources.Limits[types.ResourceCPU] != \"\" {\n\t\tqty, err := resource.ParseQuantity(\n\t\t\tstring(request.Spec.Resources.Limits[types.ResourceCPU]))\n\t\tif err != nil {\n\t\t\treturn resources, err\n\t\t}\n\t\tresources.Limits[corev1.ResourceCPU] = qty\n\t}\n\n\tif request.Spec.Resources.Requests[types.ResourceCPU] != \"\" {\n\t\tqty, err := resource.ParseQuantity(\n\t\t\tstring(request.Spec.Resources.Requests[types.ResourceCPU]))\n\t\tif err != nil {\n\t\t\treturn resources, err\n\t\t}\n\t\tresources.Requests[corev1.ResourceCPU] = qty\n\t}\n\n\t// Set GPU limits\n\tif request.Spec.Resources.Limits[types.ResourceGPU] != \"\" {\n\t\tqty, err := resource.ParseQuantity(\n\t\t\tstring(request.Spec.Resources.Limits[types.ResourceGPU]))\n\t\tif err != nil {\n\t\t\treturn resources, 
err\n\t\t}\n\t\tresources.Limits[consts.ResourceNvidiaGPU] = qty\n\t}\n\n\tif request.Spec.Resources.Requests[types.ResourceGPU] != \"\" {\n\t\tqty, err := resource.ParseQuantity(\n\t\t\tstring(request.Spec.Resources.Requests[types.ResourceGPU]))\n\t\tif err != nil {\n\t\t\treturn resources, err\n\t\t}\n\t\tresources.Requests[consts.ResourceNvidiaGPU] = qty\n\t}\n\n\treturn resources, nil\n}\n"
  },
  {
    "path": "agent/pkg/scaling/function_scaler.go",
    "content": "package scaling\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/dgraph-io/ristretto\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/runtime\"\n)\n\nconst (\n\tmaxPollCount = 1000\n\tretries      = 20\n\tpollInterval = time.Millisecond * 100\n)\n\n// NewInferenceScaler creates a new scaler with the specified\n// ScalingConfig\nfunc NewInferenceScaler(r runtime.Runtime,\n\tdefaultTTL time.Duration) (*InferenceScaler, error) {\n\tcache, err := ristretto.NewCache(&ristretto.Config{\n\t\tNumCounters: 1e7,\n\t\tMaxCost:     1 << 28,\n\t\tBufferItems: 64,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &InferenceScaler{\n\t\tcache:      *cache,\n\t\truntime:    r,\n\t\tdefaultTTL: defaultTTL,\n\t}, nil\n}\n\n// InferenceScaler scales from zero\ntype InferenceScaler struct {\n\tcache   ristretto.Cache\n\tmu      sync.RWMutex\n\truntime runtime.Runtime\n\n\tdefaultTTL time.Duration\n}\n\n// FunctionScaleResult holds the result of scaling from zero\ntype FunctionScaleResult struct {\n\tAvailable bool\n\tError     error\n\tFound     bool\n\tDuration  time.Duration\n}\n\nfunc (s *InferenceScaler) get(\n\tnamespace, inferenceName string) (ServiceQueryResponse, error) {\n\tkey := inferenceName + \".\" + namespace\n\n\ts.mu.RLock()\n\traw, exit := s.cache.Get(key)\n\ts.mu.RUnlock()\n\tif exit {\n\t\treturn raw.(ServiceQueryResponse), nil\n\t}\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\traw, exit = s.cache.Get(key)\n\tif exit {\n\t\treturn raw.(ServiceQueryResponse), nil\n\t}\n\n\t// There wasn't a hit, or there were no available replicas found\n\t// so query the live endpoint\n\tinf, err := s.runtime.InferenceGet(namespace, inferenceName)\n\tif err != nil {\n\t\treturn ServiceQueryResponse{}, err\n\t}\n\tsqr, err := AsServerQueryResponse(inf)\n\tif err != nil {\n\t\treturn ServiceQueryResponse{}, err\n\t}\n\tif sqr == nil 
{\n\t\treturn ServiceQueryResponse{},\n\t\t\tfmt.Errorf(\"unable to get service query response\")\n\t}\n\ts.cache.SetWithTTL(key, *sqr, 1, s.defaultTTL)\n\treturn *sqr, nil\n}\n\n// Scale scales a function from zero replicas to 1 or the value set in\n// the minimum replicas metadata\nfunc (s *InferenceScaler) Scale(ctx context.Context,\n\tnamespace, inferenceName string) FunctionScaleResult {\n\tstart := time.Now()\n\n\tresp, err := s.get(namespace, inferenceName)\n\tif err != nil {\n\t\treturn FunctionScaleResult{\n\t\t\tError:     err,\n\t\t\tAvailable: false,\n\t\t\tFound:     false,\n\t\t\tDuration:  time.Since(start),\n\t\t}\n\t}\n\n\t// Check if there are available replicas in the live data\n\tif resp.AvailableReplicas > 0 {\n\t\treturn FunctionScaleResult{\n\t\t\tError:     nil,\n\t\t\tAvailable: true,\n\t\t\tFound:     true,\n\t\t\tDuration:  time.Since(start),\n\t\t}\n\t}\n\n\t// If the desired replica count is 0, then a scale up event\n\t// is required.\n\tif resp.Replicas == 0 {\n\t\t// If the max replicas is 0, then the function is not\n\t\t// scalable\n\t\tif resp.MaxReplicas == 0 {\n\t\t\treturn FunctionScaleResult{\n\t\t\t\tError:     fmt.Errorf(\"unable to scale up %s, max replicas is 0\", inferenceName),\n\t\t\t\tAvailable: false,\n\t\t\t\tFound:     true,\n\t\t\t\tDuration:  time.Since(start),\n\t\t\t}\n\t\t}\n\n\t\tminReplicas := uint64(1)\n\t\tif resp.MinReplicas > 0 {\n\t\t\tminReplicas = resp.MinReplicas\n\t\t}\n\n\t\t// In a retry-loop, first query desired replicas, then\n\t\t// set them if the value is still at 0.\n\t\tscaleResult := Retry(func(attempt int) error {\n\t\t\tinf, err := s.runtime.InferenceGet(namespace, inferenceName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// The scale up is complete because the desired replica count\n\t\t\t// has been set to 1 or more.\n\t\t\tif inf.Status.Replicas > 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Request a scale up to the minimum amount of replicas\n\t\t\tif err := 
s.runtime.InferenceScale(ctx, namespace, types.ScaleServiceRequest{\n\t\t\t\tServiceName:  inferenceName,\n\t\t\t\tReplicas:     minReplicas,\n\t\t\t\tEventMessage: fmt.Sprintf(\"scale up to replicas %d\", minReplicas),\n\t\t\t\tAttempt:      attempt,\n\t\t\t}, inf); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlogrus.WithField(\"inference\", inferenceName).\n\t\t\t\tWithField(\"replicas\", minReplicas).\n\t\t\t\tDebug(\"scaling up inference\")\n\t\t\treturn nil\n\t\t}, \"Scale\", retries, pollInterval)\n\n\t\tif scaleResult != nil {\n\t\t\treturn FunctionScaleResult{\n\t\t\t\tError:     scaleResult,\n\t\t\t\tAvailable: false,\n\t\t\t\tFound:     true,\n\t\t\t\tDuration:  time.Since(start),\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch resp.Framework {\n\t// Return early for prototype frameworks.\n\tcase \"gradio\", \"streamlit\":\n\t\treturn FunctionScaleResult{\n\t\t\tError:     nil,\n\t\t\tAvailable: false,\n\t\t\tFound:     true,\n\t\t\tDuration:  time.Since(start),\n\t\t}\n\t}\n\n\t// Holding pattern for at least one function replica to be available\n\tfor i := 0; i < maxPollCount; i++ {\n\t\tinf, err := s.runtime.InferenceGet(namespace, inferenceName)\n\t\tif err != nil {\n\t\t\treturn FunctionScaleResult{\n\t\t\t\tError:     err,\n\t\t\t\tAvailable: false,\n\t\t\t\tFound:     true,\n\t\t\t\tDuration:  time.Since(start),\n\t\t\t}\n\t\t}\n\n\t\ttotalTime := time.Since(start)\n\t\tif inf.Status.AvailableReplicas > 0 {\n\t\t\tlogrus.Debugf(\"[Ready] function=%s waited for - %.4fs\",\n\t\t\t\tinferenceName, totalTime.Seconds())\n\n\t\t\treturn FunctionScaleResult{\n\t\t\t\tError:     nil,\n\t\t\t\tAvailable: true,\n\t\t\t\tFound:     true,\n\t\t\t\tDuration:  totalTime,\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(pollInterval)\n\t}\n\n\treturn FunctionScaleResult{\n\t\tError:     nil,\n\t\tAvailable: true,\n\t\tFound:     true,\n\t\tDuration:  time.Since(start),\n\t}\n}\n"
  },
  {
    "path": "agent/pkg/scaling/ranges.go",
    "content": "package scaling\n\nimport \"time\"\n\ntype ScaleType string\n\nconst (\n\t// DefaultMinReplicas is the minimal amount of replicas for a service.\n\tDefaultMinReplicas = 1\n\n\t// DefaultMaxReplicas is the amount of replicas a service will auto-scale up to.\n\tDefaultMaxReplicas = 5\n\n\tDefaultZeroDuration = 3 * time.Minute\n\n\t// DefaultScalingFactor is the defining proportion for the scaling increments.\n\tDefaultScalingFactor = 10\n\n\tScaleTypeRPS      ScaleType = \"rps\"\n\tScaleTypeCapacity ScaleType = \"capacity\"\n\n\t// MinScaleLabel label indicating min scale for an Inference\n\tMinScaleLabel = \"ai.tensorchord.scale.min\"\n\n\t// MaxScaleLabel label indicating max scale for an Inference\n\tMaxScaleLabel = \"ai.tensorchord.scale.max\"\n\n\t// ScalingFactorLabel label indicates the scaling factor for an Inference\n\tScalingFactorLabel = \"ai.tensorchord.scale.factor\"\n\n\t// TargetLoadLabel label indicates the target load for an Inference\n\tTargetLoadLabel = \"ai.tensorchord.scale.target\"\n\n\t// ZeroDurationLabel label indicates the zero duration for an Inference\n\tZeroDurationLabel = \"ai.tensorchord.scale.zero-duration\"\n\n\t// ScaleTypeLabel label indicates the scale type for an Inference\n\tScaleTypeLabel = \"ai.tensorchord.scale.type\"\n\n\tFrameworkLabel = \"ai.tensorchord.framework\"\n)\n"
  },
  {
    "path": "agent/pkg/scaling/retry.go",
    "content": "package scaling\n\nimport (\n\t\"log\"\n\t\"time\"\n)\n\ntype routine func(attempt int) error\n\nfunc Retry(r routine, label string, attempts int, interval time.Duration) error {\n\tvar err error\n\n\tfor i := 0; i < attempts; i++ {\n\t\tres := r(i)\n\t\tif res != nil {\n\t\t\terr = res\n\t\t\tlog.Printf(\"[%s]: %d/%d, error: %s\\n\", label, i, attempts, res)\n\t\t} else {\n\t\t\terr = nil\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(interval)\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "agent/pkg/scaling/service_query.go",
    "content": "// Copyright (c) OpenFaaS Author(s). All rights reserved.\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage scaling\n\nimport \"time\"\n\n// ServiceQuery provides interface for replica querying/setting\ntype ServiceQuery interface {\n\tGetReplicas(service, namespace string) (response ServiceQueryResponse, err error)\n\tSetReplicas(service, namespace string, count uint64) error\n}\n\n// ServiceQueryResponse response from querying a function status\ntype ServiceQueryResponse struct {\n\tFramework         string\n\tTargetLoad        uint64\n\tZeroDuration      time.Duration\n\tReplicas          uint64\n\tMaxReplicas       uint64\n\tMinReplicas       uint64\n\tScalingFactor     uint64\n\tAvailableReplicas uint64\n\tAnnotations       map[string]string\n}\n"
  },
  {
    "path": "agent/pkg/scaling/util.go",
    "content": "package scaling\n\nimport (\n\t\"time\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc AsServerQueryResponse(inf *types.InferenceDeployment) (*ServiceQueryResponse, error) {\n\tif inf == nil {\n\t\treturn nil, nil\n\t}\n\tres := ServiceQueryResponse{}\n\n\tres.Replicas = uint64(inf.Status.Replicas)\n\tres.Annotations = inf.Spec.Annotations\n\tres.AvailableReplicas = uint64(inf.Status.AvailableReplicas)\n\tres.Framework = string(inf.Spec.Framework)\n\tres.MinReplicas = uint64(*inf.Spec.Scaling.MinReplicas)\n\tres.MaxReplicas = uint64(*inf.Spec.Scaling.MaxReplicas)\n\tres.TargetLoad = uint64(*inf.Spec.Scaling.TargetLoad)\n\tres.ZeroDuration = time.Duration(*inf.Spec.Scaling.ZeroDuration) * time.Second\n\treturn &res, nil\n}\n"
  },
  {
    "path": "agent/pkg/server/error.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\n// Error defines a standard application error.\ntype Error struct {\n\t// Machine-readable error code.\n\tHTTPStatusCode int `json:\"http_status_code,omitempty\"`\n\n\t// Human-readable message.\n\tMessage string `json:\"message,omitempty\"`\n\tRequest string `json:\"request,omitempty\"`\n\n\t// Logical operation and nested error.\n\tOp  string `json:\"op,omitempty\"`\n\tErr error  `json:\"error,omitempty\"`\n}\n\n// Error returns the string representation of the error message.\nfunc (e *Error) Error() string {\n\tvar buf bytes.Buffer\n\n\t// Print the current operation in our stack, if any.\n\tif e.Op != \"\" {\n\t\tfmt.Fprintf(&buf, \"%s: \", e.Op)\n\t}\n\n\t// If wrapping an error, print its Error() message.\n\t// Otherwise print the error code & message.\n\tif e.Err != nil {\n\t\tbuf.WriteString(e.Err.Error())\n\t} else {\n\t\tif e.HTTPStatusCode != 0 {\n\t\t\tfmt.Fprintf(&buf, \"<%s> \", http.StatusText(e.HTTPStatusCode))\n\t\t}\n\t\tbuf.WriteString(e.Message)\n\t}\n\treturn buf.String()\n}\n\nfunc NewError(code int, err error, op string) error {\n\treturn &Error{\n\t\tHTTPStatusCode: code,\n\t\tErr:            err,\n\t\tMessage:        err.Error(),\n\t\tOp:             op,\n\t}\n}\n\nfunc errFromErrDefs(err error, op string) error {\n\tif errdefs.IsCancelled(err) {\n\t\treturn NewError(http.StatusRequestTimeout, err, op)\n\t} else if errdefs.IsConflict(err) {\n\t\treturn NewError(http.StatusConflict, err, op)\n\t} else if errdefs.IsDataLoss(err) {\n\t\treturn NewError(http.StatusInternalServerError, err, op)\n\t} else if errdefs.IsDeadline(err) {\n\t\treturn NewError(http.StatusRequestTimeout, err, op)\n\t} else if errdefs.IsForbidden(err) {\n\t\treturn NewError(http.StatusForbidden, err, op)\n\t} else if errdefs.IsInvalidParameter(err) {\n\t\treturn NewError(http.StatusBadRequest, err, op)\n\t} else if 
errdefs.IsNotFound(err) {\n\t\treturn NewError(http.StatusNotFound, err, op)\n\t} else if errdefs.IsNotImplemented(err) {\n\t\treturn NewError(http.StatusNotImplemented, err, op)\n\t} else if errdefs.IsNotModified(err) {\n\t\treturn NewError(http.StatusNotModified, err, op)\n\t} else if errdefs.IsSystem(err) {\n\t\treturn NewError(http.StatusInternalServerError, err, op)\n\t} else if errdefs.IsUnauthorized(err) {\n\t\treturn NewError(http.StatusUnauthorized, err, op)\n\t} else if errdefs.IsUnavailable(err) {\n\t\treturn NewError(http.StatusServiceUnavailable, err, op)\n\t} else if errdefs.IsUnknown(err) {\n\t\treturn NewError(http.StatusInternalServerError, err, op)\n\t}\n\treturn NewError(http.StatusInternalServerError, err, op)\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_build_create.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Create the build.\n// @Description Create the build.\n// @Tags        build\n// @Accept      json\n// @Produce     json\n// @Param       body body     types.Build true \"build\"\n// @Success     200  {object} types.Build\n// @Router      /system/build [post]\nfunc (s *Server) handleBuildCreate(c *gin.Context) error {\n\tvar req types.Build\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, err, \"build-create\")\n\t}\n\n\tif err := s.validator.ValidateBuildRequest(&req); err != nil {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, err, \"build-create\")\n\t}\n\ts.validator.DefaultBuildRequest(&req)\n\n\tinference, err := s.runtime.InferenceGetCRD(req.Spec.Namespace, req.Spec.Name)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"inference-instance-list\")\n\t}\n\n\tif err := s.runtime.BuildCreate(c.Request.Context(), req, inference,\n\t\ts.config.Build.BuilderImage, s.config.Build.BuildkitdAddress,\n\t\ts.config.Build.BuildCtlBin, s.config.Build.BuildImagePullSecret); err != nil {\n\t\tlogrus.Errorf(\"failed to create build: %v\", err)\n\t\treturn errFromErrDefs(err, \"build-create\")\n\t}\n\tc.JSON(http.StatusOK, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_build_get.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Get the build by name.\n// @Description Get the build by name.\n// @Tags        build\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"\n// @Param       name      path     string true \"inference id\"\n// @Success     200       {object} types.Build\n// @Router      /system/build/{name} [get]\nfunc (s *Server) handleBuildGet(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-list\")\n\t}\n\tname := c.Param(\"name\")\n\tif name == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"name is required\"), \"build-get\")\n\t}\n\n\tbuild, err := s.runtime.BuildGet(c.Request.Context(), namespace, name)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"build-get\")\n\t}\n\n\tc.JSON(http.StatusOK, build)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_build_list.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     List the builds.\n// @Description List the builds.\n// @Tags        build\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"\n// @Success     200       {object} []types.Build\n// @Router      /system/build [get]\nfunc (s *Server) handleBuildList(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-list\")\n\t}\n\n\tbuilds, err := s.runtime.BuildList(c.Request.Context(), namespace)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"build-list\")\n\t}\n\tc.JSON(http.StatusOK, builds)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_build_logs.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Get the build logs.\n// @Description Get the build logs.\n// @Tags        log\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true  \"Namespace\"\n// @Param       name      query    string true  \"Build Name\"\n// @Param       instance  query    string false \"Instance\"\n// @Param       tail      query    int    false \"Tail\"\n// @Param       follow    query    bool   false \"Follow\"\n// @Param       since     query    string false \"Since\"\n// @Success     200       {object} []types.Message\n// @Router      /system/logs/build [get]\nfunc (s *Server) handleBuildLogs(c *gin.Context) error {\n\treturn s.getLogsFromRequester(c, s.buildLogRequester)\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_gradio_proxy.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/static\"\n)\n\n// @Summary     Reverse proxy to the backend gradio.\n// @Description Reverse proxy to the backend gradio.\n// @Tags        inference\n// @Accept      */*\n// @Produce     json\n// @Param       id path string true \"Deployment ID\"\n// @Router      /gradio/{id} [get]\n// @Router      /gradio/{id} [post]\n// @Success     201\nfunc (s *Server) proxyGradio(c *gin.Context) error {\n\tremote, err := url.Parse(fmt.Sprintf(\"http://0.0.0.0:%d\", s.config.Server.ServerPort))\n\tif err != nil {\n\t\treturn err\n\t}\n\tproxy := httputil.NewSingleHostReverseProxy(remote)\n\tproxy.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   s.config.ModelZCloud.UpstreamTimeout,\n\t\t\tKeepAlive: s.config.ModelZCloud.UpstreamTimeout,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t\tMaxIdleConns:          s.config.ModelZCloud.MaxIdleConnections,\n\t\tMaxIdleConnsPerHost:   s.config.ModelZCloud.MaxIdleConnectionsPerHost,\n\t\tIdleConnTimeout:       90 * time.Second,\n\t\tTLSHandshakeTimeout:   10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t}\n\n\tuid, deployment, err := s.proxyNoAuth(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns := consts.DefaultPrefix + uid\n\tproxy.Director = func(req *http.Request) {\n\t\treq.Header = c.Request.Header\n\t\treq.Host = remote.Host\n\t\treq.URL.Scheme = remote.Scheme\n\t\treq.URL.Host = remote.Host\n\t\treq.URL.Path = path.Join(\n\t\t\t\"/\", \"inference\", fmt.Sprintf(\"%s.%s\", deployment, ns), c.Param(\"proxyPath\"))\n\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"deployment\": 
deployment,\n\t\t\t\"uid\":        uid,\n\t\t\t\"ns\":         ns,\n\t\t\t\"path\":       req.URL.Path,\n\t\t\t\"remote\":     remote.String(),\n\t\t}).Debug(\"proxying to gradio\")\n\t}\n\n\tproxy.ModifyResponse = func(resp *http.Response) error {\n\t\t// http.StatusSeeOther indicates that the server is still loading.\n\t\tif resp.StatusCode == http.StatusSeeOther {\n\t\t\tresp.StatusCode = http.StatusOK\n\n\t\t\tinstances, err := s.runtime.InferenceInstanceList(ns, deployment)\n\t\t\tif err != nil {\n\t\t\t\treturn NewError(http.StatusInternalServerError, err, \"instance-list\")\n\t\t\t}\n\n\t\t\tbuf, err := static.RenderDeploymentLoadingPage(\"gradio\", resp.Header.Get(\"X-Call-Id\"),\n\t\t\t\t\"We are currently processing your request.\", deployment, instances)\n\t\t\tif err != nil {\n\t\t\t\treturn NewError(http.StatusInternalServerError, err, \"render-loading-page\")\n\t\t\t}\n\t\t\tresp.Body = io.NopCloser(buf)\n\t\t\tresp.ContentLength = int64(buf.Len())\n\t\t\tresp.Header.Set(\"Content-Length\", strconv.Itoa(buf.Len()))\n\t\t\tresp.Header.Set(\"Content-Type\", \"text/html\")\n\t\t\tresp.StatusCode = http.StatusServiceUnavailable\n\t\t}\n\t\treturn nil\n\t}\n\n\tproxy.ServeHTTP(c.Writer, c.Request)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_healthz.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n)\n\n// @Summary     Healthz\n// @Description Healthz\n// @Tags        system\n// @Accept      json\n// @Produce     json\n// @Success     200\n// @Router      /healthz [get]\nfunc (s *Server) handleHealthz(c *gin.Context) error {\n\tc.Status(http.StatusOK)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_healthz_test.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-gonic/gin\"\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n)\n\nvar _ = Describe(\"healthz\", func() {\n\tBeforeEach(func() {\n\t\tserver = &Server{\n\t\t\trouter:        gin.New(),\n\t\t\tmetricsRouter: gin.New(),\n\t\t\truntime:       mockRuntime,\n\t\t}\n\t})\n\tIt(\"healthz\", func() {\n\t\tc := mkContext(\"GET\", \"/\", nil, nil)\n\t\terr := server.handleHealthz(c)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n})\n"
  },
  {
    "path": "agent/pkg/server/handler_image_cache.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Create the image cache.\n// @Description Create the image cache.\n// @Tags        image-cache\n// @Accept      json\n// @Produce     json\n// @Param       body body     types.ImageCache true \"image-cache\"\n// @Success     201  {object} types.ImageCache\n// @Router      /system/image-cache [post]\nfunc (s *Server) handleImageCacheCreate(c *gin.Context) error {\n\tvar req types.ImageCache\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, err, \"image-cache-create\")\n\t}\n\n\tif err := s.validator.ValidateImageCacheRequest(&req); err != nil {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, err, \"image-cache-create\")\n\t}\n\tinference, err := s.runtime.InferenceGetCRD(req.Namespace, req.Name)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"inference-instance-list\")\n\t}\n\n\tif err := s.runtime.ImageCacheCreate(c.Request.Context(), req, inference); err != nil {\n\t\treturn errFromErrDefs(err, \"image-cache-create\")\n\t}\n\tc.JSON(http.StatusOK, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_create.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n)\n\n// @Summary     Create the inferences.\n// @Description Create the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       request body     types.InferenceDeployment true \"query params\"\n// @Success     201     {object} types.InferenceDeployment\n// @Router      /system/inferences [post]\nfunc (s *Server) handleInferenceCreate(c *gin.Context) error {\n\tevent := types.DeploymentCreateEvent\n\n\tvar req types.InferenceDeployment\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\tif s.config.ModelZCloud.Enabled {\n\t\tns := req.Spec.Namespace\n\t\tuser, err := client.GetUserIDFromNamespace(ns)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if user == \"\" {\n\t\t\treturn fmt.Errorf(\"user id is empty\")\n\t\t}\n\t\ts.cache.SetWithTTL(req.Spec.Name, user, 1, 0)\n\n\t\texist := s.runtime.NamespaceGet(c.Request.Context(), ns)\n\t\tif !exist {\n\t\t\tif err := s.runtime.NamespaceCreate(c.Request.Context(), ns); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Set the default values.\n\ts.validator.DefaultDeployRequest(&req)\n\n\t// Validate the request.\n\tif err := s.validator.ValidateDeployRequest(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\t// Create the inference.\n\tif err := s.runtime.InferenceCreate(c.Request.Context(), req,\n\t\ts.config.Ingress, event, s.config.Server.ServerPort); err != nil {\n\t\treturn errFromErrDefs(err, event)\n\t}\n\tc.JSON(http.StatusCreated, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_create_test.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/golang/mock/gomock\"\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/validator\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n)\n\nvar _ = Describe(\"inference create\", func() {\n\tBeforeEach(func() {\n\t\tserver = &Server{\n\t\t\trouter:        gin.New(),\n\t\t\tmetricsRouter: gin.New(),\n\t\t\truntime:       mockRuntime,\n\t\t\tvalidator:     validator.New(),\n\t\t}\n\t})\n\tIt(\"invalid request - nil\", func() {\n\t\tc := mkContext(\"GET\", \"/\", nil, nil)\n\t\terr := server.handleInferenceCreate(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"invalid request - empty\", func() {\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, types.InferenceDeployment{})\n\t\terr := server.handleInferenceCreate(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"good request\", func() {\n\t\tmockRuntime.EXPECT().InferenceCreate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(nil)\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, types.InferenceDeployment{\n\t\t\tSpec: types.InferenceDeploymentSpec{\n\t\t\t\tName:  \"abc\",\n\t\t\t\tImage: \"mock-image\",\n\t\t\t\tPort:  Ptr(int32(123)),\n\t\t\t},\n\t\t})\n\t\terr := server.handleInferenceCreate(c)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n})\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_delete.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Delete the inferences.\n// @Description Delete the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       request   body     types.DeleteFunctionRequest true \"query params\"\n// @Param       namespace query    string                      true \"Namespace\"\n// @Success     202       {object} types.DeleteFunctionRequest\n// @Router      /system/inferences [delete]\nfunc (s *Server) handleInferenceDelete(c *gin.Context) error {\n\tevent := types.DeploymentDeleteEvent\n\tvar req types.DeleteFunctionRequest\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest,\n\t\t\terrors.New(\"namespace is required\"), event)\n\t}\n\n\tif req.FunctionName == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest,\n\t\t\terrors.New(\"function name is required\"), event)\n\t}\n\n\tif err := s.runtime.InferenceDelete(c.Request.Context(),\n\t\tnamespace, req.FunctionName, s.config.Ingress.Namespace, event); err != nil {\n\t\treturn errFromErrDefs(err, event)\n\t}\n\n\tc.JSON(http.StatusAccepted, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_delete_test.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/golang/mock/gomock\"\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/validator\"\n)\n\nvar _ = Describe(\"inference delete\", func() {\n\tBeforeEach(func() {\n\t\tserver = &Server{\n\t\t\trouter:        gin.New(),\n\t\t\tmetricsRouter: gin.New(),\n\t\t\truntime:       mockRuntime,\n\t\t\tvalidator:     validator.New(),\n\t\t}\n\t})\n\tIt(\"invalid request - nil\", func() {\n\t\tc := mkContext(\"GET\", \"/\", nil, nil)\n\t\terr := server.handleInferenceDelete(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"invalid request - empty\", func() {\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, types.DeleteFunctionRequest{})\n\t\terr := server.handleInferenceDelete(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"good request\", func() {\n\t\tmockRuntime.EXPECT().InferenceDelete(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(nil)\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, types.DeleteFunctionRequest{\n\t\t\tFunctionName: \"mock-inference\",\n\t\t})\n\t\tsetQuery(c, map[string]string{\"namespace\": \"mock-namespace\"})\n\t\terr := server.handleInferenceDelete(c)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n})\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_get.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Get the inference by name.\n// @Description Get the inference by name.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"\n// @Param       name      path     string true \"inference id\"\n// @Success     200       {object} types.InferenceDeployment\n// @Router      /system/inference/{name} [get]\nfunc (s *Server) handleInferenceGet(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-get\")\n\t}\n\tname := c.Param(\"name\")\n\tif name == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"name is required\"), \"inference-get\")\n\t}\n\n\tfunction, err := s.runtime.InferenceGet(namespace, name)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"inference-get\")\n\t}\n\n\tc.JSON(http.StatusOK, function)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_get_test.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/golang/mock/gomock\"\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/validator\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n)\n\nvar _ = Describe(\"inference get\", func() {\n\tBeforeEach(func() {\n\t\tserver = &Server{\n\t\t\trouter:        gin.New(),\n\t\t\tmetricsRouter: gin.New(),\n\t\t\truntime:       mockRuntime,\n\t\t\tvalidator:     validator.New(),\n\t\t}\n\t})\n\tIt(\"invalid request - no namespace\", func() {\n\t\tc := mkContext(\"GET\", \"/\", nil, nil)\n\t\terr := server.handleInferenceGet(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"invalid request - no name\", func() {\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, nil)\n\t\tsetQuery(c, map[string]string{\"namespace\": \"mock-namespace\"})\n\t\terr := server.handleInferenceGet(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"invalid request - mock error\", func() {\n\t\tmockRuntime.EXPECT().InferenceGet(gomock.Any(), gomock.Any()).Times(1).Return(nil, errors.New(\"mock-error\"))\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, nil)\n\t\tsetQuery(c, map[string]string{\"namespace\": \"mock-namespace\"})\n\t\tsetParam(c, map[string]string{\"name\": \"mock-name\"})\n\t\terr := server.handleInferenceGet(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"good request\", func() {\n\t\tmockRuntime.EXPECT().InferenceGet(gomock.Any(), gomock.Any()).Times(1).Return(Ptr(types.InferenceDeployment{}), nil)\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, nil)\n\t\tsetQuery(c, map[string]string{\"namespace\": \"mock-namespace\"})\n\t\tsetParam(c, map[string]string{\"name\": \"mock-name\"})\n\t\terr := server.handleInferenceGet(c)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n})\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_instance.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     List the inference instances.\n// @Description List the inference instances.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"\n// @Param       name      path     string true \"Name\"\n// @Success     200       {object} []types.InferenceDeployment\n// @Router      /system/inference/{name}/instances [get]\nfunc (s *Server) handleInferenceInstance(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(http.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-instance-list\")\n\t}\n\tname := c.Param(\"name\")\n\tif name == \"\" {\n\t\treturn NewError(http.StatusBadRequest, errors.New(\"name is required\"),\n\t\t\t\"inference-instance-list\")\n\t}\n\n\tinstances, err := s.runtime.InferenceInstanceList(namespace, name)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"inference-instance-list\")\n\t}\n\tc.JSON(http.StatusOK, instances)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_instance_exec.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Attach to the inference instance.\n// @Description Attach to the inference instance.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"\n// @Param       name      path     string true \"Name\"\n// @Param       instance  path     string true \"Instance name\"\n// @Success     200       {object} []types.InferenceDeployment\n// @Router      /system/inference/{name}/instance/{instance} [post]\nfunc (s *Server) handleInferenceInstanceExec(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(http.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-instance-list\")\n\t}\n\tname := c.Param(\"name\")\n\tif name == \"\" {\n\t\treturn NewError(http.StatusBadRequest, errors.New(\"name is required\"),\n\t\t\t\"inference-instance-list\")\n\t}\n\tinstance := c.Param(\"instance\")\n\tif name == \"\" {\n\t\treturn NewError(http.StatusBadRequest, errors.New(\"instance is required\"),\n\t\t\t\"inference-instance-list\")\n\t}\n\n\ttty := c.Query(\"tty\")\n\tif tty == \"\" {\n\t\ttty = \"false\"\n\t}\n\tttyBoolean, err := strconv.ParseBool(tty)\n\tif err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, \"inference-instance-exec\")\n\t}\n\n\tcommand := c.Query(\"command\")\n\tcommandSlice := strings.Split(command, \",\")\n\n\tif err := s.runtime.InferenceExec(\n\t\tc, namespace, instance, commandSlice, ttyBoolean); err != nil {\n\t\treturn errFromErrDefs(err, \"inference-instance-exec\")\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_list.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     List the inferences.\n// @Description List the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"\n// @Success     200       {object} []types.InferenceDeployment\n// @Router      /system/inferences [get]\nfunc (s *Server) handleInferenceList(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-list\")\n\t}\n\n\tinferenes, err := s.runtime.InferenceList(namespace)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"inference-list\")\n\t}\n\t// Add invocation count metrics into the body.\n\t// TODO: https://github.com/tensorchord/openmodelz/issues/203\n\ts.prometheusClient.AddMetrics(inferenes)\n\n\tc.JSON(http.StatusOK, inferenes)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_logs.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/log\"\n)\n\n// @Summary     Get the inference logs.\n// @Description Get the inference logs.\n// @Tags        log\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true  \"Namespace\"\n// @Param       name      query    string true  \"Name\"\n// @Param       instance  query    string false \"Instance\"\n// @Param       tail      query    int    false \"Tail\"\n// @Param       follow    query    bool   false \"Follow\"\n// @Param       since     query    string false \"Since\"\n// @Param       end       query    string false \"End\"\n// @Success     200       {object} []types.Message\n// @Router      /system/logs/inference [get]\nfunc (s *Server) handleInferenceLogs(c *gin.Context) error {\n\treturn s.getLogsFromRequester(c, s.deploymentLogRequester)\n}\n\nfunc (s Server) getLogsFromRequester(c *gin.Context, requester log.Requester) error {\n\tcn, ok := c.Writer.(http.CloseNotifier)\n\tif !ok {\n\t\treturn NewError(http.StatusNotFound, errors.New(\"LogHandler: response is not a CloseNotifier, required for streaming response\"), \"log-get\")\n\t}\n\tflusher, ok := c.Writer.(http.Flusher)\n\tif !ok {\n\t\treturn NewError(http.StatusNotFound, errors.New(\"LogHandler: response is not a Flusher, required for streaming response\"), \"log-get\")\n\t}\n\n\tvar req types.LogRequest\n\tif err := c.ShouldBindQuery(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, \"log-get\")\n\t}\n\t_ = cn\n\n\ttimeout := s.config.Inference.LogTimeout\n\tif req.Follow {\n\t\t// use a much larger timeout for streaming log\n\t\ttimeout = time.Hour\n\t}\n\n\tctx, cancelQuery := context.WithTimeout(c.Request.Context(), timeout)\n\tdefer 
cancelQuery()\n\n\tmessages, err := requester.Query(ctx, req)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"log-get\")\n\t}\n\n\t// Send the initial headers saying we're gonna stream the response.\n\tc.Header(\"Content-Type\", \"application/x-ndjson\")\n\tc.Header(\"Transfer-Encoding\", \"chunked\")\n\tc.Header(\"Connection\", \"Keep-Alive\")\n\tflusher.Flush()\n\n\tdefer flusher.Flush()\n\tdefer c.Writer.Write([]byte{})\n\tdefer flusher.Flush()\n\n\tjsonEncoder := json.NewEncoder(c.Writer)\n\tfor messages != nil {\n\t\tselect {\n\t\tcase <-cn.CloseNotify():\n\t\t\ts.logger.WithField(\"req\", req).\n\t\t\t\tDebug(\"client closed connection\")\n\t\t\treturn nil\n\t\tcase msg, ok := <-messages:\n\t\t\tif !ok {\n\t\t\t\ts.logger.WithField(\"req\", req).\n\t\t\t\t\tDebug(\"log stream closed\")\n\t\t\t\tmessages = nil\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// serialize and write the msg to the http ResponseWriter\n\t\t\terr := jsonEncoder.Encode(msg)\n\t\t\tif err != nil {\n\t\t\t\t// can't actually write the status header here so we should json serialize an error\n\t\t\t\t// and return that because we have already sent the content type and status code\n\t\t\t\ts.logger.WithError(err).Error(\"LogHandler: failed to serialize log message\")\n\t\t\t\t// write json error message here ?\n\t\t\t\tjsonEncoder.Encode(types.Message{Text: \"failed to serialize log message\"})\n\t\t\t\tflusher.Flush()\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tflusher.Flush()\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_proxy.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\n// @Summary     Inference.\n// @Description Inference proxy.\n// @Tags        inference-proxy\n// @Accept      json\n// @Produce     json\n// @Param       name path string true \"inference id\"\n// @Router      /inference/{name} [post]\n// @Router      /inference/{name} [get]\n// @Router      /inference/{name} [put]\n// @Router      /inference/{name} [delete]\n// @Success     200\n// @Failure     303\n// @Failure     400\n// @Failure     404\n// @Failure     500\nfunc (s *Server) handleInferenceProxy(c *gin.Context) error {\n\tnamespacedName := c.Param(\"name\")\n\tif namespacedName == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"name is required\"), \"inference-proxy\")\n\t}\n\n\tnamespace, name, err := getNamespaceAndName(namespacedName)\n\tif err != nil {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, err, \"inference-proxy\")\n\t}\n\n\t// Update metrics.\n\ts.metricsOptions.GatewayInferenceInvocationStarted.\n\t\tWithLabelValues(namespacedName).Inc()\n\ts.metricsOptions.GatewayInferenceInvocationInflight.\n\t\tWithLabelValues(namespacedName).Inc()\n\tstart := time.Now()\n\tlabel := prometheus.Labels{\"inference_name\": namespacedName, \"code\": strconv.Itoa(http.StatusProcessing)}\n\tdefer func() {\n\t\ts.metricsOptions.GatewayInferenceInvocationInflight.\n\t\t\tWithLabelValues(namespacedName).Dec()\n\t\ts.metricsOptions.GatewayInferencesHistogram.With(label).\n\t\t\tObserve(time.Since(start).Seconds())\n\t\ts.metricsOptions.GatewayInferenceInvocation.With(label).Inc()\n\t}()\n\n\tres := s.scaler.Scale(c.Request.Context(), namespace, name)\n\tif !res.Found {\n\t\tlabel[\"code\"] = 
strconv.Itoa(http.StatusNotFound)\n\t\treturn NewError(\n\t\t\thttp.StatusNotFound, errors.New(\"inference not found\"), \"inference-proxy\")\n\t} else if res.Error != nil {\n\t\tlabel[\"code\"] = strconv.Itoa(http.StatusInternalServerError)\n\t\treturn NewError(\n\t\t\thttp.StatusInternalServerError, res.Error, \"inference-proxy\")\n\t}\n\n\tif res.Available {\n\t\tstatusCode, err := s.forward(c, namespace, name)\n\t\tif err != nil {\n\t\t\tlabel[\"code\"] = strconv.Itoa(statusCode)\n\t\t\treturn NewError(statusCode, err, \"inference-proxy\")\n\t\t}\n\t\tlabel[\"code\"] = strconv.Itoa(statusCode)\n\t\treturn nil\n\t} else {\n\t\t// The inference is still being created.\n\t\tlabel[\"code\"] = strconv.Itoa(http.StatusSeeOther)\n\t\treturn NewError(http.StatusSeeOther,\n\t\t\tfmt.Errorf(\"inference %s is not available\", name), \"inference-proxy\")\n\t}\n}\n\nfunc (s *Server) forward(c *gin.Context, namespace, name string) (int, error) {\n\tbackendURL, err := s.endpointResolver.Resolve(namespace, name)\n\tif err != nil {\n\t\treturn 0, errdefs.InvalidParameter(err)\n\t}\n\tdefer s.endpointResolver.Close(backendURL)\n\n\tproxyServer := httputil.ReverseProxy{}\n\tproxyServer.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   s.config.Server.ReadTimeout,\n\t\t\tKeepAlive: s.config.Server.ReadTimeout,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t}\n\tproxyServer.Director = func(req *http.Request) {\n\t\ttargetQuery := backendURL.RawQuery\n\t\treq.URL.Scheme = backendURL.Scheme\n\t\treq.URL.Host = backendURL.Host\n\t\tif targetQuery == \"\" || req.URL.RawQuery == \"\" {\n\t\t\treq.URL.RawQuery = targetQuery + req.URL.RawQuery\n\t\t} else {\n\t\t\treq.URL.RawQuery = targetQuery + \"&\" + req.URL.RawQuery\n\t\t}\n\t\treq.URL.Path = c.Param(\"proxyPath\")\n\t\tif req.URL.Path == \"\" {\n\t\t\treq.URL.Path = \"/\"\n\t\t}\n\n\t\ts.logger.WithField(\"url\", backendURL.String()).\n\t\t\tWithField(\"path\", 
req.URL.Path).\n\t\t\tWithField(\"header\", req.Header).\n\t\t\tWithField(\"raw-query\", req.URL.RawQuery).Debug(\"reverse proxy\")\n\t}\n\n\tvar statusCode int\n\tproxyServer.ModifyResponse = func(resp *http.Response) error {\n\t\tstatusCode = resp.StatusCode\n\t\treturn nil\n\t}\n\n\tproxyServer.ServeHTTP(c.Writer, c.Request)\n\treturn statusCode, nil\n}\n\nfunc getNamespaceAndName(name string) (string, string, error) {\n\tif !strings.Contains(name, \".\") {\n\t\treturn \"\", \"\", fmt.Errorf(\"name is not namespaced\")\n\t}\n\tnamespace := name[strings.LastIndexAny(name, \".\")+1:]\n\tinfName := strings.TrimSuffix(name, \".\"+namespace)\n\n\tif namespace == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"namespace is empty\")\n\t}\n\n\tif infName == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"inference name is empty\")\n\t}\n\treturn namespace, infName, nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_scale.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Scale the inferences.\n// @Description Scale the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string                    true \"Namespace\"\n// @Param       request   body     types.ScaleServiceRequest true \"query params\"\n// @Success     202       {object} []types.ScaleServiceRequest\n// @Failure     400\n// @Router      /system/scale-inference [post]\nfunc (s *Server) handleInferenceScale(c *gin.Context) error {\n\tvar req types.ScaleServiceRequest\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, err, \"inference-scale\")\n\t}\n\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-scale\")\n\t}\n\n\tinf, err := s.runtime.InferenceGet(namespace, req.ServiceName)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"inference-scale\")\n\t}\n\n\tif err := s.runtime.InferenceScale(c.Request.Context(),\n\t\tnamespace, req, inf); err != nil {\n\t\treturn errFromErrDefs(err, \"inference-scale\")\n\t}\n\n\tc.JSON(http.StatusAccepted, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_inference_update.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Update the inferences.\n// @Description Update the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       request   body     types.InferenceDeployment true \"query params\"\n// @Param       namespace query    string                    true \"Namespace\"\n// @Success     202       {object} types.InferenceDeployment\n// @Router      /system/inferences [put]\nfunc (s *Server) handleInferenceUpdate(c *gin.Context) error {\n\tevent := types.DeploymentUpdateEvent\n\tvar req types.InferenceDeployment\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest,\n\t\t\terrors.New(\"namespace is required\"), event)\n\t}\n\n\tif err := s.validator.ValidateDeployRequest(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\tif err := s.runtime.InferenceUpdate(c.Request.Context(),\n\t\tnamespace, req, event); err != nil {\n\t\treturn errFromErrDefs(err, event)\n\t}\n\n\tc.JSON(http.StatusAccepted, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_info.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/version\"\n)\n\n// @Summary     Get system info.\n// @Description Get system info.\n// @Tags        system\n// @Accept      json\n// @Produce     json\n// @Success     200 {object} types.ProviderInfo\n// @Router      /system/info [get]\nfunc (s *Server) handleInfo(c *gin.Context) error {\n\tv := version.GetVersion()\n\tc.JSON(http.StatusOK, types.ProviderInfo{\n\t\tName:          \"agent\",\n\t\tOrchestration: \"kubernetes\",\n\t\tVersion: &types.VersionInfo{\n\t\t\tVersion:      v.Version,\n\t\t\tBuildDate:    v.BuildDate,\n\t\t\tGitCommit:    v.GitCommit,\n\t\t\tGitTag:       v.GitTag,\n\t\t\tGitTreeState: v.GitTreeState,\n\t\t\tGoVersion:    v.GoVersion,\n\t\t\tCompiler:     v.Compiler,\n\t\t\tPlatform:     v.Platform,\n\t\t},\n\t})\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_mosec_proxy.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n)\n\n// @Summary     Proxy to the backend mosec.\n// @Description Proxy to the backend mosec.\n// @Tags        inference\n// @Accept      */*\n// @Produce     json\n// @Param       id path string true \"Deployment ID\"\n// @Router      /mosec/{id} [get]\n// @Router      /mosec/{id}/metrics [get]\n// @Router      /mosec/{id}/inference [post]\n// @Success     201\nfunc (s *Server) proxyMosec(c *gin.Context) error {\n\tuid, deployment, err := s.proxyAuth(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Request.URL.Path = path.Join(\n\t\t\"/\", \"inference\", fmt.Sprintf(\"%s.%s\", deployment, consts.DefaultPrefix+uid), c.Param(\"proxyPath\"))\n\treturn s.handleInferenceProxy(c)\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_namespace_create.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Create the namespace.\n// @Description Create the namespace.\n// @Tags        namespace\n// @Accept      json\n// @Produce     json\n// @Param       body body     types.NamespaceRequest true \"Namespace name\"\n// @Success     200  {object} types.NamespaceRequest\n// @Router      /system/namespaces [post]\nfunc (s *Server) handleNamespaceCreate(c *gin.Context) error {\n\tvar req types.NamespaceRequest\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, \"namespace-create\")\n\t}\n\n\tif err := s.runtime.NamespaceCreate(c.Request.Context(), req.Name); err != nil {\n\t\treturn errFromErrDefs(err, \"namespace-create\")\n\t}\n\n\tc.JSON(http.StatusOK, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_namespace_delete.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Delete the namespace.\n// @Description Delete the namespace.\n// @Tags        namespace\n// @Accept      json\n// @Produce     json\n// @Param       body body     types.NamespaceRequest true \"Namespace name\"\n// @Success     200  {object} types.NamespaceRequest\n// @Router      /system/namespaces [delete]\nfunc (s *Server) handleNamespaceDelete(c *gin.Context) error {\n\tvar req types.NamespaceRequest\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, \"namespace-delete\")\n\t}\n\n\tif err := s.runtime.NamespaceDelete(c.Request.Context(), req.Name); err != nil {\n\t\treturn errFromErrDefs(err, \"namespace-delete\")\n\t}\n\n\tc.JSON(http.StatusOK, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_namespace_delete_test.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/golang/mock/gomock\"\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/validator\"\n)\n\nvar _ = Describe(\"namespace delete\", func() {\n\tBeforeEach(func() {\n\t\tserver = &Server{\n\t\t\trouter:        gin.New(),\n\t\t\tmetricsRouter: gin.New(),\n\t\t\truntime:       mockRuntime,\n\t\t\tvalidator:     validator.New(),\n\t\t}\n\t})\n\tIt(\"invalid request - nil\", func() {\n\t\tc := mkContext(\"GET\", \"/\", nil, nil)\n\t\terr := server.handleNamespaceDelete(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"invalid request - mock error\", func() {\n\t\tmockRuntime.EXPECT().NamespaceDelete(gomock.Any(), gomock.Any()).Times(1).Return(errors.New(\"mock-error\"))\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, types.NamespaceRequest{\n\t\t\tName: \"mock-ns\",\n\t\t})\n\t\terr := server.handleNamespaceDelete(c)\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\tIt(\"good request\", func() {\n\t\tmockRuntime.EXPECT().NamespaceDelete(gomock.Any(), gomock.Any()).Times(1).Return(nil)\n\t\tc := mkJsonBodyContext(\"GET\", \"/\", nil, types.NamespaceRequest{\n\t\t\tName: \"mock-ns\",\n\t\t})\n\t\terr := server.handleNamespaceDelete(c)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n})\n"
  },
  {
    "path": "agent/pkg/server/handler_namespace_list.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n)\n\n// @Summary     List the namespaces.\n// @Description List the namespaces.\n// @Tags        namespace\n// @Accept      json\n// @Produce     json\n// @Success     200 {object} []string\n// @Router      /system/namespaces [get]\nfunc (s *Server) handleNamespaceList(c *gin.Context) error {\n\tns, err := s.runtime.NamespaceList(c.Request.Context())\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"namespace-list\")\n\t}\n\tc.JSON(http.StatusOK, ns)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_other_proxy.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n)\n\n// @Summary     Reverse proxy to the backend other.\n// @Description Reverse proxy to the backend other.\n// @Tags        inference\n// @Accept      */*\n// @Produce     json\n// @Param       id path string true \"Deployment ID\"\n// @Router      /other/{id} [get]\n// @Router      /other/{id} [post]\n// @Success     201\nfunc (s *Server) proxyOther(c *gin.Context) error {\n\tremote, err := url.Parse(fmt.Sprintf(\"http://0.0.0.0:%d\", s.config.Server.ServerPort))\n\tif err != nil {\n\t\treturn err\n\t}\n\tproxy := httputil.NewSingleHostReverseProxy(remote)\n\tproxy.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   s.config.ModelZCloud.UpstreamTimeout,\n\t\t\tKeepAlive: s.config.ModelZCloud.UpstreamTimeout,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t\tMaxIdleConns:          s.config.ModelZCloud.MaxIdleConnections,\n\t\tMaxIdleConnsPerHost:   s.config.ModelZCloud.MaxIdleConnectionsPerHost,\n\t\tIdleConnTimeout:       90 * time.Second,\n\t\tTLSHandshakeTimeout:   10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t}\n\n\tuid, deployment, err := s.proxyAuth(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproxy.Director = func(req *http.Request) {\n\t\treq.Header = c.Request.Header\n\t\treq.Host = remote.Host\n\t\treq.URL.Scheme = remote.Scheme\n\t\treq.URL.Host = remote.Host\n\t\treq.URL.Path = path.Join(\n\t\t\t\"/\", \"inference\", fmt.Sprintf(\"%s.%s\", deployment, consts.DefaultPrefix+uid), c.Param(\"proxyPath\"))\n\t}\n\n\tproxy.ServeHTTP(c.Writer, c.Request)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_root.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/static\"\n)\n\nfunc (s *Server) handleRoot(c *gin.Context) error {\n\tlp, err := static.RenderLoadingPage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Data(200, \"text/html; charset=utf-8\", lp.Bytes())\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_server_delete.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n)\n\n// @Summary     Delete a node from the cluster.\n// @Description Delete a node.\n// @Tags        namespace\n// @Param       name path string true \"Server Name\"\n// @Accept      json\n// @Produce     json\n// @Success     200\n// @Router      /system/server/{name}/delete [delete]\nfunc (s *Server) handleServerDelete(c *gin.Context) error {\n\tname := c.Param(\"name\")\n\tif name == \"\" {\n\t\treturn NewError(http.StatusBadRequest, errors.New(\"name is required\"), \"server-delete-node\")\n\t}\n\terr := s.runtime.ServerDeleteNode(c.Request.Context(), name)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"server-delete-node\")\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_server_label_create.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     List the servers.\n// @Description List the servers.\n// @Tags        namespace\n// @Param       name    path string           true \"Server Name\"\n// @Param       request body types.ServerSpec true \"query params\"\n// @Accept      json\n// @Produce     json\n// @Success     200 {object} []string\n// @Router      /system/server/{name}/labels [post]\nfunc (s *Server) handleServerLabelCreate(c *gin.Context) error {\n\tname := c.Param(\"name\")\n\tif name == \"\" {\n\t\treturn NewError(http.StatusBadRequest, errors.New(\"name is required\"),\n\t\t\t\"server-label-create\")\n\t}\n\n\tvar req types.ServerSpec\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, \"server-label-create\")\n\t}\n\n\terr := s.runtime.ServerLabelCreate(c.Request.Context(), name, req)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"namespace-list\")\n\t}\n\tc.JSON(http.StatusOK, req)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_server_list.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     List the servers.\n// @Description List the servers.\n// @Tags        namespace\n// @Accept      json\n// @Produce     json\n// @Success     200 {object} []types.Server\n// @Router      /system/servers [get]\nfunc (s *Server) handleServerList(c *gin.Context) error {\n\tns := []types.Server{}\n\tns, err := s.runtime.ServerList(c.Request.Context())\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"namespace-list\")\n\t}\n\tc.JSON(http.StatusOK, ns)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/handler_streamlit_proxy.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/static\"\n)\n\n// @Summary     Reverse proxy to streamlit.\n// @Description Reverse proxy to streamlit.\n// @Tags        inference\n// @Accept      */*\n// @Produce     json\n// @Param       id path string true \"Deployment ID\"\n// @Router      /streamlit/{id} [get]\n// @Router      /streamlit/{id} [post]\n// @Success     201\nfunc (s *Server) proxyStreamlit(c *gin.Context) error {\n\tremote, err := url.Parse(fmt.Sprintf(\"http://0.0.0.0:%d\", s.config.Server.ServerPort))\n\tif err != nil {\n\t\treturn err\n\t}\n\tproxy := httputil.NewSingleHostReverseProxy(remote)\n\tproxy.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   s.config.ModelZCloud.UpstreamTimeout,\n\t\t\tKeepAlive: s.config.ModelZCloud.UpstreamTimeout,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t\tMaxIdleConns:          s.config.ModelZCloud.MaxIdleConnections,\n\t\tMaxIdleConnsPerHost:   s.config.ModelZCloud.MaxIdleConnectionsPerHost,\n\t\tIdleConnTimeout:       90 * time.Second,\n\t\tTLSHandshakeTimeout:   10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t}\n\n\tuid, deployment, err := s.proxyNoAuth(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns := consts.DefaultPrefix + uid\n\tproxy.Director = func(req *http.Request) {\n\t\treq.Header = c.Request.Header\n\t\treq.Host = remote.Host\n\t\treq.URL.Scheme = remote.Scheme\n\t\treq.URL.Host = remote.Host\n\t\treq.URL.Path = path.Join(\n\t\t\t\"/\", \"inference\", fmt.Sprintf(\"%s.%s\", deployment, ns), c.Param(\"proxyPath\"))\n\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"deployment\": deployment,\n\t\t\t\"uid\":       
 uid,\n\t\t\t\"ns\":         ns,\n\t\t\t\"path\":       req.URL.Path,\n\t\t\t\"remote\":     remote.String(),\n\t\t}).Debug(\"proxying to streamlit\")\n\t}\n\tproxy.ModifyResponse = func(resp *http.Response) error {\n\t\tif resp.StatusCode == http.StatusSeeOther {\n\t\t\tresp.StatusCode = http.StatusOK\n\t\t\tinstances, err := s.runtime.InferenceInstanceList(ns, deployment)\n\t\t\tif err != nil {\n\t\t\t\treturn NewError(http.StatusInternalServerError, err, \"instance-list\")\n\t\t\t}\n\n\t\t\tbuf, err := static.RenderDeploymentLoadingPage(\"streamlit\", resp.Header.Get(\"X-Call-Id\"),\n\t\t\t\t\"We are currently processing your request.\", deployment, instances)\n\t\t\tif err != nil {\n\t\t\t\treturn NewError(http.StatusInternalServerError, err, \"render-loading-page\")\n\t\t\t}\n\t\t\tresp.Body = io.NopCloser(buf)\n\t\t\tresp.ContentLength = int64(buf.Len())\n\t\t\tresp.Header.Set(\"Content-Length\", strconv.Itoa(buf.Len()))\n\t\t\tresp.Header.Set(\"Content-Type\", \"text/html\")\n\t\t\tresp.StatusCode = http.StatusServiceUnavailable\n\t\t}\n\t\treturn nil\n\t}\n\n\tproxy.ServeHTTP(c.Writer, c.Request)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/middleware_callid.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/google/uuid\"\n)\n\nfunc (s Server) middlewareCallID(c *gin.Context) error {\n\tstart := time.Now()\n\tif len(c.Request.Header.Get(\"X-Call-Id\")) == 0 {\n\t\tcallID := uuid.New().String()\n\t\tc.Request.Header.Add(\"X-Call-Id\", callID)\n\t\tc.Writer.Header().Add(\"X-Call-Id\", callID)\n\t}\n\n\tc.Request.Header.Add(\"X-Start-Time\", fmt.Sprintf(\"%d\", start.UTC().UnixNano()))\n\tc.Writer.Header().Add(\"X-Start-Time\", fmt.Sprintf(\"%d\", start.UTC().UnixNano()))\n\n\tc.Next()\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/proxy_auth.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n)\n\nfunc (s *Server) proxyAuth(c *gin.Context) (string, string, error) {\n\tvar uid string\n\tvar valid bool\n\n\tdeployment := c.Param(\"id\")\n\tif len(deployment) == 0 {\n\t\treturn \"\", \"\", errFromErrDefs(\n\t\t\tfmt.Errorf(\"cannot find the deployment name in %s\", c.Request.RequestURI), \"get-deployment\")\n\t}\n\n\tkey := c.GetHeader(\"X-API-Key\")\n\t// Be compatible with the OpenAI API.\n\trawKeyStr := c.GetHeader(\"Authorization\")\n\tlogrus.Debug(\"proxyOther: key: \", key, \", rawKeyStr: \", rawKeyStr)\n\n\tif s.validateUnifiedKey(key) {\n\t\t// uid 0 means to use unified api key\n\t\tuid = \"00000000-0000-0000-0000-000000000000\"\n\t} else if len(key) > 0 {\n\t\tuid, valid = s.validateAPIKey(key)\n\t\tif !valid {\n\t\t\treturn \"\", \"\", errdefs.Unauthorized(fmt.Errorf(\"invalid API key\"))\n\t\t}\n\t} else if len(rawKeyStr) > 0 {\n\t\tstrs := strings.Split(rawKeyStr, \" \")\n\t\tif len(strs) != 2 {\n\t\t\treturn \"\", \"\", errdefs.Unauthorized(fmt.Errorf(\"invalid Authorization API key\"))\n\t\t}\n\n\t\tif strs[0] != \"Bearer\" {\n\t\t\treturn \"\", \"\", errdefs.Unauthorized(fmt.Errorf(\"invalid Authorization API key\"))\n\t\t}\n\n\t\tuid, valid = s.validateAPIKey(strs[1])\n\t\tif !valid {\n\t\t\treturn \"\", \"\", errdefs.Unauthorized(fmt.Errorf(\"invalid Authorization API key\"))\n\t\t}\n\t}\n\n\tif len(uid) == 0 {\n\t\treturn \"\", \"\", errdefs.Unauthorized(fmt.Errorf(\"invalid API key\"))\n\t}\n\treturn uid, deployment, nil\n}\n\nfunc (s *Server) proxyNoAuth(c *gin.Context) (string, string, error) {\n\tdeployment := c.Param(\"id\")\n\tif len(deployment) == 0 {\n\t\treturn \"\", \"\", errdefs.InvalidParameter(\n\t\t\tfmt.Errorf(\"cannot find the deployment name in %s\", 
c.Request.RequestURI))\n\t}\n\n\tuid, found := s.getUIDFromDeploymentID(c.Request.Context(), deployment)\n\tif !found {\n\t\treturn \"\", \"\", errdefs.InvalidParameter(\n\t\t\tfmt.Errorf(\"cannot find the user id from the deployment id\"))\n\t}\n\treturn uid, deployment, nil\n}\n\nfunc (s *Server) validateAPIKey(key string) (string, bool) {\n\tif !strings.HasPrefix(key, consts.APIKEY_PREFIX) {\n\t\treturn \"\", false\n\t}\n\n\tapikeys := s.config.ModelZCloud.APIKeys\n\tuid, exit := apikeys[key]\n\tif exit {\n\t\treturn uid, true\n\t}\n\n\tapiServerReady := make(chan struct{})\n\tgo func() {\n\t\tif err := s.modelzCloudClient.WaitForAPIServerReady(); err != nil {\n\t\t\tlogrus.Fatalf(\"failed to wait for apiserver ready: %v\", err)\n\t\t}\n\t\tclose(apiServerReady)\n\t}()\n\t// Get from apiserver\n\tapikeys, err := s.modelzCloudClient.GetAPIKeys(context.Background(), apiServerReady, s.config.ModelZCloud.AgentToken, s.config.ModelZCloud.ID)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to get apikeys: %v\", err)\n\t\treturn \"\", false\n\t}\n\tuid, exit = apikeys[key]\n\tif exit {\n\t\treturn uid, true\n\t}\n\n\treturn \"\", false\n}\n\nfunc (s *Server) validateUnifiedKey(key string) bool {\n\tif !strings.HasPrefix(key, consts.APIKEY_PREFIX) {\n\t\treturn false\n\t}\n\tif len(s.config.ModelZCloud.UnifiedAPIKey) != 0 && s.config.ModelZCloud.UnifiedAPIKey == key {\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "agent/pkg/server/server_factory.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/dgraph-io/ristretto\"\n\t\"github.com/gin-contrib/cors\"\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/pkg/errors\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n\tginlogrus \"github.com/toorop/gin-logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/pkg/config\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/event\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/log\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/metrics\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/prom\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/runtime\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/scaling\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/validator\"\n)\n\ntype Server struct {\n\trouter        *gin.Engine\n\tmetricsRouter *gin.Engine\n\tlogger        *logrus.Entry\n\tvalidator     *validator.Validator\n\n\truntime runtime.Runtime\n\n\t// endpointResolver resolves the requests from the client to the\n\t// corresponding inference kubernetes service.\n\tendpointResolver       k8s.Resolver\n\tbuildLogRequester      log.Requester\n\tdeploymentLogRequester log.Requester\n\n\t// prometheusClient is the client to query the prometheus server.\n\t// It is used in inference list.\n\tprometheusClient prom.PrometheusQuery\n\tmetricsOptions   metrics.MetricOptions\n\n\t// scaler scales the inference from 0 to 1.\n\tscaler *scaling.InferenceScaler\n\n\tconfig config.Config\n\n\teventRecorder event.Interface\n\n\tmodelzCloudClient *client.Client\n\n\tcache ristretto.Cache\n}\n\nfunc New(c config.Config) (Server, error) {\n\trouter := gin.New()\n\trouter.Use(ginlogrus.Logger(logrus.StandardLogger(), \"/healthz\"))\n\trouter.Use(gin.Recovery())\n\n\t// metrics server\n\tmetricsRouter := gin.New()\n\tmetricsRouter.Use(gin.Recovery())\n\n\tif gin.Mode() == gin.DebugMode 
{\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\tlogrus.Debug(\"Allow CORS\")\n\t\trouter.Use(cors.New(cors.Config{\n\t\t\tAllowOrigins: []string{\"*\"},\n\t\t\tAllowMethods: []string{\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"},\n\t\t\tAllowHeaders: []string{\"*\"},\n\t\t}))\n\t}\n\n\tpromCli := prom.NewPrometheusQuery(c.Metrics.PrometheusHost, c.Metrics.PrometheusPort, http.DefaultClient)\n\n\tlogger := logrus.WithField(\"component\", \"server\")\n\n\ts := Server{\n\t\trouter:           router,\n\t\tmetricsRouter:    metricsRouter,\n\t\tconfig:           c,\n\t\tlogger:           logger,\n\t\tvalidator:        validator.New(),\n\t\tprometheusClient: promCli,\n\t}\n\n\tcache, err := ristretto.NewCache(&ristretto.Config{\n\t\tNumCounters: 1e7,\n\t\tMaxCost:     1 << 28,\n\t\tBufferItems: 64,\n\t})\n\tif err != nil {\n\t\treturn s, err\n\t}\n\ts.cache = *cache\n\n\tif s.config.ModelZCloud.EventEnabled {\n\t\tlogrus.Info(\"Event recording is enabled\")\n\t\tcli, err := client.NewClientWithOpts(\n\t\t\tclient.WithHost(s.config.ModelZCloud.URL))\n\t\tif err != nil {\n\t\t\treturn s, errors.Wrap(err, \"failed to create modelz cloud client\")\n\t\t}\n\t\ts.eventRecorder = event.NewEventRecorder(cli, s.config.ModelZCloud.AgentToken)\n\t} else {\n\t\ts.eventRecorder = event.NewFake()\n\t}\n\n\ts.registerRoutes()\n\ts.registerMetricsRoutes()\n\tif err := s.initKubernetesResources(); err != nil {\n\t\treturn s, err\n\t}\n\n\tif c.ModelZCloud.Enabled {\n\t\terr := s.initModelZCloud(c.ModelZCloud.URL, c.ModelZCloud.AgentToken, c.ModelZCloud.Region)\n\t\tif err != nil {\n\t\t\treturn s, err\n\t\t}\n\t}\n\tif err := s.initMetrics(); err != nil {\n\t\treturn s, err\n\t}\n\ts.initLogs()\n\treturn s, nil\n}\n"
  },
  {
    "path": "agent/pkg/server/server_handlerfunc.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype HandlerFunc func(c *gin.Context) error\n\nfunc WrapHandler(handler HandlerFunc) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\terr := handler(c)\n\t\tif err != nil {\n\t\t\tvar serverErr *Error\n\t\t\tif !errors.As(err, &serverErr) {\n\t\t\t\tserverErr = &Error{\n\t\t\t\t\tHTTPStatusCode: http.StatusInternalServerError,\n\t\t\t\t\tErr:            err,\n\t\t\t\t\tMessage:        err.Error(),\n\t\t\t\t}\n\t\t\t}\n\t\t\tserverErr.Request = c.Request.Method + \" \" + c.Request.URL.String()\n\n\t\t\tif gin.Mode() == \"debug\" {\n\t\t\t\tlogrus.Debugf(\"error: %+v\", err)\n\t\t\t} else {\n\t\t\t\t// Remove detailed info when in the release mode\n\t\t\t\tserverErr.Op = \"\"\n\t\t\t\tserverErr.Err = nil\n\t\t\t}\n\n\t\t\tc.JSON(serverErr.HTTPStatusCode, serverErr)\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "agent/pkg/server/server_init_kubernetes.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tkubeinformers \"k8s.io/client-go/informers\"\n\tkubeinformersv1 \"k8s.io/client-go/informers/core/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\n\tkubefledged \"github.com/senthilrch/kube-fledged/pkg/client/clientset/versioned\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/event\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/k8s\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/log\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/runtime\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/scaling\"\n\tingressclient \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\tclientset \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tinformers \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/signals\"\n)\n\nfunc (s *Server) initKubernetesResources() error {\n\tclientCmdConfig, err := clientcmd.BuildConfigFromFlags(\n\t\ts.config.KubeConfig.MasterURL, s.config.KubeConfig.Kubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclientCmdConfig.QPS = float32(s.config.KubeConfig.QPS)\n\tclientCmdConfig.Burst = s.config.KubeConfig.Burst\n\n\tkubeClient, err := kubernetes.NewForConfig(clientCmdConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinferenceClient, err := clientset.NewForConfig(clientCmdConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar ingressClient ingressclient.Interface\n\tif s.config.Ingress.IngressEnabled {\n\t\tingressClient, err = 
ingressclient.NewForConfig(clientCmdConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tkubefledgedClient, err := kubefledged.NewForConfig(clientCmdConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(\n\t\tkubeClient, s.config.KubeConfig.ResyncPeriod)\n\n\tinferenceInformerFactory := informers.NewSharedInformerFactoryWithOptions(\n\t\tinferenceClient, s.config.KubeConfig.ResyncPeriod)\n\n\t// set up signals so we handle the first shutdown signal gracefully\n\tstopCh := signals.SetupSignalHandler()\n\n\tinferences := inferenceInformerFactory.Tensorchord().V2alpha1().Inferences()\n\tgo inferences.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:inferences\", consts.ProviderName),\n\t\tstopCh, inferences.Informer().HasSynced); !ok {\n\t\ts.logger.Errorf(\"failed to wait for cache to sync\")\n\t}\n\n\tdeployments := kubeInformerFactory.Apps().V1().Deployments()\n\tgo deployments.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:deployments\", consts.ProviderName),\n\t\tstopCh, deployments.Informer().HasSynced); !ok {\n\t\ts.logger.Errorf(\"failed to wait for cache to sync\")\n\t}\n\n\tpods := kubeInformerFactory.Core().V1().Pods()\n\ts.podStartWatch(pods, kubeClient)\n\tgo pods.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:pods\", consts.ProviderName),\n\t\tstopCh, pods.Informer().HasSynced); !ok {\n\t\ts.logger.Errorf(\"failed to wait for cache to sync\")\n\t}\n\n\tendpoints := kubeInformerFactory.Core().V1().Endpoints()\n\tgo endpoints.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:endpoints\", consts.ProviderName),\n\t\tstopCh, endpoints.Informer().HasSynced); !ok {\n\t\ts.logger.Errorf(\"failed to wait for cache to sync\")\n\t}\n\n\truntime, err := runtime.New(clientCmdConfig,\n\t\tendpoints, deployments, inferences, 
pods,\n\t\tkubeClient, ingressClient, kubefledgedClient, inferenceClient,\n\t\ts.eventRecorder,\n\t\ts.config.Ingress.IngressEnabled, s.config.ModelZCloud.EventEnabled,\n\t\ts.config.Build.BuildEnabled, s.config.Ingress.AnyIPToDomain,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.runtime = runtime\n\tif s.config.Server.Dev {\n\t\tlogrus.Warn(\"running in dev mode, using port forwarding to access pods, please do not use dev mode in production\")\n\t\ts.endpointResolver = k8s.NewPortForwardingResolver(clientCmdConfig, kubeClient)\n\t} else {\n\t\ts.endpointResolver = k8s.NewEndpointResolver(endpoints.Lister())\n\t}\n\ts.deploymentLogRequester = log.NewK8sAPIRequestor(kubeClient)\n\ts.scaler, err = scaling.NewInferenceScaler(runtime, s.config.Inference.CacheTTL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif s.scaler == nil {\n\t\treturn fmt.Errorf(\"scaler is nil\")\n\t}\n\treturn nil\n}\n\n// podStartWatch log event when pod start began and finished\nfunc (s *Server) podStartWatch(pods kubeinformersv1.PodInformer, client *kubernetes.Clientset) {\n\tpods.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tnew := obj.(*v1.Pod)\n\t\t\tcontrolPlane, exist := new.Annotations[consts.AnnotationControlPlaneKey]\n\t\t\t// for inference created by modelz apiserver\n\t\t\tif !exist || controlPlane != consts.ModelzAnnotationValue {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpodWatchEventLog(s.eventRecorder, new, types.PodCreateEvent)\n\t\t\tstart := time.Now()\n\n\t\t\t// Ticker will keep watching until pod start or timeout\n\t\t\tticker := time.NewTicker(time.Second * 2)\n\t\t\ttimeout := time.After(5 * time.Minute)\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-timeout:\n\t\t\t\t\t\tpodWatchEventLog(s.eventRecorder, new, types.PodTimeoutEvent)\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tpod, err := client.CoreV1().Pods(new.Namespace).Get(context.TODO(), new.Name, 
metav1.GetOptions{})\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\t\t\"namespace\":  pod.Namespace,\n\t\t\t\t\t\t\t\t\"deployment\": pod.Labels[\"app\"],\n\t\t\t\t\t\t\t\t\"name\":       pod.Name,\n\t\t\t\t\t\t\t}).Errorf(\"failed to get pod: %s\", err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor _, c := range pod.Status.Conditions {\n\t\t\t\t\t\t\tif c.Type == v1.PodReady && c.Status == v1.ConditionTrue {\n\t\t\t\t\t\t\t\tpodWatchEventLog(s.eventRecorder, pod, types.PodReadyEvent)\n\t\t\t\t\t\t\t\tlabel := prometheus.Labels{\n\t\t\t\t\t\t\t\t\t\"inference_name\": fmt.Sprintf(\"%s.%s\", pod.Labels[\"app\"], pod.Namespace),\n\t\t\t\t\t\t\t\t\t\"source_image\":   pod.Annotations[consts.AnnotationDockerImage]}\n\t\t\t\t\t\t\t\ts.metricsOptions.PodStartHistogram.With(label).\n\t\t\t\t\t\t\t\t\tObserve(time.Since(start).Seconds())\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t},\n\t})\n}\n\n// log status for pod watch status transfer\nfunc podWatchEventLog(recorder event.Interface, obj *v1.Pod, event string) {\n\tdeployment := obj.Labels[\"app\"]\n\terr := recorder.CreateDeploymentEvent(obj.Namespace, deployment, event, obj.Name)\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"namespace\":  obj.Namespace,\n\t\t\t\"deployment\": deployment,\n\t\t\t\"name\":       obj.Name,\n\t\t\t\"event\":      event,\n\t\t}).Errorf(\"failed to create deployment event: %s\", err)\n\t}\n}\n"
  },
  {
    "path": "agent/pkg/server/server_init_logs.go",
    "content": "package server\n\nimport (\n\t\"github.com/tensorchord/openmodelz/agent/pkg/log\"\n)\n\nfunc (s *Server) initLogs() {\n\tif len(s.config.Logs.LokiURL) > 0 {\n\t\ts.logger.Info(\"enable Loki logs requester\")\n\t\ts.buildLogRequester = log.NewLokiAPIRequestor(\n\t\t\ts.config.Logs.LokiURL, s.config.Logs.LokiUser, s.config.Logs.LokiToken)\n\t}\n\n}\n"
  },
  {
    "path": "agent/pkg/server/server_init_metrics.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\n\t\"github.com/tensorchord/openmodelz/agent/pkg/metrics\"\n)\n\nfunc (s *Server) initMetrics() error {\n\tmetricsOptions := metrics.BuildMetricsOptions()\n\ts.metricsOptions = metricsOptions\n\texporter := metrics.NewExporter(metricsOptions, s.runtime)\n\tmetrics.RegisterExporter(exporter)\n\texporter.StartServiceWatcher(context.TODO(), s.config.Metrics.PollingInterval)\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/server_init_modelz_cloud.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/pkg/errors\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n)\n\nfunc (s *Server) initModelZCloud(url, token, region string) error {\n\tcluster := types.ManagedCluster{\n\t\tRegion: region,\n\t\tPrometheusURL: fmt.Sprintf(\"%s:%d\", s.config.Metrics.PrometheusHost,\n\t\t\ts.config.Metrics.PrometheusPort),\n\t}\n\n\tcli, err := client.NewClientWithOpts(\n\t\tclient.WithHost(url))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create modelz cloud client\")\n\t}\n\ts.modelzCloudClient = cli\n\n\terr = s.runtime.GetClusterInfo(&cluster)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get managed cluster info\")\n\t}\n\n\tapiServerReady := make(chan struct{})\n\tgo func() {\n\t\tif err := s.modelzCloudClient.WaitForAPIServerReady(); err != nil {\n\t\t\tlogrus.Fatalf(\"failed to wait for apiserver ready: %v\", err)\n\t\t}\n\t\tclose(apiServerReady)\n\t}()\n\n\tcluster.Status = types.ClusterStatusInit\n\t// after init modelz cloud client, register agent\n\terr = cli.RegisterAgent(context.Background(), token, &cluster)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to register agent to modelz cloud\")\n\t}\n\ts.config.ModelZCloud.ID = cluster.ID\n\ts.config.ModelZCloud.TokenID = cluster.TokenID\n\ts.config.ModelZCloud.Name = cluster.Name\n\n\tapikeys, err := s.modelzCloudClient.GetAPIKeys(context.Background(), apiServerReady, s.config.ModelZCloud.AgentToken, s.config.ModelZCloud.ID)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to get apikeys: %v\", err)\n\t}\n\n\ts.config.ModelZCloud.APIKeys = apikeys\n\n\tnamespaces, err := s.modelzCloudClient.GetNamespaces(context.Background(), apiServerReady, s.config.ModelZCloud.AgentToken, s.config.ModelZCloud.ID)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to get namespaces: %v\", err)\n\t}\n\n\tnss := 
[]string{}\n\tfor _, ns := range namespaces.Items {\n\t\tnss = append(nss, ns)\n\t\terr = s.runtime.NamespaceCreate(context.Background(), ns)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"failed to create namespace %s: %v\", ns, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\ts.config.ModelZCloud.UserNamespaces = nss\n\n\treturn nil\n}\n"
  },
  {
    "path": "agent/pkg/server/server_init_route.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-gonic/gin\"\n\tswaggerfiles \"github.com/swaggo/files\"\n\tginSwagger \"github.com/swaggo/gin-swagger\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/pkg/docs\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/metrics\"\n)\n\nconst (\n\tendpointInferencePlural = \"/inferences\"\n\tendpointInference       = \"/inference\"\n\tendpointServerPlural    = \"/servers\"\n\tendpointServer          = \"/server\"\n\tendpointScaleInference  = \"/scale-inference\"\n\tendpointInfo            = \"/info\"\n\tendpointLogPlural       = \"/logs\"\n\tendpointNamespacePlural = \"/namespaces\"\n\tendpointHealthz         = \"/healthz\"\n\tendpointBuild           = \"/build\"\n\tendpointImageCache      = \"/image-cache\"\n)\n\nfunc (s *Server) registerRoutes() {\n\troot := s.router.Group(\"/\")\n\tv1 := s.router.Group(\"/api/v1\")\n\n\t// swagger\n\troot.GET(\"/swagger/*any\", ginSwagger.WrapHandler(swaggerfiles.Handler))\n\t// dataplane\n\troot.Any(\"/inference/:name\",\n\t\tWrapHandler(s.middlewareCallID),\n\t\tWrapHandler(s.handleInferenceProxy))\n\troot.Any(\"/inference/:name/*proxyPath\",\n\t\tWrapHandler(s.middlewareCallID),\n\t\tWrapHandler(s.handleInferenceProxy))\n\n\tv1.Any(\"/mosec/:id/*proxyPath\", WrapHandler(s.proxyMosec))\n\tv1.Any(\"/gradio/:id/*proxyPath\", WrapHandler(s.proxyGradio))\n\tv1.Any(\"/streamlit/:id/*proxyPath\", WrapHandler(s.proxyStreamlit))\n\tv1.Any(\"/other/:id/*proxyPath\", WrapHandler(s.proxyOther))\n\n\t// healthz\n\troot.GET(endpointHealthz, WrapHandler(s.handleHealthz))\n\n\t// landing page\n\troot.GET(\"/\", WrapHandler(s.handleRoot))\n\n\t// control plane\n\tcontrolPlane := root.Group(\"/system\")\n\t// 
inferences\n\tcontrolPlane.GET(endpointInferencePlural,\n\t\tWrapHandler(s.handleInferenceList))\n\tcontrolPlane.POST(endpointInferencePlural,\n\t\tWrapHandler(s.handleInferenceCreate))\n\tcontrolPlane.PUT(endpointInferencePlural,\n\t\tWrapHandler(s.handleInferenceUpdate))\n\tcontrolPlane.DELETE(endpointInferencePlural,\n\t\tWrapHandler(s.handleInferenceDelete))\n\tcontrolPlane.POST(endpointScaleInference,\n\t\tWrapHandler(s.handleInferenceScale))\n\tcontrolPlane.GET(endpointInference+\"/:name\",\n\t\tWrapHandler(s.handleInferenceGet))\n\n\t// instances\n\tcontrolPlane.GET(endpointInference+\"/:name/instances\",\n\t\tWrapHandler(s.handleInferenceInstance))\n\tcontrolPlane.GET(endpointInference+\"/:name/instance/:instance/exec\",\n\t\tWrapHandler(s.handleInferenceInstanceExec))\n\n\t// info\n\tcontrolPlane.GET(endpointInfo, WrapHandler(s.handleInfo))\n\n\t// servers\n\tcontrolPlane.GET(endpointServerPlural, WrapHandler(s.handleServerList))\n\tcontrolPlane.POST(endpointServer+\"/:name/labels\", WrapHandler(s.handleServerLabelCreate))\n\tcontrolPlane.DELETE(endpointServer+\"/:name/delete\", WrapHandler(s.handleServerDelete))\n\n\t// logs\n\tcontrolPlane.GET(endpointLogPlural+endpointInference,\n\t\tWrapHandler(s.handleInferenceLogs))\n\tcontrolPlane.GET(endpointLogPlural+endpointBuild, WrapHandler(s.handleBuildLogs))\n\n\t// namespaces\n\tcontrolPlane.GET(endpointNamespacePlural,\n\t\tWrapHandler(s.handleNamespaceList))\n\tcontrolPlane.POST(endpointNamespacePlural,\n\t\tWrapHandler(s.handleNamespaceCreate))\n\tcontrolPlane.DELETE(endpointNamespacePlural,\n\t\tWrapHandler(s.handleNamespaceDelete))\n\n\t// TODO(gaocegege): Support secrets\n\t// controlPlane.GET(\"/secrets\")\n\n\t// builds\n\tif s.config.Build.BuildEnabled {\n\t\tcontrolPlane.GET(endpointBuild, WrapHandler(s.handleBuildList))\n\t\tcontrolPlane.GET(endpointBuild+\"/:name\", WrapHandler(s.handleBuildGet))\n\t\tcontrolPlane.POST(endpointBuild, WrapHandler(s.handleBuildCreate))\n\t}\n\t// TODO(gaocegege): 
Support metrics\n\t// metrics\n\n\t// image cache\n\tcontrolPlane.POST(endpointImageCache, WrapHandler(s.handleImageCacheCreate))\n}\n\n// registerMetricsRoutes registers the metrics routes.\nfunc (s *Server) registerMetricsRoutes() {\n\ts.metricsRouter.GET(\"/metrics\", gin.WrapH(metrics.PrometheusHandler()))\n\ts.metricsRouter.GET(endpointHealthz, WrapHandler(s.handleHealthz))\n}\n"
  },
  {
    "path": "agent/pkg/server/server_run.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n)\n\nfunc (s *Server) Run() error {\n\tsrv := &http.Server{\n\t\tAddr:         fmt.Sprintf(\":%d\", s.config.Server.ServerPort),\n\t\tHandler:      s.router,\n\t\tWriteTimeout: s.config.Server.WriteTimeout,\n\t\tReadTimeout:  s.config.Server.ReadTimeout,\n\t}\n\n\tgo func() {\n\t\tif err := srv.ListenAndServe(); err != nil &&\n\t\t\t!errors.Is(err, http.ErrServerClosed) {\n\t\t\tlogrus.Errorf(\"listen on port %d error: %v\", s.config.Server.ServerPort, err)\n\t\t}\n\t}()\n\n\tmetricsSrv := &http.Server{\n\t\tAddr:         fmt.Sprintf(\":%d\", s.config.Metrics.ServerPort),\n\t\tHandler:      s.metricsRouter,\n\t\tReadTimeout:  s.config.Metrics.PollingInterval,\n\t\tWriteTimeout: s.config.Metrics.PollingInterval,\n\t}\n\tgo func() {\n\t\tif err := metricsSrv.ListenAndServe(); err != nil &&\n\t\t\t!errors.Is(err, http.ErrServerClosed) {\n\t\t\tlogrus.Errorf(\"listen on port %d error: %v\",\n\t\t\t\ts.config.Metrics.ServerPort, err)\n\t\t}\n\t}()\n\n\tlogrus.WithField(\"port\", s.config.Server.ServerPort).\n\t\tInfo(\"server is running...\")\n\tlogrus.WithField(\"metrics-port\", s.config.Metrics.ServerPort).\n\t\tInfo(\"metrics server is running...\")\n\n\tif s.config.ModelZCloud.Enabled {\n\t\t// check apiserver is ready\n\t\tapiServerReady := make(chan struct{})\n\t\tgo func() {\n\t\t\tif err := s.modelzCloudClient.WaitForAPIServerReady(); err != nil {\n\t\t\t\tlogrus.Fatalf(\"failed to wait for apiserver ready: %v\", err)\n\t\t\t}\n\t\t\tclose(apiServerReady)\n\t\t}()\n\t\t// websocket\n\t\t// build websocket\n\t\tgo s.connect(apiServerReady)\n\n\t\t// heartbeat with apiserver\n\t\tgo wait.UntilWithContext(context.Background(), func(ctx context.Context) {\n\t\t\tcluster := 
types.ManagedCluster{\n\t\t\t\tName:      s.config.ModelZCloud.Name,\n\t\t\t\tID:        s.config.ModelZCloud.ID,\n\t\t\t\tStatus:    types.ClusterStatusActive,\n\t\t\t\tUpdatedAt: time.Now().UTC(),\n\t\t\t\tTokenID:   s.config.ModelZCloud.TokenID,\n\t\t\t\tRegion:    s.config.ModelZCloud.Region,\n\t\t\t\tPrometheusURL: fmt.Sprintf(\"%s:%d\", s.config.Metrics.PrometheusHost,\n\t\t\t\t\ts.config.Metrics.PrometheusPort),\n\t\t\t}\n\t\t\terr := s.runtime.GetClusterInfo(&cluster)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to get managed cluster info: %v\", err)\n\t\t\t}\n\n\t\t\terr = s.modelzCloudClient.UpdateAgentStatus(ctx, apiServerReady, s.config.ModelZCloud.AgentToken, cluster)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to update agent status: %v\", err)\n\t\t\t}\n\t\t\tlogrus.Debugf(\"update agent status: %v\", cluster)\n\t\t}, s.config.ModelZCloud.HeartbeatInterval)\n\n\t\tgo wait.UntilWithContext(context.Background(), func(ctx context.Context) {\n\t\t\tapikeys, err := s.modelzCloudClient.GetAPIKeys(ctx, apiServerReady, s.config.ModelZCloud.AgentToken, s.config.ModelZCloud.ID)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to get apikeys: %v\", err)\n\t\t\t}\n\n\t\t\ts.config.ModelZCloud.APIKeys = apikeys\n\t\t\tlogrus.Debugf(\"update apikeys\")\n\t\t}, s.config.ModelZCloud.HeartbeatInterval) // default 1min update, TODO(xieydd) make it configurable\n\n\t\tgo wait.UntilWithContext(context.Background(), func(ctx context.Context) {\n\t\t\tnamespaces, err := s.modelzCloudClient.GetNamespaces(ctx, apiServerReady, s.config.ModelZCloud.AgentToken, s.config.ModelZCloud.ID)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to get namespaces: %v\", err)\n\t\t\t}\n\n\t\t\tfor _, ns := range namespaces.Items {\n\t\t\t\tif ContainString(ns, s.config.ModelZCloud.UserNamespaces) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terr = s.runtime.NamespaceCreate(ctx, ns)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Errorf(\"failed to create namespace 
%s: %v\", ns, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ts.config.ModelZCloud.UserNamespaces = append(s.config.ModelZCloud.UserNamespaces, ns)\n\t\t\t\tlogrus.Debugf(\"update namespaces\")\n\t\t\t}\n\t\t}, s.config.ModelZCloud.HeartbeatInterval) // default 1h update, make it configurable\n\t}\n\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)\n\t<-quit\n\tlogrus.Info(\"shutdown server\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\treturn srv.Shutdown(ctx)\n}\n\nfunc ContainString(target string, strs []string) bool {\n\tfor _, str := range strs {\n\t\tif str == target {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "agent/pkg/server/server_websocket.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/rancher/remotedialer\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc (s *Server) connect(apiServerReady <-chan struct{}) {\n\t<-apiServerReady\n\tvar clusterDialEndpoint string\n\theaders := http.Header{\n\t\t\"X-Cluster-ID\": {s.config.ModelZCloud.ID},\n\t\t\"Agent-Token\":  {s.config.ModelZCloud.AgentToken},\n\t}\n\tu, err := url.Parse(s.config.ModelZCloud.URL)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to parse url: %v\", err)\n\t}\n\n\tswitch u.Scheme {\n\tcase \"http\":\n\t\tclusterDialEndpoint = \"ws://\" + u.Host + types.DailEndPointSuffix\n\tcase \"https\":\n\t\tclusterDialEndpoint = \"wss://\" + u.Host + types.DailEndPointSuffix\n\t}\n\n\tctx := context.Background()\n\tgo func() {\n\t\tfor {\n\t\t\tremotedialer.ClientConnect(ctx, clusterDialEndpoint, headers, nil, func(proto, address string) bool { return true }, nil)\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-time.After(s.config.ModelZCloud.HeartbeatInterval):\n\t\t\t\t// retry connect after interval\n\t\t\t}\n\t\t}\n\t}()\n\n\t// retry(\n\t// \ts.config.ModelZCloud.HeartbeatInterval,\n\t// \tfunc() error {\n\t// \t\tlogrus.Debugf(\"run websocket server\")\n\t// \t\tctx := context.Background()\n\t// \t\terr := remotedialer.ClientConnect(ctx, clusterDialEndpoint, headers, nil,\n\t// \t\t\tfunc(proto, address string) bool { return true }, nil)\n\t// \t\tif err != nil {\n\t// \t\t\tlogrus.Errorf(\"failed to connect to apiserver: %v\", err)\n\t// \t\t\treturn err\n\t// \t\t}\n\t// \t\treturn nil\n\t// \t},\n\t// )\n\n}\n\nfunc retry(sleep time.Duration, f func() error) {\n\ti := 1\n\tfor {\n\t\terr := f()\n\t\tif err == nil {\n\t\t\treturn\n\t\t} else {\n\t\t\tlogrus.Errorf(\"retry %d times, still failed\", i)\n\t\t\ttime.Sleep(sleep)\n\t\t\ti++\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "agent/pkg/server/static/index.html",
    "content": "<html lang=\"en\">\n\n<head>\n    <meta http-equiv=\"refresh\" content=\"10\">\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n    <meta name=\"theme-color\" content=\"#000\">\n    <title>OpenModelZ Serving | Running</title>\n    <style>\n        .tooltip {\n            position: relative;\n            display: inline-block;\n            border-bottom: 1px dotted black;\n        }\n\n        .tooltip .tooltiptext {\n            visibility: hidden;\n            width: 120px;\n            background-color: black;\n            color: #fff;\n            text-align: center;\n            border-radius: 6px;\n            padding: 5px 0;\n\n            /* Position the tooltip */\n            position: absolute;\n            z-index: 1;\n            bottom: 100%;\n            left: 50%;\n            margin-left: -60px;\n        }\n\n        .tooltip:hover .tooltiptext {\n            visibility: visible;\n        }\n\n        html {\n            font-size: 62.5%;\n            box-sizing: border-box;\n            height: -webkit-fill-available\n        }\n\n        *,\n        ::after,\n        ::before {\n            box-sizing: inherit\n        }\n\n        body {\n            font-family: sf pro text, sf pro icons, helvetica neue, helvetica, arial, sans-serif;\n            font-size: 1.6rem;\n            line-height: 1.65;\n            word-break: break-word;\n            font-kerning: auto;\n            font-variant: normal;\n            -webkit-font-smoothing: antialiased;\n            -moz-osx-font-smoothing: grayscale;\n            text-rendering: optimizeLegibility;\n            hyphens: auto;\n            height: 100vh;\n            height: -webkit-fill-available;\n            max-height: 100vh;\n            max-height: -webkit-fill-available;\n            margin: 0\n        }\n\n        ::selection {\n            background: #dbe6d2\n        }\n\n        ::-moz-selection {\n            
background: #dbe6d2\n        }\n\n        a {\n            cursor: pointer;\n            color: #5e785f;\n            text-decoration: none;\n            transition: all .2s ease;\n            border-bottom: 1px solid #0000\n        }\n\n        a:hover {\n            border-bottom: 1px solid #5e785f\n        }\n\n        ul {\n            padding: 0;\n            margin-left: 1.5em;\n            list-style-type: none\n        }\n\n        li {\n            margin-bottom: 10px\n        }\n\n        ul li:before {\n            content: '\\02013'\n        }\n\n        li:before {\n            display: inline-block;\n            color: #ccc;\n            position: absolute;\n            margin-left: -18px;\n            transition: color .2s ease\n        }\n\n        code {\n            font-family: Menlo, Monaco, Lucida Console, Liberation Mono, DejaVu Sans Mono, Bitstream Vera Sans Mono, Courier New, monospace, serif;\n            font-size: .92em\n        }\n\n        code:after,\n        code:before {\n            content: '`'\n        }\n\n        .container {\n            display: flex;\n            justify-content: center;\n            flex-direction: column;\n            min-height: 100%\n        }\n\n        main {\n            max-width: 80rem;\n            padding: 4rem 6rem;\n            margin: auto\n        }\n\n        ul {\n            margin-bottom: 32px\n        }\n\n        .error-title {\n            font-size: 2rem;\n            padding-left: 22px;\n            line-height: 1.5;\n            margin-bottom: 24px\n        }\n\n        .error-title-guilty {\n            border-left: 2px solid #ed367f\n        }\n\n        .error-title-innocent {\n            border-left: 2px solid #59b89c\n        }\n\n        main p {\n            color: #333\n        }\n\n        .devinfo-container {\n            border: 1px solid #ddd;\n            border-radius: 4px;\n            padding: 2rem;\n            display: flex;\n            flex-direction: column;\n    
        margin-bottom: 32px\n        }\n\n        .error-code {\n            margin: 0;\n            font-size: 1.6rem;\n            color: #000;\n            margin-bottom: 1.6rem\n        }\n\n        .devinfo-line {\n            color: #333\n        }\n\n        .devinfo-line code,\n        code,\n        li {\n            color: #000\n        }\n\n        .devinfo-line:not(:last-child) {\n            margin-bottom: 8px\n        }\n\n        .docs-link,\n        .contact-link {\n            font-weight: 500\n        }\n\n        header,\n        footer,\n        footer a {\n            display: flex;\n            justify-content: center;\n            align-items: center\n        }\n\n        header,\n        footer {\n            min-height: 100px;\n            height: 100px\n        }\n\n        header {\n            border-bottom: 1px solid #eaeaea\n        }\n\n        header h1 {\n            font-size: 1.8rem;\n            margin: 0;\n            font-weight: 500\n        }\n\n        header p {\n            font-size: 1.3rem;\n            margin: 0;\n            font-weight: 500\n        }\n\n        .header-item {\n            display: flex;\n            padding: 0 2rem;\n            margin: 2rem 0;\n            text-decoration: line-through;\n            color: #999\n        }\n\n        .header-item.active {\n            color: #ff0080;\n            text-decoration: none\n        }\n\n        .header-item.first {\n            border-right: 1px solid #eaeaea\n        }\n\n        .header-item-content {\n            display: flex;\n            flex-direction: column\n        }\n\n        .header-item-icon {\n            margin-right: 1rem;\n            margin-top: .6rem\n        }\n\n        footer {\n            border-top: 1px solid #eaeaea\n        }\n\n        footer a {\n            color: #000\n        }\n\n        footer a:hover {\n            border-bottom-color: #0000\n        }\n\n        footer svg {\n            margin-left: .8rem\n        
}\n\n        .note {\n            padding: 8pt 16pt;\n            border-radius: 5px;\n            border: 1px solid #0070f3;\n            font-size: 14px;\n            line-height: 1.8;\n            color: #0070f3\n        }\n\n        @media(max-width:500px) {\n            .devinfo-container .devinfo-line code {\n                margin-top: .4rem\n            }\n\n            .devinfo-container .devinfo-line:not(:last-child) {\n                margin-bottom: 1.6rem\n            }\n\n            .devinfo-container {\n                margin-bottom: 0\n            }\n\n            header {\n                flex-direction: column;\n                height: auto;\n                min-height: auto;\n                align-items: flex-start\n            }\n\n            .header-item.first {\n                border-right: none;\n                margin-bottom: 0\n            }\n\n            main {\n                padding: 1rem 2rem\n            }\n\n            body {\n                font-size: 1.4rem;\n                line-height: 1.55\n            }\n\n            footer {\n                display: none\n            }\n\n            .note {\n                margin-top: 16px\n            }\n        }\n\n    </style>\n</head>\n\n<body>\n    <div class=\"container\">\n        <main>\n            <p class=\"devinfo-container\">\n                <span class=\"error-code\">OpenModelZ server is running</span>\n                <span class=\"devinfo-line\">Version: {{.Version}}\n                </span>\n            </p>\n            <p class=\"devinfo-container\">\n                <span class=\"devinfo-line\">Please check out the <a\n                        href=\"https://docs.open.modelz.ai/#create-your-first-ui-based-deployment\">documentation</a>\n                    for the next steps.\n                </span>\n                <span class=\"devinfo-line\">Please <a href=\"https://discord.gg/F4WnzqmeNj\">contact us\n                        on discord</a> if there is any 
issue.\n                </span>\n            </p>\n        </main>\n    </div>\n</body>\n\n</html>\n"
  },
  {
    "path": "agent/pkg/server/static/landing.go",
    "content": "package static\n\nimport (\n\t\"bytes\"\n\t_ \"embed\"\n\t\"html/template\"\n\n\t\"github.com/tensorchord/openmodelz/agent/pkg/version\"\n)\n\n//go:embed index.html\nvar htmlTemplate string\n\ntype htmlStruct struct {\n\tVersion string\n}\n\nfunc RenderLoadingPage() (*bytes.Buffer, error) {\n\ttmpl, err := template.New(\"root\").Parse(htmlTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := htmlStruct{\n\t\tVersion: version.GetAgentVersion(),\n\t}\n\n\tvar buffer bytes.Buffer\n\tif err := tmpl.Execute(&buffer, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &buffer, nil\n}\n"
  },
  {
    "path": "agent/pkg/server/static/page_loading.go",
    "content": "package static\n\nimport (\n\t\"bytes\"\n\t\"html/template\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nconst htmlDeploymentTemplate = `<html lang=\"en\"><head><meta http-equiv=\"refresh\" content=\"10\"><meta charset=\"utf-8\"><meta name=\"viewport\" content=\"width=device-width,initial-scale=1\"><meta name=\"theme-color\" content=\"#000\"><title>Loading - {{.Framework}}</title><style>\n.tooltip {\n    position: relative;\n    display: inline-block;\n    border-bottom: 1px dotted black;\n  }\n  \n  .tooltip .tooltiptext {\n    visibility: hidden;\n    width: 120px;\n    background-color: black;\n    color: #fff;\n    text-align: center;\n    border-radius: 6px;\n    padding: 5px 0;\n    \n    /* Position the tooltip */\n    position: absolute;\n    z-index: 1;\n    bottom: 100%;\n    left: 50%;\n    margin-left: -60px;\n  }\n  \n  .tooltip:hover .tooltiptext {\n    visibility: visible;\n  }\n  html{font-size:62.5%;box-sizing:border-box;height:-webkit-fill-available}*,::after,::before{box-sizing:inherit}body{font-family:sf pro text,sf pro icons,helvetica neue,helvetica,arial,sans-serif;font-size:1.6rem;line-height:1.65;word-break:break-word;font-kerning:auto;font-variant:normal;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;text-rendering:optimizeLegibility;hyphens:auto;height:100vh;height:-webkit-fill-available;max-height:100vh;max-height:-webkit-fill-available;margin:0}::selection{background:#dbe6d2}::-moz-selection{background:#dbe6d2}a{cursor:pointer;color:#5e785f;text-decoration:none;transition:all .2s ease;border-bottom:1px solid #0000}a:hover{border-bottom:1px solid #5e785f}ul{padding:0;margin-left:1.5em;list-style-type:none}li{margin-bottom:10px}ul li:before{content:'\\02013'}li:before{display:inline-block;color:#ccc;position:absolute;margin-left:-18px;transition:color .2s ease}code{font-family:Menlo,Monaco,Lucida Console,Liberation Mono,DejaVu Sans Mono,Bitstream Vera Sans Mono,Courier 
New,monospace,serif;font-size:.92em}code:after,code:before{content:'` + \"`\" + `'}.container{display:flex;justify-content:center;flex-direction:column;min-height:100%}main{max-width:80rem;padding:4rem 6rem;margin:auto}ul{margin-bottom:32px}.error-title{font-size:2rem;padding-left:22px;line-height:1.5;margin-bottom:24px}.error-title-guilty{border-left:2px solid #ed367f}.error-title-innocent{border-left:2px solid #59b89c}main p{color:#333}.devinfo-container{border:1px solid #ddd;border-radius:4px;padding:2rem;display:flex;flex-direction:column;margin-bottom:32px}.error-code{margin:0;font-size:1.6rem;color:#000;margin-bottom:1.6rem}.devinfo-line{color:#333}.devinfo-line code,code,li{color:#000}.devinfo-line:not(:last-child){margin-bottom:8px}.docs-link,.contact-link{font-weight:500}header,footer,footer a{display:flex;justify-content:center;align-items:center}header,footer{min-height:100px;height:100px}header{border-bottom:1px solid #eaeaea}header h1{font-size:1.8rem;margin:0;font-weight:500}header p{font-size:1.3rem;margin:0;font-weight:500}.header-item{display:flex;padding:0 2rem;margin:2rem 0;text-decoration:line-through;color:#999}.header-item.active{color:#ff0080;text-decoration:none}.header-item.first{border-right:1px solid #eaeaea}.header-item-content{display:flex;flex-direction:column}.header-item-icon{margin-right:1rem;margin-top:.6rem}footer{border-top:1px solid #eaeaea}footer a{color:#000}footer a:hover{border-bottom-color:#0000}footer svg{margin-left:.8rem}.note{padding:8pt 16pt;border-radius:5px;border:1px solid #0070f3;font-size:14px;line-height:1.8;color:#0070f3}@media(max-width:500px){.devinfo-container .devinfo-line code{margin-top:.4rem}.devinfo-container .devinfo-line:not(:last-child){margin-bottom:1.6rem}.devinfo-container{margin-bottom:0}header{flex-direction:column;height:auto;min-height:auto;align-items:flex-start}.header-item.first{border-right:none;margin-bottom:0}main{padding:1rem 
2rem}body{font-size:1.4rem;line-height:1.55}footer{display:none}.note{margin-top:16px}}</style></head><body><div class=\"container\"><main><p class=\"devinfo-container\"><span class=\"error-code\">{{.StatusString}}</span><span class=\"devinfo-line\">Framework: <code>{{.Framework}}</code></span><span class=\"devinfo-line\">Deployment: <code>{{.Deployment}}</code></span><span class=\"devinfo-line\"><span class=\"tooltip\"><span class=\"tooltiptext\">Scheduling, ContainerCreating, Initializing, Running</span>Status</span>: <code>{{.InstanceStatus}}</code></span><span class=\"devinfo-line\">The page will auto refresh once the request is completed. Kindly wait for the page to reload automatically. If the issue persists, please contact <a href=\"https://discord.gg/F4WnzqmeNj\">modelz support team on discord</a> for assistance.</span></p></main></div></body></html>`\n\ntype htmlDeploymentStruct struct {\n\tDeployment     string\n\tFramework      string\n\tID             string\n\tStatusString   string\n\tInstanceStatus string\n}\n\nfunc RenderDeploymentLoadingPage(framework, id, statusString, deployment string,\n\tinstances []types.InferenceDeploymentInstance) (*bytes.Buffer, error) {\n\ttmpl, err := template.New(\"root\").Parse(htmlDeploymentTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := htmlDeploymentStruct{\n\t\tDeployment:     deployment,\n\t\tFramework:      framework,\n\t\tID:             id,\n\t\tStatusString:   statusString,\n\t\tInstanceStatus: \"Scaling\",\n\t}\n\n\tif len(instances) > 0 {\n\t\tdata.InstanceStatus = string(instances[0].Status.Phase)\n\t}\n\n\tvar buffer bytes.Buffer\n\tif err := tmpl.Execute(&buffer, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &buffer, nil\n}\n"
  },
  {
    "path": "agent/pkg/server/suite_test.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/golang/mock/gomock\"\n\t. \"github.com/onsi/ginkgo/v2\"\n\t. \"github.com/onsi/gomega\"\n\truntimemock \"github.com/tensorchord/openmodelz/agent/pkg/runtime/mock\"\n)\n\nvar (\n\tctrl        *gomock.Controller\n\tmockRuntime *runtimemock.MockRuntime\n\tserver      *Server\n)\n\nfunc mkContext(method string, path string, header map[string][]string, body io.Reader) *gin.Context {\n\tc, _ := gin.CreateTestContext(httptest.NewRecorder())\n\tif c == nil {\n\t\tpanic(c)\n\t}\n\treq, _ := http.NewRequest(method, path, body)\n\tfor k, vs := range header {\n\t\tfor _, v := range vs {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\tc.Request = req\n\treturn c\n}\n\nfunc mkJsonBodyContext(method string, path string, header map[string][]string, body any) *gin.Context {\n\tjsonValue, err := json.Marshal(body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn mkContext(method, path, header, bytes.NewBuffer(jsonValue))\n}\n\nfunc setQuery(c *gin.Context, query map[string]string) {\n\tparams, _ := url.ParseQuery(c.Request.URL.RawQuery)\n\tfor k, v := range query {\n\t\tparams.Set(k, v)\n\t}\n\tc.Request.URL.RawQuery = params.Encode()\n}\n\nfunc setParam(c *gin.Context, param map[string]string) {\n\tfor k, v := range param {\n\t\tc.Params = []gin.Param{{Key: k, Value: v}}\n\t}\n}\n\nfunc TestBuilder(t *testing.T) {\n\tgin.SetMode(gin.ReleaseMode)\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"server\")\n}\n\nvar _ = BeforeSuite(func() {\n\tctrl = gomock.NewController(GinkgoT())\n\tmockRuntime = runtimemock.NewMockRuntime(ctrl)\n})\n"
  },
  {
    "path": "agent/pkg/server/user.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc (s *Server) getUIDFromDeploymentID(ctx context.Context, id string) (string, bool) {\n\n\tuid, exit := s.cache.Get(id)\n\tif exit {\n\t\treturn uid.(string), true\n\t}\n\n\tuid, err := s.modelzCloudClient.GetUIDFromDeploymentID(ctx, s.config.ModelZCloud.AgentToken, s.config.ModelZCloud.ID, id)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to get uid from deployment id: %v\", err)\n\t\treturn \"\", false\n\t}\n\n\t// no expiration\n\ts.cache.SetWithTTL(id, uid, 1, 0)\n\treturn uid.(string), true\n}\n"
  },
  {
    "path": "agent/pkg/server/validator/validator.go",
    "content": "package validator\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"k8s.io/apimachinery/pkg/util/rand\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nconst (\n\tdefaultMinReplicas     = 0\n\tdefaultMaxReplicas     = 1\n\tmaxReplicas            = 5\n\tdefaultTargetLoad      = 100\n\tdefaultZeroDuration    = 300\n\tdefaultStartupDuration = 600\n\tdefaultBuildDuration   = \"40m\"\n\tdefaultHTTPProbePath   = \"/\"\n)\n\nvar (\n\tdnsValidRegex = `^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`\n)\n\ntype Validator struct {\n\tvalidDNS *regexp.Regexp\n}\n\nfunc New() *Validator {\n\treturn &Validator{\n\t\tvalidDNS: regexp.MustCompile(dnsValidRegex),\n\t}\n}\n\n// Validates that the service name is valid for Kubernetes\nfunc (v Validator) ValidateService(service string) error {\n\tmatched := v.validDNS.MatchString(service)\n\tif matched {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"service: (%s) is invalid, must be a valid DNS entry\", service)\n}\n\n// DefaultDeployRequest sets default values for the deploy request.\nfunc (v Validator) DefaultDeployRequest(request *types.InferenceDeployment) {\n\tif request.Spec.Scaling == nil {\n\t\trequest.Spec.Scaling = &types.ScalingConfig{}\n\t}\n\n\tif request.Spec.Scaling.MinReplicas == nil {\n\t\trequest.Spec.Scaling.MinReplicas = new(int32)\n\t\t*request.Spec.Scaling.MinReplicas = defaultMinReplicas\n\t}\n\n\tif request.Spec.Scaling.MaxReplicas == nil {\n\t\trequest.Spec.Scaling.MaxReplicas = new(int32)\n\t\t*request.Spec.Scaling.MaxReplicas = defaultMinReplicas\n\t}\n\n\tif request.Spec.Scaling.TargetLoad == nil {\n\t\trequest.Spec.Scaling.TargetLoad = new(int32)\n\t\t*request.Spec.Scaling.TargetLoad = defaultTargetLoad\n\t}\n\n\tif request.Spec.Scaling.Type == nil {\n\t\trequest.Spec.Scaling.Type = new(types.ScalingType)\n\t\t*request.Spec.Scaling.Type = types.ScalingTypeCapacity\n\t}\n\n\tif request.Spec.Scaling.ZeroDuration == nil {\n\t\trequest.Spec.Scaling.ZeroDuration = 
new(int32)\n\t\t*request.Spec.Scaling.ZeroDuration = defaultZeroDuration\n\t}\n\n\tif request.Spec.Scaling.StartupDuration == nil {\n\t\trequest.Spec.Scaling.StartupDuration = new(int32)\n\t\t*request.Spec.Scaling.StartupDuration = defaultStartupDuration\n\t}\n\n\tif request.Spec.Framework == \"\" {\n\t\trequest.Spec.Framework = types.FrameworkOther\n\t}\n}\n\n// ValidateDeployRequest validates that the service name is valid for Kubernetes\nfunc (v Validator) ValidateDeployRequest(request *types.InferenceDeployment) error {\n\n\tif request.Spec.Name == \"\" {\n\t\treturn fmt.Errorf(\"service: is required\")\n\t}\n\n\terr := v.ValidateService(request.Spec.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif request.Spec.Image == \"\" {\n\t\treturn fmt.Errorf(\"image: is required\")\n\t}\n\n\tif request.Spec.Scaling == nil {\n\t\treturn fmt.Errorf(\"scaling: is required\")\n\t}\n\n\tif request.Spec.Framework == types.FrameworkOther {\n\t\tif request.Spec.Port == nil {\n\t\t\treturn fmt.Errorf(\"port: is required for other framework\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (v Validator) ValidateBuildRequest(request *types.Build) error {\n\tif request.Spec.Name == \"\" {\n\t\treturn fmt.Errorf(\"name: is required\")\n\t}\n\n\tif request.Spec.BuildTarget.ArtifactImage == \"\" {\n\t\treturn fmt.Errorf(\"artifact image: is required\")\n\t}\n\n\treturn nil\n}\n\nfunc (v Validator) ValidateImageCacheRequest(request *types.ImageCache) error {\n\tif request.Name == \"\" {\n\t\treturn fmt.Errorf(\"name: is required\")\n\t}\n\n\tif request.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"namespace: is required\")\n\t}\n\n\tif request.Image == \"\" {\n\t\treturn fmt.Errorf(\"image: is required\")\n\t}\n\n\tif request.NodeSelector == \"\" {\n\t\treturn fmt.Errorf(\"node selector: is required\")\n\t}\n\treturn nil\n}\n\nfunc (v Validator) DefaultBuildRequest(request *types.Build) {\n\tif request.Spec.BuildTarget.Builder == \"\" {\n\t\trequest.Spec.BuildTarget.Builder = 
types.BuilderTypeImage\n\t}\n\n\tif request.Spec.BuildTarget.Builder != types.BuilderTypeImage {\n\t\tif request.Spec.Branch == \"\" && request.Spec.Revision == \"\" {\n\t\t\trequest.Spec.Branch = \"main\"\n\t\t}\n\n\t\tif request.Spec.BuildTarget.Duration == \"\" {\n\t\t\trequest.Spec.BuildTarget.Duration = defaultBuildDuration\n\t\t}\n\t}\n\n\tif request.Spec.BuildTarget.ArtifactImageTag == \"\" {\n\t\trequest.Spec.BuildTarget.ArtifactImageTag = rand.String(8)\n\t}\n}\n"
  },
  {
    "path": "agent/pkg/version/version.go",
    "content": "/*\n   Copyright The TensorChord Inc.\n   Copyright The BuildKit Authors.\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\t// Package is filled at linking time\n\tPackage = \"github.com/tensorchord/openmodelz/agent\"\n\n\t// Revision is filled with the VCS (e.g. git) revision being used to build\n\t// the program at linking time.\n\tRevision = \"\"\n\n\tversion         = \"0.0.0+unknown\"\n\tbuildDate       = \"1970-01-01T00:00:00Z\" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'`\n\tgitCommit       = \"\"                     // output from `git rev-parse HEAD`\n\tgitTag          = \"\"                     // output from `git describe --exact-match --tags HEAD` (if clean tree state)\n\tgitTreeState    = \"\"                     // determined from `git status --porcelain`. 
either 'clean' or 'dirty'\n\tdevelopmentFlag = \"false\"\n)\n\n// Version contains version information\ntype Version struct {\n\tVersion      string\n\tBuildDate    string\n\tGitCommit    string\n\tGitTag       string\n\tGitTreeState string\n\tGoVersion    string\n\tCompiler     string\n\tPlatform     string\n}\n\nfunc (v Version) String() string {\n\treturn v.Version\n}\n\n// SetGitTagForE2ETest sets the gitTag for test purpose.\nfunc SetGitTagForE2ETest(tag string) {\n\tgitTag = tag\n}\n\n// GetAgentVersion gets version information\nfunc GetAgentVersion() string {\n\tvar versionStr string\n\n\tif gitCommit != \"\" && gitTag != \"\" &&\n\t\tgitTreeState == \"clean\" && developmentFlag == \"false\" {\n\t\t// if we have a clean tree state and the current commit is tagged,\n\t\t// this is an official release.\n\t\tversionStr = gitTag\n\t} else {\n\t\t// otherwise formulate a version string based on as much metadata\n\t\t// information we have available.\n\t\tif strings.HasPrefix(version, \"v\") {\n\t\t\tversionStr = version\n\t\t} else {\n\t\t\tversionStr = \"v\" + version\n\t\t}\n\t\tif len(gitCommit) >= 7 {\n\t\t\tversionStr += \"+\" + gitCommit[0:7]\n\t\t\tif gitTreeState != \"clean\" {\n\t\t\t\tversionStr += \".dirty\"\n\t\t\t}\n\t\t} else {\n\t\t\tversionStr += \"+unknown\"\n\t\t}\n\t}\n\treturn versionStr\n}\n\n// GetVersion returns the version information\nfunc GetVersion() Version {\n\treturn Version{\n\t\tVersion:      GetAgentVersion(),\n\t\tBuildDate:    buildDate,\n\t\tGitCommit:    gitCommit,\n\t\tGitTag:       gitTag,\n\t\tGitTreeState: gitTreeState,\n\t\tGoVersion:    runtime.Version(),\n\t\tCompiler:     runtime.Compiler,\n\t\tPlatform:     fmt.Sprintf(\"%s/%s\", runtime.GOOS, runtime.GOARCH),\n\t}\n}\n\nvar (\n\treRelease *regexp.Regexp\n\treDev     *regexp.Regexp\n\treOnce    sync.Once\n)\n\nfunc UserAgent() string {\n\tversion := GetVersion().String()\n\n\treOnce.Do(func() {\n\t\treRelease = 
regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+$`)\n\t\treDev = regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+`)\n\t})\n\n\tif matches := reRelease.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1]\n\t} else if matches := reDev.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1] + \"-dev\"\n\t}\n\n\treturn \"openmodelz/agent/\" + version\n}\n"
  },
  {
    "path": "agent/sqlc.yaml",
    "content": "version: \"2\"\nsql:\n  - engine: \"postgresql\"\n    queries: \"sql/query/\"\n    schema: \"./sql/schema.sql\"\n    gen:\n      go:\n        package: \"query\"\n        sql_package: \"pgx/v4\"\n        out: \"pkg/query\"\n        emit_prepared_queries: true\n        emit_interface: true\n        emit_exact_table_names: false\n        emit_json_tags: true\n"
  },
  {
    "path": "autoscaler/.gitignore",
    "content": "# If you prefer the allow list template instead of the deny list, see community template:\n# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore\n#\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n*.report\n\n# Dependency directories (remove the comment below to include it)\nvendor/\n\n# Go workspace file\ngo.work\n\n.vscode/*\n.idea\n\n# Local History for Visual Studio Code\n.history/\n\n# Built Visual Studio Code Extensions\n*.vsix\n\n__debug_bin\nbin/\ndebug-bin/\n/build.envd\n.ipynb_checkpoints/\ncover.html\n\ncmd/test/\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nwheelhouse/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n.demo/\npkg/docs/swagger.*\n"
  },
  {
    "path": "autoscaler/Dockerfile",
    "content": "FROM ubuntu:22.04\n\nLABEL maintainer=\"modelz-support@tensorchord.ai\"\n\nCOPY autoscaler /usr/bin/autoscaler\nENTRYPOINT [\"/usr/bin/autoscaler\"]\n"
  },
  {
    "path": "autoscaler/Makefile",
    "content": "# Copyright 2022 TensorChord Inc.\n#\n# The old school Makefile, following are required targets. The Makefile is written\n# to allow building multiple binaries. You are free to add more targets or change\n# existing implementations, as long as the semantics are preserved.\n#\n#   make              - default to 'build' target\n#   make lint         - code analysis\n#   make test         - run unit test (or plus integration test)\n#   make build        - alias to build-local target\n#   make build-local  - build local binary targets\n#   make build-linux  - build linux binary targets\n#   make container    - build containers\n#   $ docker login registry -u username -p xxxxx\n#   make push         - push containers\n#   make clean        - clean up targets\n#\n# Not included but recommended targets:\n#   make e2e-test\n#\n# The makefile is also responsible to populate project version information.\n#\n\n#\n# Tweak the variables based on your project.\n#\n\n# This repo's root import path (under GOPATH).\nROOT := github.com/tensorchord/openmodelz/autoscaler\n\n# Target binaries. 
You can build multiple binaries for a single project.\nTARGETS := autoscaler\n\n# Container image prefix and suffix added to targets.\n# The final built images are:\n#   $[REGISTRY]/$[IMAGE_PREFIX]$[TARGET]$[IMAGE_SUFFIX]:$[VERSION]\n# $[REGISTRY] is an item from $[REGISTRIES], $[TARGET] is an item from $[TARGETS].\nIMAGE_PREFIX ?= $(strip )\nIMAGE_SUFFIX ?= $(strip )\n\n# Container registries.\nREGISTRY ?= ghcr.io/tensorchord\n\n# Container registry for base images.\nBASE_REGISTRY ?= docker.io\nBASE_REGISTRY_USER ?= modelzai\n\n# Disable CGO by default.\nCGO_ENABLED ?= 0\n\nGOOS ?= $(shell go env GOOS)\nGOARCH ?= $(shell go env GOARCH)\n\n#\n# These variables should not need tweaking.\n#\n\n# It's necessary to set this because some environments don't link sh -> bash.\nexport SHELL := bash\n\n# It's necessary to set the errexit flags for the bash shell.\nexport SHELLOPTS := errexit\n\nPACKAGE_NAME := github.com/tensorchord/openmodelz\nGOLANG_CROSS_VERSION  ?= v1.17.6\n\n# Project main package location (can be multiple ones).\nCMD_DIR := ./cmd\n\n# Project output directory.\nOUTPUT_DIR := ./bin\nDEBUG_DIR := ./debug-bin\n\n# Build directory.\nBUILD_DIR := ./build\n\n# Current version of the project.\nVERSION ?= $(shell git describe --match 'v[0-9]*' --always --tags --abbrev=0)\nBUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\nGIT_COMMIT=$(shell git rev-parse HEAD)\nGIT_TAG=$(shell if [ -z \"`git status --porcelain`\" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)\nGIT_TREE_STATE=$(shell if [ -z \"`git status --porcelain`\" ]; then echo \"clean\" ; else echo \"dirty\"; fi)\nGITSHA ?= $(shell git rev-parse --short HEAD)\n\n# Track code version with Docker Label.\nDOCKER_LABELS ?= git-describe=\"$(shell date -u +v%Y%m%d)-$(shell git describe --tags --always --dirty)\"\n\n# Golang standard bin directory.\nGOPATH ?= $(shell go env GOPATH)\nGOROOT ?= $(shell go env GOROOT)\nBIN_DIR := $(GOPATH)/bin\nGOLANGCI_LINT := $(BIN_DIR)/golangci-lint\n\n# check 
if we need embed the dashboard\nDASHBOARD_BUILD ?= debug\n\n# Default golang flags used in build and test\n# -mod=vendor: force go to use the vendor files instead of using the `$GOPATH/pkg/mod`\n# -p: the number of programs that can be run in parallel\n# -count: run each test and benchmark 1 times. Set this flag to disable test cache\nexport GOFLAGS ?= -count=1\n\n#\n# Define all targets. At least the following commands are required:\n#\n\n# All targets.\n.PHONY: help lint test build container push addlicense debug debug-local build-local generate clean test-local addlicense-install release build-image\n\n.DEFAULT_GOAL:=build\n\nbuild: build-local  ## Build the release version of envd\n\nhelp:  ## Display this help\n\t@awk 'BEGIN {FS = \":.*##\"; printf \"\\nUsage:\\n  make \\033[36m<target>\\033[0m\\n\"} /^[a-zA-Z0-9_-]+:.*?##/ { printf \"  \\033[36m%-15s\\033[0m %s\\n\", $$1, $$2 } /^##@/ { printf \"\\n\\033[1m%s\\033[0m\\n\", substr($$0, 5) } ' $(MAKEFILE_LIST)\n\ndebug: debug-local  ## Build the debug version of envd\n\n# more info about `GOGC` env: https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint\nlint: $(GOLANGCI_LINT)  ## Lint GO code\n\t@$(GOLANGCI_LINT) run\n\n$(GOLANGCI_LINT):\n\tcurl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin\n\nmockgen-install:\n\tgo install github.com/golang/mock/mockgen@v1.6.0\n\naddlicense-install:\n\tgo install github.com/google/addlicense@latest\n\nbuild-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags $(DASHBOARD_BUILD)  -trimpath -v -o $(OUTPUT_DIR)/$${target}     \\\n\t    -ldflags \"-s -w -X $(ROOT)/pkg/version.version=$(VERSION) -X $(ROOT)/pkg/version.buildDate=$(BUILD_DATE) -X $(ROOT)/pkg/version.gitCommit=$(GIT_COMMIT) -X $(ROOT)/pkg/version.gitTreeState=$(GIT_TREE_STATE)\"                     \\\n\t    
$(CMD_DIR)/$${target};                                                         \\\n\tdone\n\n# It is used by vscode to attach into the process.\ndebug-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) go build -tags $(DASHBOARD_BUILD) -trimpath                                    \\\n\t  \t-v -o $(DEBUG_DIR)/$${target}                                                  \\\n\t  \t-gcflags='all=-N -l'                                                           \\\n\t    $(CMD_DIR)/$${target};                                                         \\\n\tdone\n\naddlicense: addlicense-install  ## Add license to GO code files\n\taddlicense -l mpl -c \"TensorChord Inc.\" $$(find . -type f -name '*.go' | grep -v pkg/docs/docs.go)\n\ntest-local:\n\t@go test -tags=$(DASHBOARD_BUILD) -v -race -coverprofile=coverage.out ./...\n\ntest:  ## Run the tests\n\t@go test -tags=$(DASHBOARD_BUILD) -race -coverpkg=./pkg/... -coverprofile=coverage.out ./...\n\t@go tool cover -func coverage.out | tail -n 1 | awk '{ print \"Total coverage: \" $$3 }'\n\nclean:  ## Clean the outputs and artifacts\n\t@-rm -vrf ${OUTPUT_DIR}\n\t@-rm -vrf ${DEBUG_DIR}\n\t@-rm -vrf build dist .eggs *.egg-info\n\nfmt:  ## Run go fmt against code.\n\tgo fmt ./...\n\nvet: ## Run go vet against code.\n\tgo vet ./...\n\nbuild-image: build-local\n\tdocker build -t ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/modelz-autoscaler:dev -f Dockerfile ./bin\n\tdocker push ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/modelz-autoscaler:dev\n\nrelease:\n\t@if [ ! 
-f \".release-env\" ]; then \\\n\t\techo \"\\033[91m.release-env is required for release\\033[0m\";\\\n\t\texit 1;\\\n\tfi\n\tdocker run \\\n\t\t--rm \\\n\t\t--privileged \\\n\t\t-e CGO_ENABLED=1 \\\n\t\t--env-file .release-env \\\n\t\t-v /var/run/docker.sock:/var/run/docker.sock \\\n\t\t-v `pwd`:/go/src/$(PACKAGE_NAME) \\\n\t\t-v `pwd`/sysroot:/sysroot \\\n\t\t-w /go/src/$(PACKAGE_NAME) \\\n\t\tgoreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \\\n\t\trelease --rm-dist\n\ntsschema: swag\n\t@cd dashboard; pnpm tsschema\n\ngenerate: mockgen-install sqlc-install swag tsschema\n\t@mockgen -source pkg/query/querier.go -destination pkg/query/mock/mock.go -package mock\n\t@sqlc generate\n\ndashboard-build:\n\t@cd dashboard; pnpm build\n"
  },
  {
    "path": "autoscaler/cmd/autoscaler/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli/v2\"\n\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/autoscalerapp\"\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/version\"\n)\n\nfunc run(args []string) error {\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tfmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision)\n\t}\n\n\tapp := autoscalerapp.New()\n\treturn app.Run(args)\n}\n\nfunc handleErr(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tlogrus.Error(err)\n}\n\nfunc main() {\n\terr := run(os.Args)\n\thandleErr(err)\n}\n"
  },
  {
    "path": "autoscaler/pkg/autoscaler/factory.go",
    "content": "package autoscaler\n\nimport (\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/prom\"\n)\n\ntype Opt struct {\n\tGatewayHost      string\n\tPrometheusHost   string\n\tBasicAuthEnabled bool\n\tSecretPath       string\n\tPrometheusPort   int\n\n\tInterval time.Duration\n}\n\nfunc New(opt Opt) (*Scaler, error) {\n\tlogrus.Info(\"Creating autoscaler with options: \", opt)\n\n\tgatewayURL, err := url.Parse(opt.GatewayHost)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse gateway host\")\n\t}\n\n\tclient, err := client.NewClientWithOpts(\n\t\tclient.WithHost(gatewayURL.String()),\n\t)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create client\")\n\t}\n\n\tprometheusQuery := prom.NewPrometheusQuery(opt.PrometheusHost, opt.PrometheusPort, &http.Client{})\n\n\tas := newScaler(client, &prometheusQuery, newLoadCache(), newInferenceCache())\n\treturn as, nil\n}\n"
  },
  {
    "path": "autoscaler/pkg/autoscaler/inferencecache.go",
    "content": "package autoscaler\n\nimport (\n\t\"time\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\ntype Inference struct {\n\tDeployment types.InferenceDeployment\n\tTimestamp  time.Time\n}\n\ntype InferenceCache struct {\n\tinference map[string]Inference\n}\n\nfunc newInferenceCache() *InferenceCache {\n\treturn &InferenceCache{\n\t\tinference: make(map[string]Inference),\n\t}\n}\n\nfunc (i *InferenceCache) Set(key string, inference Inference) {\n\ti.inference[key] = inference\n}\n\nfunc (i *InferenceCache) Get(key string, expireTime time.Duration) (types.InferenceDeployment, bool) {\n\tinference, ok := i.inference[key]\n\n\t// expired\n\tif !ok || time.Since(inference.Timestamp) > expireTime {\n\t\treturn types.InferenceDeployment{}, false\n\t}\n\treturn inference.Deployment, ok\n}\n"
  },
  {
    "path": "autoscaler/pkg/autoscaler/loadcache.go",
    "content": "package autoscaler\n\nimport \"time\"\n\n// LoadCache is a cache for load metrics.\ntype LoadCache struct {\n\tload map[string]Load\n}\n\ntype Load struct {\n\tScalingType            string\n\tCurrentStartedRequests float64\n\tCurrentLoad            float64\n\tTimestamp              time.Time\n}\n\nfunc newLoadCache() *LoadCache {\n\treturn &LoadCache{\n\t\tload: make(map[string]Load),\n\t}\n}\n\nfunc (l *LoadCache) Get(key string) (Load, bool) {\n\tload, ok := l.load[key]\n\treturn load, ok\n}\n\nfunc (l *LoadCache) Set(key string, load Load) {\n\tl.load[key] = load\n}\n"
  },
  {
    "path": "autoscaler/pkg/autoscaler/scaler.go",
    "content": "package autoscaler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/scaling\"\n\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/prom\"\n)\n\ntype Scaler struct {\n\tPromQuery      *prom.PrometheusQuery\n\tclient         *client.Client\n\tLoadCache      *LoadCache\n\tZeroCache      map[string]time.Time\n\tInferenceCache *InferenceCache\n}\n\nfunc newScaler(c *client.Client,\n\tpromQuery *prom.PrometheusQuery,\n\tloadCache *LoadCache,\n\tinferanceCache *InferenceCache) *Scaler {\n\treturn &Scaler{\n\t\tclient:         c,\n\t\tPromQuery:      promQuery,\n\t\tLoadCache:      loadCache,\n\t\tZeroCache:      make(map[string]time.Time),\n\t\tInferenceCache: inferanceCache,\n\t}\n}\n\nfunc (s *Scaler) AutoScale(interval time.Duration) {\n\tticker := time.NewTicker(interval)\n\tquit := make(chan struct{})\n\n\tTTL := 1 * time.Minute\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t// Detect if the instance pod always restart,\n\t\t\t// if pod restart count in 10 minutes before last update time is more than 2, will scale it down.\n\t\t\tresults, err := s.GetRestartMetrics()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Info(\"Get Restart Metrics of inference Failed\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinferenceCount := make(map[string]int)\n\t\t\tfor _, ts := range results {\n\t\t\t\tlabels := ts.Labels\n\t\t\t\tpodName, inferenceName, namespace := \"\", \"\", \"\"\n\t\t\t\tfor _, label := range labels {\n\t\t\t\t\tswitch label.Name {\n\t\t\t\t\tcase \"pod\":\n\t\t\t\t\t\tpodName = label.Value\n\t\t\t\t\tcase \"inference_name\":\n\t\t\t\t\t\tinferenceName = label.Value\n\t\t\t\t\tcase \"namespace\":\n\t\t\t\t\t\tnamespace = label.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ts.Samples) < 1 {\n\t\t\t\t\tlogrus.Infof(\"Sample not found for inference %s.\", 
inferenceName)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstrs := strings.Split(inferenceName, \".\")\n\t\t\t\tif len(strs) != 2 {\n\t\t\t\t\tlogrus.Infof(\"Invalid inference name: %s\", inferenceName)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tname := strs[0]\n\t\t\t\tresp, err := s.client.InstanceList(context.TODO(), namespace, name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\"service\": inferenceName,\n\t\t\t\t\t\t\"error\":   err,\n\t\t\t\t\t}).Error(\"failed to get instance list\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, instance := range resp {\n\t\t\t\t\tif instance.Spec.Name == podName {\n\t\t\t\t\t\tif instance.Status.Phase == \"CrashLoopBackOff\" {\n\t\t\t\t\t\t\tinferenceCount[inferenceName] += 1\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(inferenceCount) != 0 {\n\t\t\t\tfor inferenceName, count := range inferenceCount {\n\t\t\t\t\tstrs := strings.Split(inferenceName, \".\")\n\t\t\t\t\tif len(strs) != 2 {\n\t\t\t\t\t\tlogrus.Infof(\"Invalid inference name: %s\", inferenceName)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tname := strs[0]\n\t\t\t\t\tnamespace := strs[1]\n\n\t\t\t\t\tresp, ok := s.InferenceCache.Get(inferenceName, TTL)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tresp, err = s.client.InferenceGet(context.TODO(), namespace, name)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\t\t\"service\": inferenceName,\n\t\t\t\t\t\t\t\t\"error\":   err,\n\t\t\t\t\t\t\t}).Error(\"failed to get inference\")\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// update inference cache\n\t\t\t\t\t\tinference := Inference{\n\t\t\t\t\t\t\tTimestamp:  time.Now(),\n\t\t\t\t\t\t\tDeployment: resp,\n\t\t\t\t\t\t}\n\t\t\t\t\t\ts.InferenceCache.Set(inferenceName, inference)\n\t\t\t\t\t}\n\t\t\t\t\t// check if the instance already exists\n\t\t\t\t\tvar expectedReplicas int\n\t\t\t\t\ttotalReplicas := resp.Status.Replicas\n\t\t\t\t\tif count > int(totalReplicas) 
{\n\t\t\t\t\t\texpectedReplicas = 0\n\t\t\t\t\t} else {\n\t\t\t\t\t\texpectedReplicas = int(totalReplicas) - count\n\t\t\t\t\t}\n\n\t\t\t\t\tif expectedReplicas != int(totalReplicas) {\n\t\t\t\t\t\tlogrus.Infof(\"Scaling inference %s to %d replicas\", inferenceName, expectedReplicas)\n\t\t\t\t\t\t// Add event to record the scale down operation\n\t\t\t\t\t\teventMessage := fmt.Sprintf(\"Deployment %d replicas always CrashLoopBackOff, system scale down the deployment replicas to %d\", count, expectedReplicas)\n\t\t\t\t\t\tif err := s.client.InferenceScale(context.TODO(),\n\t\t\t\t\t\t\tnamespace, name, expectedReplicas, eventMessage); err != nil {\n\t\t\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\t\t\"service\":  inferenceName,\n\t\t\t\t\t\t\t\t\"expected\": expectedReplicas,\n\t\t\t\t\t\t\t\t\"error\":    err,\n\t\t\t\t\t\t\t}).Error(\"failed to scale inference\")\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// update the inference, set minReplicas to expectedReplicas\n\t\t\t\t\t\tif resp.Spec.Scaling.MinReplicas != nil &&\n\t\t\t\t\t\t\t*resp.Spec.Scaling.MinReplicas > int32(expectedReplicas) {\n\t\t\t\t\t\t\tresp.Status.EventMessage = fmt.Sprintf(\"Deployment %d replicas always CrashLoopBackOff, system scales down the replicas to %d, original min replicas is %d, reset it to %d\",\n\t\t\t\t\t\t\t\tcount, expectedReplicas, resp.Spec.Scaling.MinReplicas,\n\t\t\t\t\t\t\t\texpectedReplicas)\n\t\t\t\t\t\t\t*resp.Spec.Scaling.MinReplicas = int32(expectedReplicas)\n\t\t\t\t\t\t\tif _, err := s.client.DeploymentUpdate(context.TODO(), namespace, resp); err != nil {\n\t\t\t\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\t\t\t\"service\":  inferenceName,\n\t\t\t\t\t\t\t\t\t\"expected\": expectedReplicas,\n\t\t\t\t\t\t\t\t\t\"error\":    err,\n\t\t\t\t\t\t\t\t}).Error(\"failed to update inference\")\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ts.LoadCache = 
newLoadCache()\n\t\t\ts.GetLoadMetrics()\n\n\t\t\tfor service, lc := range s.LoadCache.load {\n\t\t\t\t// if instances of inference are restarting, do not scale it.\n\t\t\t\tif value, ok := inferenceCount[service]; ok && value > 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tstrs := strings.Split(service, \".\")\n\t\t\t\tif len(strs) != 2 {\n\t\t\t\t\tlogrus.Infof(\"Invalid inference name: %s\", service)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tname := strs[0]\n\t\t\t\tnamespace := strs[1]\n\n\t\t\t\tresp, ok := s.InferenceCache.Get(service, TTL)\n\t\t\t\tif !ok {\n\t\t\t\t\tresp, err = s.client.InferenceGet(context.TODO(), namespace, name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\t\"service\": service,\n\t\t\t\t\t\t\t\"error\":   err,\n\t\t\t\t\t\t}).Error(\"failed to get inference\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// update inference cache\n\t\t\t\t\tinference := Inference{\n\t\t\t\t\t\tTimestamp:  time.Now(),\n\t\t\t\t\t\tDeployment: resp,\n\t\t\t\t\t}\n\t\t\t\t\ts.InferenceCache.Set(service, inference)\n\t\t\t\t}\n\n\t\t\t\tif resp.Spec.Labels == nil {\n\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\"service\": service,\n\t\t\t\t\t\t\"error\":   err,\n\t\t\t\t\t}).Error(\"failed to get inference labels\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tvar expectedReplicas int\n\t\t\t\tvar targetLoad int\n\t\t\t\t// If the inference has a target load label, use that instead.\n\t\t\t\tif resp.Spec.Scaling != nil && resp.Spec.Scaling.TargetLoad != nil {\n\t\t\t\t\ttargetLoad = int(*resp.Spec.Scaling.TargetLoad)\n\t\t\t\t\texpectedReplicas = int(math.Ceil(\n\t\t\t\t\t\tlc.CurrentLoad / float64(*resp.Spec.Scaling.TargetLoad)))\n\t\t\t\t}\n\n\t\t\t\tif expectedReplicas == 0 {\n\t\t\t\t\t// Check the current start requests to see if the inference is being used.\n\t\t\t\t\tif lc.CurrentStartedRequests > 0 {\n\t\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\t\"service\":                  
service,\n\t\t\t\t\t\t\t\"current_started_requests\": lc.CurrentStartedRequests,\n\t\t\t\t\t\t\t\"target_load\":              lc.CurrentLoad,\n\t\t\t\t\t\t}).Debug(\"inference is being used\")\n\t\t\t\t\t\texpectedReplicas = 1\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar maxReplicas, minReplicas int\n\t\t\t\tvar zeroDuration time.Duration\n\t\t\t\tif resp.Spec.Scaling != nil {\n\t\t\t\t\tif resp.Spec.Scaling.MinReplicas != nil {\n\t\t\t\t\t\tminReplicas = int(*resp.Spec.Scaling.MinReplicas)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tminReplicas = scaling.DefaultMinReplicas\n\t\t\t\t\t}\n\n\t\t\t\t\tif resp.Spec.Scaling.MaxReplicas != nil {\n\t\t\t\t\t\tmaxReplicas = int(*resp.Spec.Scaling.MaxReplicas)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmaxReplicas = scaling.DefaultMaxReplicas\n\t\t\t\t\t}\n\n\t\t\t\t\tif resp.Spec.Scaling.ZeroDuration != nil {\n\t\t\t\t\t\tzeroDuration = time.Duration(*resp.Spec.Scaling.ZeroDuration) * time.Second\n\t\t\t\t\t} else {\n\t\t\t\t\t\tzeroDuration = scaling.DefaultZeroDuration\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif expectedReplicas > maxReplicas {\n\t\t\t\t\tlogrus.Infof(\"Expected replicas (%d) exceeds max replicas (%d) for inference %s\", expectedReplicas, maxReplicas, service)\n\t\t\t\t\texpectedReplicas = maxReplicas\n\t\t\t\t}\n\t\t\t\tif expectedReplicas < minReplicas {\n\t\t\t\t\tlogrus.Infof(\"Expected replicas (%d) is less than min replicas (%d) for inference %s\", expectedReplicas, minReplicas, service)\n\t\t\t\t\texpectedReplicas = minReplicas\n\t\t\t\t}\n\n\t\t\t\tavailableReplicas := resp.Status.AvailableReplicas\n\t\t\t\ttotalReplicas := resp.Status.Replicas\n\n\t\t\t\tif expectedReplicas == int(totalReplicas) {\n\t\t\t\t\t// If the expected replicas is the same as the current replicas, remove the entry from the zero cache.\n\t\t\t\t\tdelete(s.ZeroCache, service)\n\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\"service\":          service,\n\t\t\t\t\t\t\"replicas\":         totalReplicas,\n\t\t\t\t\t\t\"expectedReplicas\": 
expectedReplicas,\n\t\t\t\t\t}).Debug(\"delete zero cache\")\n\t\t\t\t}\n\n\t\t\t\tif expectedReplicas == 0 && totalReplicas != 0 {\n\t\t\t\t\tif availableReplicas == 0 {\n\t\t\t\t\t\t// If the expected replicas is 0 and there are no available replicas,\n\t\t\t\t\t\t// set the expected replicas to 1 to prevent the inference from being scaled to zero.\n\t\t\t\t\t\texpectedReplicas = 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// If the expected replicas is 0 and there is no entry in the zero cache, add one.\n\t\t\t\t\t\tif _, ok := s.ZeroCache[service]; !ok {\n\t\t\t\t\t\t\ts.ZeroCache[service] = time.Now()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// If the inference has been idle for longer than the zero duration, scale to zero.\n\t\t\t\t\t\tif time.Since(s.ZeroCache[service]) > zeroDuration {\n\t\t\t\t\t\t\tlogrus.Infof(\"Inference %s has been idle for %s, scaling to zero\", service, zeroDuration)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// If the inference has not been idle for longer than the zero duration, scale to 1.\n\t\t\t\t\t\t\texpectedReplicas = 1\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif expectedReplicas == 1 && totalReplicas == 0 {\n\t\t\t\t\t// If the expected replicas is 1 and the current replicas is 0, do nothing since the scaling handler in gateway will take care of this situation.\n\t\t\t\t\texpectedReplicas = 0\n\t\t\t\t}\n\n\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\"service\":           service,\n\t\t\t\t\t\"replicas\":          totalReplicas,\n\t\t\t\t\t\"expectedReplicas\":  expectedReplicas,\n\t\t\t\t\t\"availableReplicas\": availableReplicas,\n\t\t\t\t\t\"currentLoad\":       lc.CurrentLoad,\n\t\t\t\t\t\"targetLoad\":        targetLoad,\n\t\t\t\t\t\"zeroDuration\":      zeroDuration,\n\t\t\t\t\t\"zeroCache\":         s.ZeroCache[service],\n\t\t\t\t}).Debug(\"start scaling (replicas)\")\n\n\t\t\t\tif expectedReplicas != int(totalReplicas) {\n\t\t\t\t\tdelete(s.ZeroCache, service)\n\t\t\t\t\tlogrus.Infof(\"Scaling inference %s to %d replicas\", 
service, expectedReplicas)\n\t\t\t\t\teventMessage := fmt.Sprintf(\"Scaling inference based load, current %f, target %d\",\n\t\t\t\t\t\tlc.CurrentLoad, targetLoad)\n\t\t\t\t\tif err := s.client.InferenceScale(context.TODO(),\n\t\t\t\t\t\tnamespace, name, expectedReplicas, eventMessage); err != nil {\n\t\t\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\t\t\"service\":  service,\n\t\t\t\t\t\t\t\"expected\": expectedReplicas,\n\t\t\t\t\t\t\t\"error\":    err,\n\t\t\t\t\t\t}).Error(\"failed to scale inference\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-quit:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (s *Scaler) GetLoadMetrics() {\n\tresults, err := s.PromQuery.Fetch(url.QueryEscape(\"job:inference_current_load:sum\"))\n\tif err != nil {\n\t\t// log the error but continue, the mixIn will correctly handle the empty results.\n\t\tlogrus.Infof(\"Error querying Prometheus: %s\\n\", err.Error())\n\t}\n\n\tcurrentSumResults, err := s.PromQuery.Fetch(\n\t\turl.QueryEscape(\"job:inference_current_started:max_sum\"))\n\tif err != nil {\n\t\t// log the error but continue, the mixIn will correctly handle the empty results.\n\t\tlogrus.Infof(\"Error querying Prometheus: %s\\n\", err.Error())\n\t}\n\n\tfor _, result := range results.Data.Result {\n\t\tcurrentLoad := 0.0\n\n\t\tswitch val := result.Value[1].(type) {\n\t\tcase string:\n\t\t\tf, err := strconv.ParseFloat(val, 64)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Infof(\"add_metrics: unable to convert value %q for metric: %s\", val, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcurrentLoad = f\n\t\t}\n\n\t\ttimestamp := time.Now()\n\t\tswitch val := result.Value[0].(type) {\n\t\tcase float64:\n\t\t\ttimestamp = time.Unix(int64(val), 0)\n\t\t}\n\n\t\tif l, ok := s.LoadCache.Get(result.Metric.InferenceName); ok {\n\t\t\tl.CurrentLoad = currentLoad\n\t\t\tl.Timestamp = timestamp\n\t\t\ts.LoadCache.Set(result.Metric.InferenceName, l)\n\t\t} else {\n\t\t\ts.LoadCache.Set(result.Metric.InferenceName, 
Load{\n\t\t\t\tCurrentLoad: currentLoad,\n\t\t\t\tTimestamp:   timestamp,\n\t\t\t})\n\t\t}\n\t}\n\n\tfor _, result := range currentSumResults.Data.Result {\n\t\tcurrentSum := 0.0\n\n\t\tswitch val := result.Value[1].(type) {\n\t\tcase string:\n\t\t\tf, err := strconv.ParseFloat(val, 64)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Infof(\"add_metrics: unable to convert value %q for metric: %s\", val, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcurrentSum = f\n\t\t}\n\n\t\ttimestamp := time.Now()\n\t\tswitch val := result.Value[0].(type) {\n\t\tcase float64:\n\t\t\ttimestamp = time.Unix(int64(val), 0)\n\t\t}\n\n\t\tif l, ok := s.LoadCache.Get(result.Metric.InferenceName); ok {\n\t\t\tl.CurrentStartedRequests = currentSum\n\t\t\tl.Timestamp = timestamp\n\t\t\ts.LoadCache.Set(result.Metric.InferenceName, l)\n\t\t} else {\n\t\t\ts.LoadCache.Set(result.Metric.InferenceName, Load{\n\t\t\t\tCurrentStartedRequests: currentSum,\n\t\t\t\tTimestamp:              timestamp,\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc (s *Scaler) GetRestartMetrics() ([]*prom.TimeSeries, error) {\n\t// record this rule in prometheus\n\t// (sum by (pod,namespace) (increase(kube_pod_container_status_restarts_total{namespace=~\"modelz-(.*)\"}[10m])) > 2) * on (pod) group_left(inference_name) (label_join(label_replace(kube_pod_info{created_by_kind=\"ReplicaSet\",namespace=~\"modelz-(.*)\"}, \"inference\", \"$1\", \"created_by_name\", \"(.+)-.+\"), \"inference_name\",\".\",\"inference\",\"namespace\"))\n\tquery := \"pod_restart_count_over_2_10m\"\n\ttsList, err := s.PromQuery.Query(query, time.Now())\n\tif err != nil {\n\t\tlogrus.Infof(\"Error querying Prometheus: %s\\n\", err.Error())\n\t\treturn nil, err\n\t}\n\n\treturn tsList, nil\n}\n"
  },
  {
    "path": "autoscaler/pkg/autoscalerapp/root.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage autoscalerapp\n\nimport (\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/sirupsen/logrus\"\n\tcli \"github.com/urfave/cli/v2\"\n\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/autoscaler\"\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/server\"\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/version\"\n)\n\ntype EnvdServerApp struct {\n\t*cli.App\n}\n\nfunc New() EnvdServerApp {\n\tinternalApp := cli.NewApp()\n\tinternalApp.EnableBashCompletion = true\n\tinternalApp.Name = \"modelz-autoscaler\"\n\tinternalApp.Usage = \"Autoscaler for modelz serverless inference platform\"\n\tinternalApp.HideHelpCommand = true\n\tinternalApp.HideVersion = false\n\tinternalApp.Version = version.GetVersion().String()\n\tinternalApp.Flags = []cli.Flag{\n\t\t&cli.BoolFlag{\n\t\t\tName:  \"debug\",\n\t\t\tUsage: \"enable debug output in logs\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    \"gateway-host\",\n\t\t\tUsage:   \"host for gateway\",\n\t\t\tEnvVars: []string{\"MODELZ_GATEWAY_HOST\"},\n\t\t\tAliases: []string{\"gh\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    \"prometheus-host\",\n\t\t\tUsage:   \"host for prometheus\",\n\t\t\tValue:   \"prometheus\",\n\t\t\tEnvVars: []string{\"MODELZ_PROMETHEUS_HOST\"},\n\t\t\tAliases: []string{\"ph\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    \"prometheus-port\",\n\t\t\tUsage:   \"port for prometheus\",\n\t\t\tValue:   9090,\n\t\t\tEnvVars: []string{\"MODELZ_PROMETHEUS_PORT\"},\n\t\t\tAliases: []string{\"pp\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:    \"basic-auth\",\n\t\t\tUsage:   \"enable basic auth\",\n\t\t\tEnvVars: []string{\"MODELZ_BASIC_AUTH\"},\n\t\t\tAliases: []string{\"ba\"},\n\t\t\tValue:   true,\n\t\t},\n\t\t&cli.PathFlag{\n\t\t\tName:    
\"secret-path\",\n\t\t\tUsage:   \"path to secrets\",\n\t\t\tValue:   \"/var/modelz/secrets\",\n\t\t\tEnvVars: []string{\"MODELZ_SECRET_PATH\"},\n\t\t\tAliases: []string{\"sp\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    \"interval\",\n\t\t\tUsage:   \"interval for autoscaling\",\n\t\t\tValue:   time.Second,\n\t\t\tEnvVars: []string{\"MODELZ_INTERVAL\"},\n\t\t\tAliases: []string{\"i\"},\n\t\t},\n\t}\n\tinternalApp.Action = runServer\n\n\t// Deal with debug flag.\n\tvar debugEnabled bool\n\n\tinternalApp.Before = func(context *cli.Context) error {\n\t\tdebugEnabled = context.Bool(\"debug\")\n\n\t\tif debugEnabled {\n\t\t\tlogrus.SetReportCaller(true)\n\t\t\tlogrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})\n\t\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\t} else {\n\t\t\tlogrus.SetFormatter(&logrus.JSONFormatter{})\n\t\t}\n\n\t\treturn nil\n\t}\n\treturn EnvdServerApp{\n\t\tApp: internalApp,\n\t}\n}\n\nfunc runServer(clicontext *cli.Context) error {\n\topt := autoscaler.Opt{\n\t\tGatewayHost:      clicontext.String(\"gateway-host\"),\n\t\tPrometheusHost:   clicontext.String(\"prometheus-host\"),\n\t\tBasicAuthEnabled: clicontext.Bool(\"basic-auth\"),\n\t\tSecretPath:       clicontext.Path(\"secret-path\"),\n\t\tPrometheusPort:   clicontext.Int(\"prometheus-port\"),\n\t\tInterval:         clicontext.Duration(\"interval\"),\n\t}\n\n\tas, err := autoscaler.New(opt)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create autoscaler\")\n\t}\n\n\tlogrus.Info(\"starting system info server\")\n\tgo server.RunInfoServe()\n\n\tlogrus.Info(\"starting autoscaler\")\n\tas.AutoScale(opt.Interval)\n\treturn nil\n}\n"
  },
  {
    "path": "autoscaler/pkg/prom/prom.go",
    "content": "package prom\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/api\"\n\tpromapiv1 \"github.com/prometheus/client_golang/api/prometheus/v1\"\n\tprommodel \"github.com/prometheus/common/model\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// PrometheusQuery represents parameters for querying Prometheus\ntype PrometheusQuery struct {\n\tPort   int\n\tHost   string\n\tClient *http.Client\n}\n\ntype PrometheusQueryFetcher interface {\n\tFetch(query string) (*VectorQueryResponse, error)\n}\n\n// NewPrometheusQuery create a NewPrometheusQuery\nfunc NewPrometheusQuery(host string, port int, client *http.Client) PrometheusQuery {\n\treturn PrometheusQuery{\n\t\tClient: client,\n\t\tHost:   host,\n\t\tPort:   port,\n\t}\n}\n\n// Fetch queries aggregated stats\nfunc (q PrometheusQuery) Fetch(query string) (*VectorQueryResponse, error) {\n\n\treq, reqErr := http.NewRequest(http.MethodGet, fmt.Sprintf(\"http://%s:%d/api/v1/query?query=%s\", q.Host, q.Port, query), nil)\n\tif reqErr != nil {\n\t\treturn nil, reqErr\n\t}\n\n\tres, getErr := q.Client.Do(req)\n\tif getErr != nil {\n\t\treturn nil, getErr\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tbytesOut, readErr := ioutil.ReadAll(res.Body)\n\tif readErr != nil {\n\t\treturn nil, readErr\n\t}\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"unexpected status code from Prometheus want: %d, got: %d, body: %s\", http.StatusOK, res.StatusCode, string(bytesOut))\n\t}\n\n\tvar values VectorQueryResponse\n\n\tunmarshalErr := json.Unmarshal(bytesOut, &values)\n\tif unmarshalErr != nil {\n\t\treturn nil, fmt.Errorf(\"error unmarshalling result: %s, '%s'\", unmarshalErr, string(bytesOut))\n\t}\n\n\treturn &values, nil\n}\n\n// TODO(xieydd) Refactor PrometheusQuery\n// Query queries Prometheus with given query string and time\nfunc (q PrometheusQuery) Query(query string, time time.Time) 
([]*TimeSeries, error) {\n\tvar ts []*TimeSeries\n\tclient, err := api.NewClient(api.Config{\n\t\tAddress: fmt.Sprintf(\"http://%s:%d\", q.Host, q.Port),\n\t})\n\tif err != nil {\n\t\treturn ts, err\n\t}\n\n\tapi := promapiv1.NewAPI(client)\n\tresults, warnings, err := api.Query(context.TODO(), query, time)\n\tif len(warnings) != 0 {\n\t\tlogrus.Info(\"Prom query warnings\", \"warnings\", warnings)\n\t}\n\tif err != nil {\n\t\treturn ts, err\n\t}\n\tlogrus.Info(\"Prom query result\", \"result\", results.String(), \"resultsType\", results.Type())\n\treturn convertPromResultsToTimeSeries(results)\n}\n\nfunc convertPromResultsToTimeSeries(value prommodel.Value) ([]*TimeSeries, error) {\n\tvar results []*TimeSeries\n\ttypeValue := value.Type()\n\tswitch typeValue {\n\tcase prommodel.ValMatrix:\n\t\tif matrix, ok := value.(prommodel.Matrix); ok {\n\t\t\tfor _, sampleStream := range matrix {\n\t\t\t\tif sampleStream == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tts := NewTimeSeries()\n\t\t\t\tfor key, val := range sampleStream.Metric {\n\t\t\t\t\tts.AppendLabel(string(key), string(val))\n\t\t\t\t}\n\t\t\t\tfor _, pair := range sampleStream.Values {\n\t\t\t\t\tts.AppendSample(int64(pair.Timestamp/1000), float64(pair.Value))\n\t\t\t\t}\n\t\t\t\tresults = append(results, ts)\n\t\t\t}\n\t\t\treturn results, nil\n\t\t} else {\n\t\t\treturn results, fmt.Errorf(\"prometheus value type is %v, but assert failed\", typeValue)\n\t\t}\n\n\tcase prommodel.ValVector:\n\t\tif vector, ok := value.(prommodel.Vector); ok {\n\t\t\tfor _, sample := range vector {\n\t\t\t\tif sample == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tts := NewTimeSeries()\n\t\t\t\tfor key, val := range sample.Metric {\n\t\t\t\t\tts.AppendLabel(string(key), string(val))\n\t\t\t\t}\n\t\t\t\t// for vector, all the sample has the same timestamp. 
just one point for each metric\n\t\t\t\tts.AppendSample(int64(sample.Timestamp/1000), float64(sample.Value))\n\t\t\t\tresults = append(results, ts)\n\t\t\t}\n\t\t\treturn results, nil\n\t\t} else {\n\t\t\treturn results, fmt.Errorf(\"prometheus value type is %v, but assert failed\", typeValue)\n\t\t}\n\tcase prommodel.ValScalar:\n\t\treturn results, fmt.Errorf(\"not support for scalar when use timeseries\")\n\tcase prommodel.ValString:\n\t\treturn results, fmt.Errorf(\"not support for string when use timeseries\")\n\tcase prommodel.ValNone:\n\t\treturn results, fmt.Errorf(\"prometheus return value type is none\")\n\t}\n\treturn results, fmt.Errorf(\"prometheus return unknown model value type %v\", typeValue)\n}\n"
  },
  {
    "path": "autoscaler/pkg/prom/types.go",
    "content": "package prom\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\ntype VectorQueryResponse struct {\n\tData struct {\n\t\tResult []struct {\n\t\t\tMetric struct {\n\t\t\t\tInferenceName string `json:\"inference_name\"`\n\t\t\t}\n\t\t\tValue []interface{} `json:\"value\"`\n\t\t}\n\t}\n}\n\n// Ref: https://github.com/gocrane/crane/blob/9aaeb2aa9cf9f43a31842b4663e48bc47ac05f17/pkg/common/types.go\n// TimeSeries is a stream of samples that belong to a metric with a set of labels\ntype TimeSeries struct {\n\t// A collection of Labels that are attached by monitoring system as metadata\n\t// for the metrics, which are known as dimensions.\n\tLabels []Label\n\t// A collection of Samples in chronological order.\n\tSamples []Sample\n}\n\n// Sample pairs a Value with a Timestamp.\ntype Sample struct {\n\tValue     float64\n\tTimestamp int64\n}\n\n// A Label is a Name and Value pair that provides additional information about the metric.\n// It is metadata for the metric. For example, Kubernetes pod metrics always have\n// 'namespace' label that represents which namespace the pod belongs to.\ntype Label struct {\n\tName  string\n\tValue string\n}\n\nfunc (s *Sample) String() string {\n\treturn fmt.Sprintf(\"%d %f\", s.Timestamp, s.Value)\n}\n\nfunc (l *Label) String() string {\n\treturn l.Name + \"=\" + l.Value\n}\n\nfunc (ts *TimeSeries) SetLabels(labels []Label) {\n\tts.Labels = labels\n}\n\nfunc (ts *TimeSeries) SetSamples(samples []Sample) {\n\tts.Samples = samples\n}\n\nfunc (ts *TimeSeries) AppendLabel(key, val string) {\n\tts.Labels = append(ts.Labels, Label{key, val})\n}\n\nfunc (ts *TimeSeries) AppendSample(timestamp int64, val float64) {\n\tts.Samples = append(ts.Samples, Sample{Timestamp: timestamp, Value: val})\n}\n\nfunc (ts *TimeSeries) SortSampleAsc() {\n\tsort.Slice(ts.Samples, func(i, j int) bool {\n\t\tif ts.Samples[i].Timestamp < ts.Samples[j].Timestamp {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t})\n}\n\nfunc NewTimeSeries() 
*TimeSeries {\n\treturn &TimeSeries{\n\t\tLabels:  make([]Label, 0),\n\t\tSamples: make([]Sample, 0),\n\t}\n}\n"
  },
  {
    "path": "autoscaler/pkg/server/status.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage server\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/autoscaler/pkg/version\"\n)\n\nfunc getInfo(w http.ResponseWriter, r *http.Request) {\n\tscalerInfo := map[string]string{\"version\": version.GetEnvdVersion()}\n\tjsonOut, marshalErr := json.Marshal(scalerInfo)\n\tif marshalErr != nil {\n\t\tlogrus.Infof(\"Error during unmarshal of autoscaler info request %s\\n\", marshalErr.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(jsonOut)\n}\n\nfunc RunInfoServe() {\n\ttcpPort := 8080\n\n\tserverMux := http.NewServeMux()\n\tserverMux.HandleFunc(\"/system/info\", getInfo)\n\n\ts := &http.Server{\n\t\tAddr:           fmt.Sprintf(\":%d\", tcpPort),\n\t\tReadTimeout:    10 * time.Second,\n\t\tWriteTimeout:   10 * time.Second,\n\t\tMaxHeaderBytes: http.DefaultMaxHeaderBytes, // 1MB - can be overridden by setting Server.MaxHeaderBytes.\n\t\tHandler:        serverMux,\n\t}\n\ts.ListenAndServe()\n}\n"
  },
  {
    "path": "autoscaler/pkg/version/version.go",
    "content": "/*\n   Copyright The TensorChord Inc.\n   Copyright The BuildKit Authors.\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\t// Package is filled at linking time\n\tPackage = \"github.com/tensorchord/openmodelz/autoscaler\"\n\n\t// Revision is filled with the VCS (e.g. git) revision being used to build\n\t// the program at linking time.\n\tRevision = \"\"\n\n\tversion         = \"0.0.0+unknown\"\n\tbuildDate       = \"1970-01-01T00:00:00Z\" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'`\n\tgitCommit       = \"\"                     // output from `git rev-parse HEAD`\n\tgitTag          = \"\"                     // output from `git describe --exact-match --tags HEAD` (if clean tree state)\n\tgitTreeState    = \"\"                     // determined from `git status --porcelain`. 
either 'clean' or 'dirty'\n\tdevelopmentFlag = \"false\"\n)\n\n// Version contains envd version information\ntype Version struct {\n\tVersion      string\n\tBuildDate    string\n\tGitCommit    string\n\tGitTag       string\n\tGitTreeState string\n\tGoVersion    string\n\tCompiler     string\n\tPlatform     string\n}\n\nfunc (v Version) String() string {\n\treturn v.Version\n}\n\n// SetGitTagForE2ETest sets the gitTag for test purpose.\nfunc SetGitTagForE2ETest(tag string) {\n\tgitTag = tag\n}\n\n// GetEnvdVersion gets Envd version information\nfunc GetEnvdVersion() string {\n\tvar versionStr string\n\n\tif gitCommit != \"\" && gitTag != \"\" &&\n\t\tgitTreeState == \"clean\" && developmentFlag == \"false\" {\n\t\t// if we have a clean tree state and the current commit is tagged,\n\t\t// this is an official release.\n\t\tversionStr = gitTag\n\t} else {\n\t\t// otherwise formulate a version string based on as much metadata\n\t\t// information we have available.\n\t\tif strings.HasPrefix(version, \"v\") {\n\t\t\tversionStr = version\n\t\t} else {\n\t\t\tversionStr = \"v\" + version\n\t\t}\n\t\tif len(gitCommit) >= 7 {\n\t\t\tversionStr += \"+\" + gitCommit[0:7]\n\t\t\tif gitTreeState != \"clean\" {\n\t\t\t\tversionStr += \".dirty\"\n\t\t\t}\n\t\t} else {\n\t\t\tversionStr += \"+unknown\"\n\t\t}\n\t}\n\treturn versionStr\n}\n\n// GetVersion returns the version information\nfunc GetVersion() Version {\n\treturn Version{\n\t\tVersion:      GetEnvdVersion(),\n\t\tBuildDate:    buildDate,\n\t\tGitCommit:    gitCommit,\n\t\tGitTag:       gitTag,\n\t\tGitTreeState: gitTreeState,\n\t\tGoVersion:    runtime.Version(),\n\t\tCompiler:     runtime.Compiler,\n\t\tPlatform:     fmt.Sprintf(\"%s/%s\", runtime.GOOS, runtime.GOARCH),\n\t}\n}\n\nvar (\n\treRelease *regexp.Regexp\n\treDev     *regexp.Regexp\n\treOnce    sync.Once\n)\n\nfunc UserAgent() string {\n\tversion := GetVersion().String()\n\n\treOnce.Do(func() {\n\t\treRelease = 
regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+$`)\n\t\treDev = regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+`)\n\t})\n\n\tif matches := reRelease.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1]\n\t} else if matches := reDev.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1] + \"-dev\"\n\t}\n\n\treturn \"envd/\" + version\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/tensorchord/openmodelz\n\ngo 1.20\n\nreplace (\n\tgithub.com/anthhub/forwarder => github.com/tensorchord/forwarder v0.0.0-20230713171536-b1b52b398d3a\n\tgithub.com/senthilrch/kube-fledged v0.10.0 => github.com/tensorchord/kube-fledged v0.2.0\n)\n\nrequire (\n\tgithub.com/anthhub/forwarder v1.1.0\n\tgithub.com/cockroachdb/errors v1.10.0\n\tgithub.com/dchest/uniuri v1.2.0\n\tgithub.com/dgraph-io/ristretto v0.1.1\n\tgithub.com/docker/docker v23.0.2+incompatible\n\tgithub.com/docker/go-connections v0.4.0\n\tgithub.com/dustinkirkland/golang-petname v0.0.0-20230626224747-e794b9370d49\n\tgithub.com/gin-contrib/cors v1.4.0\n\tgithub.com/gin-gonic/gin v1.9.1\n\tgithub.com/golang/mock v1.5.0\n\tgithub.com/google/go-cmp v0.5.9\n\tgithub.com/google/uuid v1.3.0\n\tgithub.com/gorilla/websocket v1.4.2\n\tgithub.com/jackc/pgconn v1.14.1\n\tgithub.com/jackc/pgx/v4 v4.18.1\n\tgithub.com/jedib0t/go-pretty/v6 v6.4.6\n\tgithub.com/moby/moby v24.0.4+incompatible\n\tgithub.com/moby/term v0.5.0\n\tgithub.com/onsi/ginkgo/v2 v2.11.0\n\tgithub.com/onsi/gomega v1.27.8\n\tgithub.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5\n\tgithub.com/pkg/errors v0.9.1\n\tgithub.com/prometheus/client_golang v1.16.0\n\tgithub.com/prometheus/common v0.44.0\n\tgithub.com/segmentio/analytics-go/v3 v3.2.1\n\tgithub.com/senthilrch/kube-fledged v0.10.0\n\tgithub.com/sirupsen/logrus v1.9.3\n\tgithub.com/spf13/cobra v1.7.0\n\tgithub.com/swaggo/files v1.0.1\n\tgithub.com/swaggo/gin-swagger v1.6.0\n\tgithub.com/swaggo/swag v1.8.12\n\tgithub.com/toorop/gin-logrus v0.0.0-20210225092905-2c785434f26f\n\tgithub.com/urfave/cli/v2 v2.3.0\n\tgolang.org/x/net v0.14.0\n\tgolang.org/x/term v0.11.0\n\tk8s.io/api v0.27.4\n\tk8s.io/apimachinery v0.27.4\n\tk8s.io/client-go v0.27.4\n\tk8s.io/code-generator v0.27.4\n\tk8s.io/klog v1.0.0\n)\n\nrequire (\n\tgithub.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect\n\tgithub.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 
// indirect\n)\n\nrequire (\n\tgithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect\n\tgithub.com/KyleBanks/depth v1.2.1 // indirect\n\tgithub.com/Microsoft/go-winio v0.6.1 // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect\n\tgithub.com/bytedance/sonic v1.9.1 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.2.0 // indirect\n\tgithub.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect\n\tgithub.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect\n\tgithub.com/cockroachdb/redact v1.1.5 // indirect\n\tgithub.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/docker/distribution v2.8.2+incompatible // indirect\n\tgithub.com/docker/go-units v0.5.0 // indirect\n\tgithub.com/dustin/go-humanize v1.0.0 // indirect\n\tgithub.com/emicklei/go-restful/v3 v3.9.0 // indirect\n\tgithub.com/evanphx/json-patch v5.6.0+incompatible // indirect\n\tgithub.com/gabriel-vasile/mimetype v1.4.2 // indirect\n\tgithub.com/getsentry/sentry-go v0.18.0 // indirect\n\tgithub.com/gin-contrib/sse v0.1.0 // indirect\n\tgithub.com/go-errors/errors v1.4.2 // indirect\n\tgithub.com/go-logr/logr v1.2.4 // indirect\n\tgithub.com/go-openapi/jsonpointer v0.19.6 // indirect\n\tgithub.com/go-openapi/jsonreference v0.20.1 // indirect\n\tgithub.com/go-openapi/spec v0.20.8 // indirect\n\tgithub.com/go-openapi/swag v0.22.3 // indirect\n\tgithub.com/go-playground/locales v0.14.1 // indirect\n\tgithub.com/go-playground/universal-translator v0.18.1 // indirect\n\tgithub.com/go-playground/validator/v10 v10.14.0 // indirect\n\tgithub.com/goccy/go-json v0.10.2 // indirect\n\tgithub.com/gogo/protobuf v1.3.2 // indirect\n\tgithub.com/golang/glog v1.0.0 // indirect\n\tgithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect\n\tgithub.com/golang/protobuf v1.5.3 // indirect\n\tgithub.com/google/btree v1.1.2 
// indirect\n\tgithub.com/google/gnostic v0.6.9 // indirect\n\tgithub.com/google/gofuzz v1.2.0 // indirect\n\tgithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect\n\tgithub.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect\n\tgithub.com/imdario/mergo v0.3.13 // indirect\n\tgithub.com/inconshreveable/mousetrap v1.1.0 // indirect\n\tgithub.com/jackc/chunkreader/v2 v2.0.1 // indirect\n\tgithub.com/jackc/pgio v1.0.0 // indirect\n\tgithub.com/jackc/pgpassfile v1.0.0 // indirect\n\tgithub.com/jackc/pgproto3/v2 v2.3.2 // indirect\n\tgithub.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect\n\tgithub.com/jackc/pgtype v1.14.0 // indirect\n\tgithub.com/jackc/puddle v1.3.0 // indirect\n\tgithub.com/josharian/intern v1.0.0 // indirect\n\tgithub.com/json-iterator/go v1.1.12 // indirect\n\tgithub.com/klauspost/cpuid/v2 v2.2.4 // indirect\n\tgithub.com/kr/pretty v0.3.1 // indirect\n\tgithub.com/kr/text v0.2.0 // indirect\n\tgithub.com/leodido/go-urn v1.2.4 // indirect\n\tgithub.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect\n\tgithub.com/mailru/easyjson v0.7.7 // indirect\n\tgithub.com/mattn/go-isatty v0.0.19 // indirect\n\tgithub.com/mattn/go-runewidth v0.0.14 // indirect\n\tgithub.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect\n\tgithub.com/moby/spdystream v0.2.0 // indirect\n\tgithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect\n\tgithub.com/modern-go/reflect2 v1.0.2 // indirect\n\tgithub.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect\n\tgithub.com/morikuni/aec v1.0.0 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/opencontainers/image-spec v1.1.0-rc2 // indirect\n\tgithub.com/pelletier/go-toml/v2 v2.0.8 // indirect\n\tgithub.com/peterbourgon/diskv v2.0.1+incompatible // 
indirect\n\tgithub.com/prometheus/client_model v0.4.0 // indirect\n\tgithub.com/prometheus/procfs v0.10.1 // indirect\n\tgithub.com/rancher/remotedialer v0.3.0\n\tgithub.com/rivo/uniseg v0.4.2 // indirect\n\tgithub.com/rogpeppe/go-internal v1.11.0 // indirect\n\tgithub.com/russross/blackfriday/v2 v2.1.0 // indirect\n\tgithub.com/segmentio/backo-go v1.0.0 // indirect\n\tgithub.com/spf13/pflag v1.0.5 // indirect\n\tgithub.com/twitchyliquid64/golang-asm v0.15.1 // indirect\n\tgithub.com/ugorji/go/codec v1.2.11 // indirect\n\tgithub.com/xlab/treeprint v1.1.0 // indirect\n\tgo.starlark.net v0.0.0-20221019144234-6ce4ce37fe55 // indirect\n\tgolang.org/x/arch v0.3.0 // indirect\n\tgolang.org/x/crypto v0.12.0 // indirect\n\tgolang.org/x/mod v0.10.0 // indirect\n\tgolang.org/x/oauth2 v0.8.0 // indirect\n\tgolang.org/x/sync v0.2.0 // indirect\n\tgolang.org/x/sys v0.11.0 // indirect\n\tgolang.org/x/text v0.12.0 // indirect\n\tgolang.org/x/time v0.1.0 // indirect\n\tgolang.org/x/tools v0.9.3 // indirect\n\tgoogle.golang.org/appengine v1.6.7 // indirect\n\tgoogle.golang.org/protobuf v1.30.0 // indirect\n\tgopkg.in/inf.v0 v0.9.1 // indirect\n\tgopkg.in/yaml.v2 v2.4.0 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n\tgotest.tools/v3 v3.5.0 // indirect\n\tk8s.io/cli-runtime v0.27.4 // indirect\n\tk8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect\n\tk8s.io/klog/v2 v2.90.1 // indirect\n\tk8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect\n\tk8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect\n\tsigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect\n\tsigs.k8s.io/kustomize/api v0.13.2 // indirect\n\tsigs.k8s.io/kustomize/kyaml v0.14.1 // indirect\n\tsigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect\n\tsigs.k8s.io/yaml v1.3.0 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=\ncloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=\ncloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=\ncloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=\ncloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=\ncloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=\ncloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=\ncloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=\ncloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=\ncloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=\ncloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=\ncloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=\ncloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=\ncloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=\ncloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=\ncloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=\ncloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=\ncloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=\ncloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=\ncloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=\ncloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=\ncloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=\ncloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=\ncloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=\ncloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=\ncloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=\ncloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=\ncloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=\ncloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=\ncloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=\ncloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=\ncloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=\ncloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=\ncloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=\ncloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=\ndmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/Azure/go-autorest/autorest v0.11.18/go.mod 
h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=\ngithub.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=\ngithub.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=\ngithub.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=\ngithub.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=\ngithub.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=\ngithub.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=\ngithub.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=\ngithub.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=\ngithub.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=\ngithub.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=\ngithub.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=\ngithub.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=\ngithub.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=\ngithub.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=\ngithub.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=\ngithub.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=\ngithub.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=\ngithub.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=\ngithub.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=\ngithub.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=\ngithub.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=\ngithub.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=\ngithub.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=\ngithub.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=\ngithub.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=\ngithub.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=\ngithub.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=\ngithub.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=\ngithub.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=\ngithub.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=\ngithub.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=\ngithub.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=\ngithub.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=\ngithub.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=\ngithub.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=\ngithub.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU=\ngithub.com/cockroachdb/errors v1.10.0/go.mod 
h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE=\ngithub.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=\ngithub.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=\ngithub.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=\ngithub.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=\ngithub.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=\ngithub.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=\ngithub.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=\ngithub.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=\ngithub.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=\ngithub.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=\ngithub.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=\ngithub.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dchest/uniuri v1.2.0 h1:koIcOUdrTIivZgSLhHQvKgqdWZq5d7KdMEWF1Ud6+5g=\ngithub.com/dchest/uniuri v1.2.0/go.mod h1:fSzm4SLHzNZvWLvWJew423PhAzkpNQYq+uNLq4kxhkY=\ngithub.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=\ngithub.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=\ngithub.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=\ngithub.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=\ngithub.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=\ngithub.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=\ngithub.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=\ngithub.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=\ngithub.com/docker/docker v23.0.2+incompatible h1:q81C2qQ/EhPm8COZMUGOQYh4qLv4Xu6CXELJ3WK/mlU=\ngithub.com/docker/docker v23.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=\ngithub.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=\ngithub.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=\ngithub.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=\ngithub.com/dustin/go-humanize v1.0.0 
h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=\ngithub.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=\ngithub.com/dustinkirkland/golang-petname v0.0.0-20230626224747-e794b9370d49 h1:6SNWi8VxQeCSwmLuTbEvJd7xvPmdS//zvMBWweZLgck=\ngithub.com/dustinkirkland/golang-petname v0.0.0-20230626224747-e794b9370d49/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8=\ngithub.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=\ngithub.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=\ngithub.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=\ngithub.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/evanphx/json-patch v4.12.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=\ngithub.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=\ngithub.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=\ngithub.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=\ngithub.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=\ngithub.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=\ngithub.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=\ngithub.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=\ngithub.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=\ngithub.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=\ngithub.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=\ngithub.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=\ngithub.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=\ngithub.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=\ngithub.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=\ngithub.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=\ngithub.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=\ngithub.com/gin-gonic/gin v1.7.3/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=\ngithub.com/gin-gonic/gin v1.8.1/go.mod 
h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=\ngithub.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=\ngithub.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=\ngithub.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=\ngithub.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=\ngithub.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=\ngithub.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=\ngithub.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=\ngithub.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=\ngithub.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=\ngithub.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=\ngithub.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=\ngithub.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=\ngithub.com/go-openapi/jsonpointer v0.19.6 
h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=\ngithub.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=\ngithub.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=\ngithub.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=\ngithub.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=\ngithub.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=\ngithub.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=\ngithub.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU=\ngithub.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=\ngithub.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=\ngithub.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=\ngithub.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=\ngithub.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=\ngithub.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=\ngithub.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=\ngithub.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=\ngithub.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=\ngithub.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=\ngithub.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=\ngithub.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=\ngithub.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=\ngithub.com/go-playground/universal-translator 
v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=\ngithub.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=\ngithub.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=\ngithub.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=\ngithub.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=\ngithub.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=\ngithub.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=\ngithub.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=\ngithub.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=\ngithub.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=\ngithub.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=\ngithub.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=\ngithub.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=\ngithub.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=\ngithub.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=\ngithub.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=\ngithub.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=\ngithub.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=\ngithub.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=\ngithub.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=\ngithub.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf 
v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=\ngithub.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=\ngithub.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=\ngithub.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=\ngithub.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=\ngithub.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=\ngithub.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=\ngithub.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=\ngithub.com/google/gnostic v0.6.9 
h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=\ngithub.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=\ngithub.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=\ngithub.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=\ngithub.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=\ngithub.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=\ngithub.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=\ngithub.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.3.0 
h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=\ngithub.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=\ngithub.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=\ngithub.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=\ngithub.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=\ngithub.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=\ngithub.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=\ngithub.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=\ngithub.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=\ngithub.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=\ngithub.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=\ngithub.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=\ngithub.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=\ngithub.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=\ngithub.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=\ngithub.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=\ngithub.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=\ngithub.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=\ngithub.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=\ngithub.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=\ngithub.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=\ngithub.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=\ngithub.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=\ngithub.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=\ngithub.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=\ngithub.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=\ngithub.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=\ngithub.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=\ngithub.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=\ngithub.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=\ngithub.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=\ngithub.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=\ngithub.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=\ngithub.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=\ngithub.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=\ngithub.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=\ngithub.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=\ngithub.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=\ngithub.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=\ngithub.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=\ngithub.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=\ngithub.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=\ngithub.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=\ngithub.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=\ngithub.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=\ngithub.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=\ngithub.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4=\ngithub.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=\ngithub.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=\ngithub.com/jackc/pgio v1.0.0/go.mod 
h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=\ngithub.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=\ngithub.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=\ngithub.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=\ngithub.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=\ngithub.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=\ngithub.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=\ngithub.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=\ngithub.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=\ngithub.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=\ngithub.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=\ngithub.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=\ngithub.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0=\ngithub.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=\ngithub.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=\ngithub.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=\ngithub.com/jackc/pgtype 
v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=\ngithub.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=\ngithub.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=\ngithub.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=\ngithub.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=\ngithub.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=\ngithub.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=\ngithub.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=\ngithub.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=\ngithub.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=\ngithub.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=\ngithub.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=\ngithub.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0=\ngithub.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jedib0t/go-pretty/v6 v6.4.6 h1:v6aG9h6Uby3IusSSEjHaZNXpHFhzqMmjXcPq1Rjl9Jw=\ngithub.com/jedib0t/go-pretty/v6 v6.4.6/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=\ngithub.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=\ngithub.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=\ngithub.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=\ngithub.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=\ngithub.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=\ngithub.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=\ngithub.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=\ngithub.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=\ngithub.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=\ngithub.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=\ngithub.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=\ngithub.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=\ngithub.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=\ngithub.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=\ngithub.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=\ngithub.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=\ngithub.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod 
h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=\ngithub.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=\ngithub.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=\ngithub.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=\ngithub.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=\ngithub.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=\ngithub.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=\ngithub.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=\ngithub.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=\ngithub.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=\ngithub.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=\ngithub.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=\ngithub.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=\ngithub.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=\ngithub.com/mattn/go-runewidth v0.0.14/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=\ngithub.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=\ngithub.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=\ngithub.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=\ngithub.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=\ngithub.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=\ngithub.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/moby/moby v24.0.4+incompatible h1:20Bf1sfJpspHMAUrxRFplG31Sriaw7Z9/jUEuJk6mqI=\ngithub.com/moby/moby v24.0.4+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=\ngithub.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=\ngithub.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=\ngithub.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=\ngithub.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=\ngithub.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=\ngithub.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=\ngithub.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=\ngithub.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=\ngithub.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=\ngithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=\ngithub.com/namsral/flag v1.7.4-pre h1:b2ScHhoCUkbsq0d2C15Mv+VU8bl8hAXV8arnWiOHNZs=\ngithub.com/namsral/flag v1.7.4-pre/go.mod 
h1:OXldTctbM6SWH1K899kPZcf65KxJiD7MsceFUpB5yDo=\ngithub.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=\ngithub.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=\ngithub.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=\ngithub.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=\ngithub.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=\ngithub.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=\ngithub.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=\ngithub.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=\ngithub.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=\ngithub.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=\ngithub.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=\ngithub.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=\ngithub.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=\ngithub.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=\ngithub.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=\ngithub.com/pelletier/go-toml v1.9.3/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=\ngithub.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=\ngithub.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=\ngithub.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=\ngithub.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=\ngithub.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=\ngithub.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=\ngithub.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=\ngithub.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=\ngithub.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=\ngithub.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=\ngithub.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=\ngithub.com/prometheus/client_golang v1.16.0 
h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=\ngithub.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=\ngithub.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=\ngithub.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=\ngithub.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=\ngithub.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=\ngithub.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=\ngithub.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=\ngithub.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=\ngithub.com/rancher/remotedialer v0.3.0 h1:y1EO8JCsgZo0RcqTUp6U8FXcBAv27R+TLnWRcpvX1sM=\ngithub.com/rancher/remotedialer v0.3.0/go.mod h1:BwwztuvViX2JrLLUwDlsYt5DiyUwHLlzynRwkZLAY0Q=\ngithub.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=\ngithub.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=\ngithub.com/rivo/uniseg 
v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=\ngithub.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=\ngithub.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=\ngithub.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=\ngithub.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=\ngithub.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=\ngithub.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=\ngithub.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=\ngithub.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=\ngithub.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=\ngithub.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=\ngithub.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=\ngithub.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=\ngithub.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=\ngithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=\ngithub.com/segmentio/analytics-go/v3 v3.2.1 h1:G+f90zxtc1p9G+WigVyTR0xNfOghOGs/PYAlljLOyeg=\ngithub.com/segmentio/analytics-go/v3 v3.2.1/go.mod h1:p8owAF8X+5o27jmvUognuXxdtqvSGtD0ZrfY2kcS9bE=\ngithub.com/segmentio/backo-go v1.0.0 
h1:kbOAtGJY2DqOR0jfRkYEorx/b18RgtepGtY3+Cpe6qA=\ngithub.com/segmentio/backo-go v1.0.0/go.mod h1:kJ9mm9YmoWSkk+oQ+5Cj8DEoRCX2JT6As4kEtIIOp1M=\ngithub.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=\ngithub.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=\ngithub.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=\ngithub.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=\ngithub.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=\ngithub.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=\ngithub.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=\ngithub.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=\ngithub.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=\ngithub.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=\ngithub.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=\ngithub.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=\ngithub.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=\ngithub.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=\ngithub.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=\ngithub.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=\ngithub.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=\ngithub.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=\ngithub.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=\ngithub.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=\ngithub.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=\ngithub.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=\ngithub.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=\ngithub.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=\ngithub.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=\ngithub.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngithub.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=\ngithub.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=\ngithub.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=\ngithub.com/swaggo/gin-swagger v1.6.0 h1:y8sxvQ3E20/RCyrXeFfg60r6H0Z+SwpTjMYsMm+zy8M=\ngithub.com/swaggo/gin-swagger v1.6.0/go.mod h1:BG00cCEy294xtVpyIAHG6+e2Qzj/xKlRdOqDkvq0uzo=\ngithub.com/swaggo/swag v1.8.12 h1:pctzkNPu0AlQP2royqX3apjKCQonAnf7KGoxeO4y64w=\ngithub.com/swaggo/swag v1.8.12/go.mod h1:lNfm6Gg+oAq3zRJQNEMBE66LIJKM44mxFqhEEgy2its=\ngithub.com/tensorchord/forwarder v0.0.0-20230713171536-b1b52b398d3a h1:q4GoeuagHfbdl7JGSU0AcArYXnsA3p2+dzBdx7AZHP0=\ngithub.com/tensorchord/forwarder v0.0.0-20230713171536-b1b52b398d3a/go.mod h1:PfpNmyy0g95SWDoSxXH5MPAlFJ9S04w7cBmIMS6U89U=\ngithub.com/tensorchord/kube-fledged v0.2.0 h1:wNJNcot0/CxLRtdnG34/EVwVI8WgLoJ2uxAfiUtVfNg=\ngithub.com/tensorchord/kube-fledged v0.2.0/go.mod 
h1:m4ncbmr05mQDRdJAl+UuCcgf6cpipU333En8lbob2u4=\ngithub.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=\ngithub.com/toorop/gin-logrus v0.0.0-20210225092905-2c785434f26f h1:oqdnd6OGlOUu1InG37hWcCB3a+Jy3fwjylyVboaNMwY=\ngithub.com/toorop/gin-logrus v0.0.0-20210225092905-2c785434f26f/go.mod h1:X3Dd1SB8Gt1V968NTzpKFjMM6O8ccta2NPC6MprOxZQ=\ngithub.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=\ngithub.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=\ngithub.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=\ngithub.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=\ngithub.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=\ngithub.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=\ngithub.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=\ngithub.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=\ngithub.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=\ngithub.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=\ngithub.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=\ngithub.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=\ngithub.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=\ngithub.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=\ngithub.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=\ngithub.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=\ngithub.com/xlab/treeprint v1.1.0 
h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=\ngithub.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=\ngithub.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=\ngithub.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\ngithub.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=\ngithub.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=\ngo.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=\ngo.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=\ngo.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=\ngo.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\ngo.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=\ngo.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=\ngo.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=\ngo.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod 
h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=\ngo.starlark.net v0.0.0-20221019144234-6ce4ce37fe55 h1:UETCDFV7xVE6L29SnwA1vzkJEYGwffjjmxURPkstP6A=\ngo.starlark.net v0.0.0-20221019144234-6ce4ce37fe55/go.mod h1:kIVgS18CjmEC3PqMd5kaJSGEifyV/CeB9x506ZJ1Vbk=\ngo.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=\ngo.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=\ngo.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=\ngo.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=\ngo.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=\ngo.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=\ngo.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=\ngo.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=\ngo.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=\ngo.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=\ngolang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=\ngolang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=\ngolang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=\ngolang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=\ngolang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=\ngolang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=\ngolang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp 
v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=\ngolang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=\ngolang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=\ngolang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=\ngolang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=\ngolang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=\ngolang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=\ngolang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=\ngolang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=\ngolang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=\ngolang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=\ngolang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=\ngolang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=\ngolang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=\ngolang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=\ngolang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=\ngolang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.2.0 
h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=\ngolang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys 
v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=\ngolang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term 
v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\ngolang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=\ngolang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=\ngolang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=\ngolang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=\ngolang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools 
v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools 
v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=\ngolang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=\ngolang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=\ngolang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=\ngolang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=\ngolang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=\ngolang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=\ngoogle.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=\ngoogle.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.13.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=\ngoogle.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=\ngoogle.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=\ngoogle.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=\ngoogle.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=\ngoogle.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=\ngoogle.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=\ngoogle.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/appengine 
v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=\ngoogle.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=\ngoogle.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=\ngoogle.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=\ngoogle.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto 
v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=\ngoogle.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=\ngoogle.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=\ngoogle.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=\ngoogle.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=\ngoogle.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=\ngoogle.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=\ngoogle.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=\ngoogle.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=\ngoogle.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=\ngoogle.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=\ngoogle.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=\ngoogle.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=\ngoogle.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=\ngoogle.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=\ngoogle.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=\ngoogle.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=\ngopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=\ngopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod 
h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\ngopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=\ngotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=\nhonnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\nhonnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\nk8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=\nk8s.io/api v0.27.4 h1:0pCo/AN9hONazBKlNUdhQymmnfLRbSZjd5H5H3f0bSs=\nk8s.io/api v0.27.4/go.mod h1:O3smaaX15NfxjzILfiln1D8Z3+gEYpjEpiNA/1EVK1Y=\nk8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=\nk8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs=\nk8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=\nk8s.io/cli-runtime v0.23.5/go.mod h1:oY6QDF2qo9xndSq32tqcmRp2UyXssdGrLfjAVymgbx4=\nk8s.io/cli-runtime v0.27.4 h1:Zb0eci+58eHZNnoHhjRFc7W88s8dlG12VtIl3Nv2Hto=\nk8s.io/cli-runtime v0.27.4/go.mod h1:k9Z1xiZq2xNplQmehpDquLgc+rE+pubpO1cK4al4Mlw=\nk8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=\nk8s.io/client-go v0.27.4 h1:vj2YTtSJ6J4KxaC88P4pMPEQECWMY8gqPqsTgUKzvjk=\nk8s.io/client-go v0.27.4/go.mod h1:ragcly7lUlN0SRPk5/ZkGnDjPknzb37TICq07WhI6Xc=\nk8s.io/code-generator v0.27.4 h1:bw2xFEBnthhCSC7Bt6FFHhPTfWX21IJ30GXxOzywsFE=\nk8s.io/code-generator v0.27.4/go.mod h1:DPung1sI5vBgn4AGKtlPRQAyagj/ir/4jI55ipZHVww=\nk8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=\nk8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=\nk8s.io/gengo 
v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=\nk8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=\nk8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=\nk8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=\nk8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=\nk8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=\nk8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=\nk8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=\nk8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=\nk8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=\nk8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=\nk8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=\nk8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=\nk8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=\nk8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=\nk8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=\nk8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=\nrsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=\nrsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=\nrsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=\nrsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=\nsigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod 
h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=\nsigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=\nsigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=\nsigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8=\nsigs.k8s.io/kustomize/api v0.13.2 h1:kejWfLeJhUsTGioDoFNJET5LQe/ajzXhJGYoU+pJsiA=\nsigs.k8s.io/kustomize/api v0.13.2/go.mod h1:DUp325VVMFVcQSq+ZxyDisA8wtldwHxLZbr1g94UHsw=\nsigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E=\nsigs.k8s.io/kustomize/kyaml v0.14.1 h1:c8iibius7l24G2wVAGZn/Va2wNys03GXLjYVIcFVxKA=\nsigs.k8s.io/kustomize/kyaml v0.14.1/go.mod h1:AN1/IpawKilWD7V+YvQwRGUvuUOOWpjsHu6uHwonSF4=\nsigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=\nsigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=\nsigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=\nsigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=\nsigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=\nsigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=\nsigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=\n"
  },
  {
    "path": "ingress-operator/.DEREK.yml",
    "content": "redirect: https://raw.githubusercontent.com/openfaas/faas/master/.DEREK.yml\n"
  },
  {
    "path": "ingress-operator/.dockerignore",
    "content": ".git\n.github\n.vscode\n.tools\nartifacts\nexamples\nhack"
  },
  {
    "path": "ingress-operator/.gitignore",
    "content": "faas-o6s\n\n# Binaries for programs and plugins\n*.exe\n*.dll\n*.so\n*.dylib\n\n# Test binary, build with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736\n.glide/\n.idea/\n\nbin/\npassword.txt\nfaas-netes/**\ntest.yaml\ncluster.yaml\n"
  },
  {
    "path": "ingress-operator/.tools/README.md",
    "content": "# Tools folder\n\nThe tools folder allows us a space to define and pin external dependencies. If they are go based tools we can create individual `mod` files that allow us to download or install these tools independent of the main package.\n\n## Tools\n\n1. `k8s.io/code-gen` this package needs to be _downloaded_ not installed. But we can not use the copy created in the `vendor` folder because vendor does not make a complete clone, it only keeps the Go files, and the code-gen project has several bash scripts that we need to reference.\nThe main project `Makefile` will attempt to keep the `code-generator.mod` file in sync with the `go.mod`. It should not need to be manually edited, but it does need to be committed."
  },
  {
    "path": "ingress-operator/.tools/code-generator.mod",
    "content": "module _ // Fake module so that we can install code-generator separate from the project\n\ngo 1.16\n\nrequire (\n    k8s.io/code-generator v0.21.3\n)"
  },
  {
    "path": "ingress-operator/.tools/code-generator.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=\ngithub.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=\ngithub.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=\ngithub.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=\ngithub.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=\ngithub.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=\ngithub.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=\ngithub.com/go-logr/logr 
v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=\ngithub.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=\ngithub.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=\ngithub.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=\ngithub.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=\ngithub.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=\ngithub.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=\ngithub.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=\ngithub.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=\ngithub.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/mailru/easyjson 
v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=\ngithub.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=\ngithub.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=\ngithub.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net 
v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\nhonnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nk8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=\nk8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=\nk8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=\nk8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=\nk8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=\nk8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=\nk8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=\nsigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=\nsigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=\nsigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=\n"
  },
  {
    "path": "ingress-operator/.vscode/settings.json",
    "content": "{\n    \"go.inferGopath\": false,\n    \"cSpell.words\": [\n        \"Infof\",\n        \"appsv\",\n        \"corev\",\n        \"faas\",\n        \"faasclientset\",\n        \"faasscheme\",\n        \"faasv\",\n        \"handler\",\n        \"klog\",\n        \"kube\",\n        \"kubeclientset\",\n        \"kubeconfig\",\n        \"kubeinformers\",\n        \"logtostderr\",\n        \"metav\",\n        \"netv\",\n        \"networkingv\",\n        \"sync\",\n        \"syncer\",\n        \"threadiness\",\n        \"traefik\"\n    ]\n}"
  },
  {
    "path": "ingress-operator/Dockerfile",
    "content": "FROM ubuntu:22.04\n\nLABEL maintainer=\"modelz-support@tensorchord.ai\"\n\nCOPY ingress-operator /usr/bin/ingress-operator\nENTRYPOINT [\"/usr/bin/ingress-operator\"]\n"
  },
  {
    "path": "ingress-operator/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 TensorChord Inc.\nCopyright (c) 2017-2019 OpenFaaS Author(s)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
  },
  {
    "path": "ingress-operator/Makefile",
    "content": "# Copyright 2022 TensorChord Inc.\n#\n# The old school Makefile, following are required targets. The Makefile is written\n# to allow building multiple binaries. You are free to add more targets or change\n# existing implementations, as long as the semantics are preserved.\n#\n#   make              - default to 'build' target\n#   make lint         - code analysis\n#   make test         - run unit test (or plus integration test)\n#   make build        - alias to build-local target\n#   make build-local  - build local binary targets\n#   make build-linux  - build linux binary targets\n#   make container    - build containers\n#   $ docker login registry -u username -p xxxxx\n#   make push         - push containers\n#   make clean        - clean up targets\n#\n# Not included but recommended targets:\n#   make e2e-test\n#\n# The makefile is also responsible to populate project version information.\n#\n\n#\n# Tweak the variables based on your project.\n#\n\n# This repo's root import path (under GOPATH).\nROOT := github.com/tensorchord/openmodelz/ingress-operator\n\n# Target binaries. 
You can build multiple binaries for a single project.\nTARGETS := ingress-operator\n\n# Container image prefix and suffix added to targets.\n# The final built images are:\n#   $[REGISTRY]/$[IMAGE_PREFIX]$[TARGET]$[IMAGE_SUFFIX]:$[VERSION]\n# $[REGISTRY] is an item from $[REGISTRIES], $[TARGET] is an item from $[TARGETS].\nIMAGE_PREFIX ?= $(strip )\nIMAGE_SUFFIX ?= $(strip )\n\n# Container registries.\nREGISTRY ?= ghcr.io/tensorchord\n\n# Container registry for base images.\nBASE_REGISTRY ?= docker.io\nBASE_REGISTRY_USER ?= modelzai\n\n# Disable CGO by default.\nCGO_ENABLED ?= 0\n\n#\n# These variables should not need tweaking.\n#\n\n# It's necessary to set this because some environments don't link sh -> bash.\nexport SHELL := bash\n\n# It's necessary to set the errexit flags for the bash shell.\nexport SHELLOPTS := errexit\n\nPACKAGE_NAME := github.com/tensorchord/openmodelz/ingress-operator\nGOLANG_CROSS_VERSION  ?= v1.17.6\n\n# Project main package location (can be multiple ones).\nCMD_DIR := ./cmd\n\n# Project output directory.\nOUTPUT_DIR := ./bin\nDEBUG_DIR := ./debug-bin\n\n# Build directory.\nBUILD_DIR := ./build\n\n# Current version of the project.\nVERSION ?= $(shell git describe --match 'v[0-9]*' --always --tags --abbrev=0)\nBUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\nGIT_COMMIT=$(shell git rev-parse HEAD)\nGIT_TAG=$(shell if [ -z \"`git status --porcelain`\" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)\nGIT_TREE_STATE=$(shell if [ -z \"`git status --porcelain`\" ]; then echo \"clean\" ; else echo \"dirty\"; fi)\nGITSHA ?= $(shell git rev-parse --short HEAD)\n\n# Track code version with Docker Label.\nDOCKER_LABELS ?= git-describe=\"$(shell date -u +v%Y%m%d)-$(shell git describe --tags --always --dirty)\"\n\n# Golang standard bin directory.\nGOPATH ?= $(shell go env GOPATH)\nGOROOT ?= $(shell go env GOROOT)\nBIN_DIR := $(GOPATH)/bin\nGOLANGCI_LINT := $(BIN_DIR)/golangci-lint\n\n# check if we need embed the 
dashboard\nDASHBOARD_BUILD ?= debug\n\n# Default golang flags used in build and test\n# -mod=vendor: force go to use the vendor files instead of using the `$GOPATH/pkg/mod`\n# -p: the number of programs that can be run in parallel\n# -count: run each test and benchmark 1 times. Set this flag to disable test cache\nexport GOFLAGS ?= -count=1\n\n#\n# Define all targets. At least the following commands are required:\n#\n\n# All targets.\n.PHONY: help lint test build container push addlicense debug debug-local build-local generate clean test-local addlicense-install release build-image\n\n.DEFAULT_GOAL:=build\n\nbuild: build-local  ## Build the release version of envd\n\nhelp:  ## Display this help\n\t@awk 'BEGIN {FS = \":.*##\"; printf \"\\nUsage:\\n  make \\033[36m<target>\\033[0m\\n\"} /^[a-zA-Z0-9_-]+:.*?##/ { printf \"  \\033[36m%-15s\\033[0m %s\\n\", $$1, $$2 } /^##@/ { printf \"\\n\\033[1m%s\\033[0m\\n\", substr($$0, 5) } ' $(MAKEFILE_LIST)\n\ndebug: debug-local  ## Build the debug version of envd\n\n# more info about `GOGC` env: https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint\nlint: $(GOLANGCI_LINT)  ## Lint GO code\n\t@$(GOLANGCI_LINT) run\n\n$(GOLANGCI_LINT):\n\tcurl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin\n\nmockgen-install:\n\tgo install github.com/golang/mock/mockgen@v1.6.0\n\naddlicense-install:\n\tgo install github.com/google/addlicense@latest\n\nbuild-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags $(DASHBOARD_BUILD)  -trimpath -v -o $(OUTPUT_DIR)/$${target}     \\\n\t    -ldflags \"-s -w -X $(ROOT)/pkg/version.version=$(VERSION) -X $(ROOT)/pkg/version.buildDate=$(BUILD_DATE) -X $(ROOT)/pkg/version.gitCommit=$(GIT_COMMIT) -X $(ROOT)/pkg/version.gitTreeState=$(GIT_TREE_STATE)\"                     \\\n\t    $(CMD_DIR)/$${target};    
                                                     \\\n\tdone\n\n# It is used by vscode to attach into the process.\ndebug-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) go build -tags $(DASHBOARD_BUILD) -trimpath                                    \\\n\t  \t-v -o $(DEBUG_DIR)/$${target}                                                  \\\n\t  \t-gcflags='all=-N -l'                                                           \\\n\t    $(CMD_DIR)/$${target};                                                         \\\n\tdone\n\naddlicense: addlicense-install  ## Add license to GO code files\n\taddlicense -l mpl -c \"TensorChord Inc.\" $$(find . -type f -name '*.go' | grep -v pkg/docs/docs.go)\n\ntest-local:\n\t@go test -tags=$(DASHBOARD_BUILD) -v -race -coverprofile=coverage.out ./...\n\ntest:  ## Run the tests\n\t@go test -tags=$(DASHBOARD_BUILD) -race -coverpkg=./pkg/... -coverprofile=coverage.out ./...\n\t@go tool cover -func coverage.out | tail -n 1 | awk '{ print \"Total coverage: \" $$3 }'\n\nclean:  ## Clean the outputs and artifacts\n\t@-rm -vrf ${OUTPUT_DIR}\n\t@-rm -vrf ${DEBUG_DIR}\n\t@-rm -vrf build dist .eggs *.egg-info\n\nfmt:  ## Run go fmt against code.\n\tgo fmt ./...\n\nvet: ## Run go vet against code.\n\tgo vet ./...\n\nbuild-image: build-local\n\tdocker build -t ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/ingress-operator:dev -f Dockerfile ./bin\n\tdocker push ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/ingress-operator:dev\n\nrelease:\n\t@if [ ! 
-f \".release-env\" ]; then \\\n\t\techo \"\\033[91m.release-env is required for release\\033[0m\";\\\n\t\texit 1;\\\n\tfi\n\tdocker run \\\n\t\t--rm \\\n\t\t--privileged \\\n\t\t-e CGO_ENABLED=1 \\\n\t\t--env-file .release-env \\\n\t\t-v /var/run/docker.sock:/var/run/docker.sock \\\n\t\t-v `pwd`:/go/src/$(PACKAGE_NAME) \\\n\t\t-v `pwd`/sysroot:/sysroot \\\n\t\t-w /go/src/$(PACKAGE_NAME) \\\n\t\tgoreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \\\n\t\trelease --rm-dist\n"
  },
  {
    "path": "ingress-operator/artifacts/.gitignore",
    "content": ""
  },
  {
    "path": "ingress-operator/artifacts/crds/tensorchord.ai_inferenceingresses.yaml",
    "content": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  annotations:\n    controller-gen.kubebuilder.io/version: v0.5.0\n  creationTimestamp: null\n  name: inferenceingresses.tensorchord.ai\nspec:\n  group: tensorchord.ai\n  names:\n    kind: InferenceIngress\n    listKind: InferenceIngressList\n    plural: inferenceingresses\n    singular: inferenceingress\n  scope: Namespaced\n  versions:\n    - additionalPrinterColumns:\n        - jsonPath: .spec.domain\n          name: Domain\n          type: string\n      name: v1\n      schema:\n        openAPIV3Schema:\n          description: InferenceIngress describes an OpenFaaS function\n          type: object\n          required:\n            - spec\n          properties:\n            apiVersion:\n              description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n              type: string\n            kind:\n              description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n              type: string\n            metadata:\n              type: object\n            spec:\n              description: InferenceIngressSpec is the spec for a InferenceIngressSpec resource. It must be created in the same namespace as the gateway, i.e. 
openfaas.\n              type: object\n              required:\n                - domain\n                - framework\n                - function\n              properties:\n                bypassGateway:\n                  description: BypassGateway, when true creates an Ingress record directly for the Function name without using the gateway in the hot path\n                  type: boolean\n                domain:\n                  description: Domain such as \"api.example.com\"\n                  type: string\n                framework:\n                  type: string\n                function:\n                  description: Function such as \"nodeinfo\"\n                  type: string\n                ingressType:\n                  description: IngressType such as \"nginx\"\n                  type: string\n                path:\n                  description: Path such as \"/v1/profiles/view/(.*)\", or leave empty for default\n                  type: string\n                tls:\n                  description: Enable TLS via cert-manager\n                  type: object\n                  properties:\n                    enabled:\n                      type: boolean\n                    issuerRef:\n                      description: ObjectReference is a reference to an object with a given name and kind.\n                      type: object\n                      required:\n                        - name\n                      properties:\n                        kind:\n                          type: string\n                        name:\n                          type: string\n      served: true\n      storage: true\n      subresources: {}\nstatus:\n  acceptedNames:\n    kind: \"\"\n    plural: \"\"\n  conditions: []\n  storedVersions: []\n"
  },
  {
    "path": "ingress-operator/artifacts/operator-amd64.yaml",
    "content": "---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: ingress-operator\n  namespace: openfaas\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: ingress-operator\n  template:\n    metadata:\n      labels:\n        app: ingress-operator\n      annotations:\n        prometheus.io.scrape: 'false'\n    spec:\n      serviceAccountName: ingress-operator\n      containers:\n      - name: operator\n        image: docker.io/alexellis2/ingress-operator:1\n        imagePullPolicy: Always\n        command:\n          - ./ingress-operator\n        env:\n        - name: ingress_namespace\n          value: openfaas\n        resources:\n          limits:\n            memory: 128Mi\n          requests:\n            memory: 25Mi\n"
  },
  {
    "path": "ingress-operator/artifacts/operator-rbac.yaml",
    "content": "---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: ingress-operator\n  namespace: openfaas\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: ingress-operator-rw\n  namespace: openfaas\nrules:\n- apiGroups: [\"openfaas.com\"]\n  resources: [\"functioningresses\"]\n  verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\", \"delete\"]\n- apiGroups: [\"extensions\", \"networking\", \"networking.k8s.io\"]\n  resources: [\"ingresses\"]\n  verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\", \"delete\"]\n- apiGroups: [\"\"]\n  resources: [\"events\"]\n  verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\", \"delete\"]\n# - apiGroups: [\"certmanager.k8s.io\"]\n#   resources: [\"certificates\"]\n#   verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\", \"delete\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: ingress-operator-rw\n  namespace: openfaas\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: ingress-operator-rw\nsubjects:\n- kind: ServiceAccount\n  name: ingress-operator\n  namespace: openfaas\n"
  },
  {
    "path": "ingress-operator/cmd/ingress-operator/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tcli \"github.com/urfave/cli/v2\"\n\tklog \"k8s.io/klog\"\n\n\t// required to authenticate against GKE clusters\n\t_ \"k8s.io/client-go/plugin/pkg/client/auth/gcp\"\n\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/app\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/version\"\n)\n\nfunc run(args []string) error {\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tfmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision)\n\t}\n\tklog.InitFlags(nil)\n\n\ta := app.New()\n\treturn a.Run(args)\n}\n\nfunc handleErr(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tklog.Error(err)\n\tos.Exit(1)\n}\n\nfunc main() {\n\terr := run(os.Args)\n\thandleErr(err)\n}\n"
  },
  {
    "path": "ingress-operator/hack/boilerplate.go.txt",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n"
  },
  {
    "path": "ingress-operator/hack/custom-boilerplate.go.txt",
    "content": "/*\nCopyright 2023 OpenFaaS Author(s)\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n"
  },
  {
    "path": "ingress-operator/hack/print-codegen-version.sh",
    "content": "#!/bin/bash\n\n# This scripts exists primarily so that it can be used in the Makefile.\n# It is needed because the `($shell ...)` command was having issues with the pipe.\n# Extracting it to a script was the simplest solution.\n\ngrep 'k8s.io/code-generator' go.mod | awk '{print $2}'\n"
  },
  {
    "path": "ingress-operator/hack/update-codegen.sh",
    "content": "#!/usr/bin/env bash\n\n# copied from: https://github.com/weaveworks/flagger/tree/master/hack\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nSCRIPT_ROOT=$(git rev-parse --show-toplevel)/ingress-operator\n\necho \"SCRIPT_ROOT is ${SCRIPT_ROOT}\"\n\n# Grab code-generator version from go.sum.\nCODEGEN_VERSION=$(grep 'k8s.io/code-generator' go.sum | awk '{print $2}' | head -1)\nCODEGEN_PKG=$(echo `go env GOPATH`\"/pkg/mod/k8s.io/code-generator@${CODEGEN_VERSION}\")\n\necho \">> Using ${CODEGEN_PKG}\"\n\n# code-generator does work with go.mod but makes assumptions about\n# the project living in `$GOPATH/src`. To work around this and support\n# any location; create a temporary directory, use this as an output\n# base, and copy everything back once generated.\nTEMP_DIR=$(mktemp -d)\ncleanup() {\n    echo \">> Removing ${TEMP_DIR}\"\n    rm -rf ${TEMP_DIR}\n}\ntrap \"cleanup\" EXIT SIGINT\n\necho \">> Temporary output directory ${TEMP_DIR}\"\n\n# Ensure we can execute.\nchmod +x ${CODEGEN_PKG}/generate-groups.sh\n\n${CODEGEN_PKG}/generate-groups.sh all \\\n    github.com/tensorchord/openmodelz/ingress-operator/pkg/client github.com/tensorchord/openmodelz/ingress-operator/pkg/apis \\\n    modelzetes:v1 \\\n    --output-base \"${TEMP_DIR}\" \\\n    --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt\n\n# Copy everything back.\ncp -r \"${TEMP_DIR}/github.com/tensorchord/openmodelz/ingress-operator/.\" \"${SCRIPT_ROOT}/\"\n"
  },
  {
    "path": "ingress-operator/hack/update-crds.sh",
    "content": "#!/bin/bash\n\nexport controllergen=\"$GOPATH/bin/controller-gen\"\nexport PKG=sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0\n\nif [ ! -e \"$controllergen\" ]; then\n  echo \"Getting $PKG\"\n  go install $PKG\nfi\n\n\"$controllergen\" \\\n  crd \\\n  schemapatch:manifests=./artifacts/crds \\\n  paths=./pkg/apis/... \\\n  output:dir=./artifacts/crds\n"
  },
  {
    "path": "ingress-operator/hack/verify-codegen.sh",
    "content": "#!/usr/bin/env bash\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nSCRIPT_ROOT=$(git rev-parse --show-toplevel)\n\nDIFFROOT=\"${SCRIPT_ROOT}/pkg\"\nTMP_DIFFROOT=\"${SCRIPT_ROOT}/_tmp/pkg\"\n_tmp=\"${SCRIPT_ROOT}/_tmp\"\n\ncleanup() {\n  rm -rf \"${_tmp}\"\n}\ntrap \"cleanup\" EXIT SIGINT\n\ncleanup\n\nmkdir -p \"${TMP_DIFFROOT}\"\ncp -a \"${DIFFROOT}\"/* \"${TMP_DIFFROOT}\"\n\n\"${SCRIPT_ROOT}/hack/update-codegen.sh\"\necho \"diffing ${DIFFROOT} against freshly generated codegen\"\nret=0\ndiff -Naupr \"${DIFFROOT}\" \"${TMP_DIFFROOT}\" || ret=$?\ncp -a \"${TMP_DIFFROOT}\"/* \"${DIFFROOT}\"\nif [[ $ret -eq 0 ]]\nthen\n  echo \"${DIFFROOT} up to date.\"\nelse\n  echo \"${DIFFROOT} is out of date. Please run hack/update-codegen.sh\"\n  exit 1\nfi\n\n"
  },
  {
    "path": "ingress-operator/pkg/apis/modelzetes/register.go",
    "content": "package modelzetes\n\nconst (\n\tGroupName = \"tensorchord.ai\"\n)\n"
  },
  {
    "path": "ingress-operator/pkg/apis/modelzetes/v1/doc.go",
    "content": "// +k8s:deepcopy-gen=package,register\n\n// Package v1 is the OpenFaaS v1 version of the API.\n// +groupName=tensorchord.ai\npackage v1\n"
  },
  {
    "path": "ingress-operator/pkg/apis/modelzetes/v1/register.go",
    "content": "package v1\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\n\tcontroller \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes\"\n)\n\n// SchemeGroupVersion is group version used to register these objects\nvar SchemeGroupVersion = schema.GroupVersion{Group: controller.GroupName, Version: \"v1\"}\n\n// Resource takes an unqualified resource and returns a Group qualified GroupResource\nfunc Resource(resource string) schema.GroupResource {\n\treturn SchemeGroupVersion.WithResource(resource).GroupResource()\n}\n\nvar (\n\t// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.\n\tSchemeBuilder      runtime.SchemeBuilder\n\tlocalSchemeBuilder = &SchemeBuilder\n\tAddToScheme        = localSchemeBuilder.AddToScheme\n)\n\nfunc init() {\n\t// We only register manually written functions here. The registration of the\n\t// generated functions takes place in the generated files. The separation\n\t// makes the code compile even when the generated files are missing.\n\tlocalSchemeBuilder.Register(addKnownTypes)\n}\n\n// Adds the list of known types to api.Scheme.\nfunc addKnownTypes(scheme *runtime.Scheme) error {\n\tscheme.AddKnownTypes(SchemeGroupVersion,\n\t\t&InferenceIngress{},\n\t\t&InferenceIngressList{},\n\t)\n\tmetav1.AddToGroupVersion(scheme, SchemeGroupVersion)\n\treturn nil\n}\n"
  },
  {
    "path": "ingress-operator/pkg/apis/modelzetes/v1/types.go",
    "content": "package v1\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\n// +genclient\n// +genclient:noStatus\n// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n// +kubebuilder:printcolumn:name=\"Domain\",type=string,JSONPath=`.spec.domain`\n\n// InferenceIngress describes an OpenFaaS function\ntype InferenceIngress struct {\n\tmetav1.TypeMeta   `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\tSpec InferenceIngressSpec `json:\"spec\"`\n}\n\n// InferenceIngressSpec is the spec for a InferenceIngressSpec resource. It must\n// be created in the same namespace as the gateway, i.e. openfaas.\ntype InferenceIngressSpec struct {\n\t// Domain such as \"api.example.com\"\n\tDomain string `json:\"domain\"`\n\n\t// Function such as \"nodeinfo\"\n\tFunction string `json:\"function\"`\n\n\tFramework string `json:\"framework\"`\n\n\t// Path such as \"/v1/profiles/view/(.*)\", or leave empty for default\n\t// +optional\n\tPath string `json:\"path\"`\n\n\t// IngressType such as \"nginx\"\n\t// +optional\n\tIngressType string `json:\"ingressType,omitempty\"`\n\n\t// Enable TLS via cert-manager\n\t// +optional\n\tTLS *InferenceIngressTLS `json:\"tls,omitempty\"`\n\n\t// BypassGateway, when true creates an Ingress record\n\t// directly for the Function name without using the gateway\n\t// in the hot path\n\t// +optional\n\tBypassGateway bool `json:\"bypassGateway,omitempty\"`\n}\n\n// InferenceIngressSpec TLS options\ntype InferenceIngressTLS struct {\n\t// +optional\n\tEnabled bool `json:\"enabled\"`\n\n\t// +optional\n\tIssuerRef ObjectReference `json:\"issuerRef\"`\n}\n\n// UseTLS if TLS is enabled\nfunc (f *InferenceIngressSpec) UseTLS() bool {\n\treturn f.TLS != nil && f.TLS.Enabled\n}\n\n// ObjectReference is a reference to an object with a given name and kind.\ntype ObjectReference struct {\n\tName string `json:\"name\"`\n\n\t// +optional\n\tKind string `json:\"kind,omitempty\"`\n}\n\n// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n\n// InferenceIngress is a list of Function resources\ntype InferenceIngressList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []InferenceIngress `json:\"items\"`\n}\n"
  },
  {
    "path": "ingress-operator/pkg/apis/modelzetes/v1/zz_generated.deepcopy.go",
    "content": "//go:build !ignore_autogenerated\n// +build !ignore_autogenerated\n\n/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by deepcopy-gen. DO NOT EDIT.\n\npackage v1\n\nimport (\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n)\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *InferenceIngress) DeepCopyInto(out *InferenceIngress) {\n\t*out = *in\n\tout.TypeMeta = in.TypeMeta\n\tin.ObjectMeta.DeepCopyInto(&out.ObjectMeta)\n\tin.Spec.DeepCopyInto(&out.Spec)\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceIngress.\nfunc (in *InferenceIngress) DeepCopy() *InferenceIngress {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(InferenceIngress)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.\nfunc (in *InferenceIngress) DeepCopyObject() runtime.Object {\n\tif c := in.DeepCopy(); c != nil {\n\t\treturn c\n\t}\n\treturn nil\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil.\nfunc (in *InferenceIngressList) DeepCopyInto(out *InferenceIngressList) {\n\t*out = *in\n\tout.TypeMeta = in.TypeMeta\n\tin.ListMeta.DeepCopyInto(&out.ListMeta)\n\tif in.Items != nil {\n\t\tin, out := &in.Items, &out.Items\n\t\t*out = make([]InferenceIngress, len(*in))\n\t\tfor i := range *in {\n\t\t\t(*in)[i].DeepCopyInto(&(*out)[i])\n\t\t}\n\t}\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceIngressList.\nfunc (in *InferenceIngressList) DeepCopy() *InferenceIngressList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(InferenceIngressList)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.\nfunc (in *InferenceIngressList) DeepCopyObject() runtime.Object {\n\tif c := in.DeepCopy(); c != nil {\n\t\treturn c\n\t}\n\treturn nil\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *InferenceIngressSpec) DeepCopyInto(out *InferenceIngressSpec) {\n\t*out = *in\n\tif in.TLS != nil {\n\t\tin, out := &in.TLS, &out.TLS\n\t\t*out = new(InferenceIngressTLS)\n\t\t**out = **in\n\t}\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceIngressSpec.\nfunc (in *InferenceIngressSpec) DeepCopy() *InferenceIngressSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(InferenceIngressSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil.\nfunc (in *InferenceIngressTLS) DeepCopyInto(out *InferenceIngressTLS) {\n\t*out = *in\n\tout.IssuerRef = in.IssuerRef\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceIngressTLS.\nfunc (in *InferenceIngressTLS) DeepCopy() *InferenceIngressTLS {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(InferenceIngressTLS)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *ObjectReference) DeepCopyInto(out *ObjectReference) {\n\t*out = *in\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.\nfunc (in *ObjectReference) DeepCopy() *ObjectReference {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ObjectReference)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n"
  },
  {
    "path": "ingress-operator/pkg/app/config.go",
    "content": "package app\n\nimport (\n\tcli \"github.com/urfave/cli/v2\"\n\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/config\"\n)\n\nfunc configFromCLI(c *cli.Context) config.Config {\n\tcfg := config.Config{}\n\n\t// kubernetes\n\tcfg.KubeConfig.Kubeconfig = c.String(flagKubeConfig)\n\tcfg.KubeConfig.MasterURL = c.String(flagMasterURL)\n\tcfg.KubeConfig.QPS = c.Int(flagQPS)\n\tcfg.KubeConfig.Burst = c.Int(flagBurst)\n\tcfg.KubeConfig.ResyncPeriod = c.Duration(flagResyncPeriod)\n\n\t// controller\n\tcfg.Controller.ThreadCount = c.Int(flagControllerThreads)\n\tcfg.Controller.Namespace = c.String(flagNamespace)\n\tcfg.Controller.Host = c.String(flagHost)\n\treturn cfg\n}\n"
  },
  {
    "path": "ingress-operator/pkg/app/root.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage app\n\nimport (\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/pkg/errors\"\n\t\"github.com/sirupsen/logrus\"\n\tcli \"github.com/urfave/cli/v2\"\n\n\tcontroller \"github.com/tensorchord/openmodelz/ingress-operator/pkg/controller/v1\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/signals\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/version\"\n)\n\nconst (\n\tflagDebug = \"debug\"\n\n\t// kubernetes\n\tflagMasterURL    = \"master-url\"\n\tflagKubeConfig   = \"kube-config\"\n\tflagQPS          = \"kube-qps\"\n\tflagBurst        = \"kube-burst\"\n\tflagResyncPeriod = \"kube-resync-period\"\n\n\t// controller\n\tflagControllerThreads = \"controller-thread-count\"\n\tflagNamespace         = \"namespace\"\n\tflagHost              = \"host\"\n)\n\ntype App struct {\n\t*cli.App\n}\n\nfunc New() App {\n\tinternalApp := cli.NewApp()\n\tinternalApp.EnableBashCompletion = true\n\tinternalApp.Name = \"ingress-operator\"\n\tinternalApp.Usage = \"kubernetes operator for inference ingress\"\n\tinternalApp.HideHelpCommand = true\n\tinternalApp.HideVersion = false\n\tinternalApp.Version = version.GetVersion().String()\n\tinternalApp.Flags = []cli.Flag{\n\t\t&cli.BoolFlag{\n\t\t\tName:    flagDebug,\n\t\t\tUsage:   \"enable debug output in logs\",\n\t\t\tEnvVars: []string{\"DEBUG\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagMasterURL,\n\t\t\tUsage:   \"URL to master for kubernetes cluster\",\n\t\t\tEnvVars: []string{\"MODELZ_MASTER_URL\"},\n\t\t\tAliases: []string{\"mu\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagKubeConfig,\n\t\t\tUsage:   \"Path to kubeconfig file. 
If not provided, will use in-cluster config\",\n\t\t\tEnvVars: []string{\"MODELZ_KUBE_CONFIG\"},\n\t\t\tAliases: []string{\"kc\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagQPS,\n\t\t\tUsage:   \"QPS for kubernetes client\",\n\t\t\tValue:   100,\n\t\t\tEnvVars: []string{\"MODELZ_KUBE_QPS\"},\n\t\t\tAliases: []string{\"kq\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagBurst,\n\t\t\tValue:   250,\n\t\t\tUsage:   \"Burst for kubernetes client\",\n\t\t\tEnvVars: []string{\"MODELZ_KUBE_BURST\"},\n\t\t\tAliases: []string{\"kb\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagResyncPeriod,\n\t\t\tValue:   time.Minute * 5,\n\t\t\tUsage:   \"Resync period for kubernetes client\",\n\t\t\tEnvVars: []string{\"MODELZ_KUBE_RESYNC_PERIOD\"},\n\t\t\tAliases: []string{\"kr\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagControllerThreads,\n\t\t\tValue:   1,\n\t\t\tUsage:   \"Number of threads to use for controller\",\n\t\t\tEnvVars: []string{\"MODELZ_CONTROLLER_THREAD_COUNT\"},\n\t\t\tAliases: []string{\"ct\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagNamespace,\n\t\t\tValue:   \"default\",\n\t\t\tUsage:   \"Namespace to create the ingress in. (We need to keep the same namespace as the inference ingress, because kubernetes does not allow cross namespace owner references)\",\n\t\t\tEnvVars: []string{\"MODELZ_NAMESPACE\"},\n\t\t\tAliases: []string{\"ns\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagHost,\n\t\t\tValue:   \"apiserver\",\n\t\t\tUsage:   \"Host to redirect the request to. 
(apiserver, agent)\",\n\t\t\tEnvVars: []string{\"MODELZ_HOST\"},\n\t\t},\n\t}\n\tinternalApp.Action = runServer\n\n\t// Deal with debug flag.\n\tvar debugEnabled bool\n\n\tinternalApp.Before = func(context *cli.Context) error {\n\t\tdebugEnabled = context.Bool(flagDebug)\n\n\t\tif debugEnabled {\n\t\t\tlogrus.SetReportCaller(true)\n\t\t\tlogrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})\n\t\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\t\tgin.SetMode(gin.DebugMode)\n\t\t} else {\n\t\t\tlogrus.SetFormatter(&logrus.JSONFormatter{})\n\t\t}\n\n\t\treturn nil\n\t}\n\treturn App{\n\t\tApp: internalApp,\n\t}\n}\n\nfunc runServer(clicontext *cli.Context) error {\n\tc := configFromCLI(clicontext)\n\n\tcfgString, _ := c.GetString()\n\tlogrus.WithField(\"config\", c).Info(\"starting ingress operator\")\n\n\tif err := c.Validate(); err != nil {\n\t\tif clicontext.Bool(flagDebug) {\n\t\t\treturn errors.Wrap(err, \"invalid config: \"+cfgString)\n\t\t} else {\n\t\t\treturn errors.Wrap(err, \"invalid config\")\n\t\t}\n\t}\n\n\t// set up signals so we handle the first shutdown signal gracefully\n\tstopCh := signals.SetupSignalHandler()\n\n\ts, err := controller.New(c, stopCh)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create server\")\n\t}\n\n\treturn s.Run(c.Controller.ThreadCount, stopCh)\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/clientset.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage versioned\n\nimport (\n\t\"fmt\"\n\n\ttensorchordv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1\"\n\tdiscovery \"k8s.io/client-go/discovery\"\n\trest \"k8s.io/client-go/rest\"\n\tflowcontrol \"k8s.io/client-go/util/flowcontrol\"\n)\n\ntype Interface interface {\n\tDiscovery() discovery.DiscoveryInterface\n\tTensorchordV1() tensorchordv1.TensorchordV1Interface\n}\n\n// Clientset contains the clients for groups. Each group has exactly one\n// version included in a Clientset.\ntype Clientset struct {\n\t*discovery.DiscoveryClient\n\ttensorchordV1 *tensorchordv1.TensorchordV1Client\n}\n\n// TensorchordV1 retrieves the TensorchordV1Client\nfunc (c *Clientset) TensorchordV1() tensorchordv1.TensorchordV1Interface {\n\treturn c.tensorchordV1\n}\n\n// Discovery retrieves the DiscoveryClient\nfunc (c *Clientset) Discovery() discovery.DiscoveryInterface {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.DiscoveryClient\n}\n\n// NewForConfig creates a new Clientset for the given config.\n// If config's RateLimiter is not set and QPS and Burst are acceptable,\n// NewForConfig will generate a rate-limiter in configShallowCopy.\nfunc NewForConfig(c *rest.Config) (*Clientset, error) {\n\tconfigShallowCopy := *c\n\tif configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {\n\t\tif configShallowCopy.Burst <= 0 {\n\t\t\treturn nil, fmt.Errorf(\"burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0\")\n\t\t}\n\t\tconfigShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)\n\t}\n\tvar cs Clientset\n\tvar err error\n\tcs.tensorchordV1, err = tensorchordv1.NewForConfig(&configShallowCopy)\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\tcs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cs, nil\n}\n\n// NewForConfigOrDie creates a new Clientset for the given config and\n// panics if there is an error in the config.\nfunc NewForConfigOrDie(c *rest.Config) *Clientset {\n\tvar cs Clientset\n\tcs.tensorchordV1 = tensorchordv1.NewForConfigOrDie(c)\n\n\tcs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)\n\treturn &cs\n}\n\n// New creates a new Clientset for the given RESTClient.\nfunc New(c rest.Interface) *Clientset {\n\tvar cs Clientset\n\tcs.tensorchordV1 = tensorchordv1.New(c)\n\n\tcs.DiscoveryClient = discovery.NewDiscoveryClient(c)\n\treturn &cs\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/doc.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package has the automatically generated clientset.\npackage versioned\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/fake/clientset_generated.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\tclientset \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\ttensorchordv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1\"\n\tfaketensorchordv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/fake\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/client-go/discovery\"\n\tfakediscovery \"k8s.io/client-go/discovery/fake\"\n\t\"k8s.io/client-go/testing\"\n)\n\n// NewSimpleClientset returns a clientset that will respond with the provided objects.\n// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,\n// without applying any validations and/or defaults. It shouldn't be considered a replacement\n// for a real clientset and is mostly useful in simple unit tests.\nfunc NewSimpleClientset(objects ...runtime.Object) *Clientset {\n\to := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())\n\tfor _, obj := range objects {\n\t\tif err := o.Add(obj); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tcs := &Clientset{tracker: o}\n\tcs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}\n\tcs.AddReactor(\"*\", \"*\", testing.ObjectReaction(o))\n\tcs.AddWatchReactor(\"*\", func(action testing.Action) (handled bool, ret watch.Interface, err error) {\n\t\tgvr := action.GetResource()\n\t\tns := action.GetNamespace()\n\t\twatch, err := o.Watch(gvr, ns)\n\t\tif err != nil {\n\t\t\treturn false, nil, err\n\t\t}\n\t\treturn true, watch, nil\n\t})\n\n\treturn cs\n}\n\n// Clientset implements clientset.Interface. Meant to be embedded into a\n// struct to get a default implementation. 
This makes faking out just the method\n// you want to test easier.\ntype Clientset struct {\n\ttesting.Fake\n\tdiscovery *fakediscovery.FakeDiscovery\n\ttracker   testing.ObjectTracker\n}\n\nfunc (c *Clientset) Discovery() discovery.DiscoveryInterface {\n\treturn c.discovery\n}\n\nfunc (c *Clientset) Tracker() testing.ObjectTracker {\n\treturn c.tracker\n}\n\nvar (\n\t_ clientset.Interface = &Clientset{}\n\t_ testing.FakeClient  = &Clientset{}\n)\n\n// TensorchordV1 retrieves the TensorchordV1Client\nfunc (c *Clientset) TensorchordV1() tensorchordv1.TensorchordV1Interface {\n\treturn &faketensorchordv1.FakeTensorchordV1{Fake: &c.Fake}\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/fake/doc.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package has the automatically generated fake clientset.\npackage fake\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/fake/register.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\ttensorchordv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n)\n\nvar scheme = runtime.NewScheme()\nvar codecs = serializer.NewCodecFactory(scheme)\n\nvar localSchemeBuilder = runtime.SchemeBuilder{\n\ttensorchordv1.AddToScheme,\n}\n\n// AddToScheme adds all types of this clientset into the given scheme. This allows composition\n// of clientsets, like in:\n//\n//\timport (\n//\t  \"k8s.io/client-go/kubernetes\"\n//\t  clientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n//\t  aggregatorclientsetscheme \"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme\"\n//\t)\n//\n//\tkclientset, _ := kubernetes.NewForConfig(c)\n//\t_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)\n//\n// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types\n// correctly.\nvar AddToScheme = localSchemeBuilder.AddToScheme\n\nfunc init() {\n\tv1.AddToGroupVersion(scheme, schema.GroupVersion{Version: \"v1\"})\n\tutilruntime.Must(AddToScheme(scheme))\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/scheme/doc.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package contains the scheme of the automatically generated clientset.\npackage scheme\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/scheme/register.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage scheme\n\nimport (\n\ttensorchordv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n)\n\nvar Scheme = runtime.NewScheme()\nvar Codecs = serializer.NewCodecFactory(Scheme)\nvar ParameterCodec = runtime.NewParameterCodec(Scheme)\nvar localSchemeBuilder = runtime.SchemeBuilder{\n\ttensorchordv1.AddToScheme,\n}\n\n// AddToScheme adds all types of this clientset into the given scheme. This allows composition\n// of clientsets, like in:\n//\n//\timport (\n//\t  \"k8s.io/client-go/kubernetes\"\n//\t  clientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n//\t  aggregatorclientsetscheme \"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme\"\n//\t)\n//\n//\tkclientset, _ := kubernetes.NewForConfig(c)\n//\t_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)\n//\n// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types\n// correctly.\nvar AddToScheme = localSchemeBuilder.AddToScheme\n\nfunc init() {\n\tv1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: \"v1\"})\n\tutilruntime.Must(AddToScheme(Scheme))\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/doc.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package has the automatically generated typed clients.\npackage v1\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/fake/doc.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// Package fake has the automatically generated clients.\npackage fake\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/fake/fake_inferenceingress.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\t\"context\"\n\n\tmodelzetesv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tlabels \"k8s.io/apimachinery/pkg/labels\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\ttypes \"k8s.io/apimachinery/pkg/types\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\ttesting \"k8s.io/client-go/testing\"\n)\n\n// FakeInferenceIngresses implements InferenceIngressInterface\ntype FakeInferenceIngresses struct {\n\tFake *FakeTensorchordV1\n\tns   string\n}\n\nvar inferenceingressesResource = schema.GroupVersionResource{Group: \"tensorchord.ai\", Version: \"v1\", Resource: \"inferenceingresses\"}\n\nvar inferenceingressesKind = schema.GroupVersionKind{Group: \"tensorchord.ai\", Version: \"v1\", Kind: \"InferenceIngress\"}\n\n// Get takes name of the inferenceIngress, and returns the corresponding inferenceIngress object, and an error if there is any.\nfunc (c *FakeInferenceIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *modelzetesv1.InferenceIngress, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(inferenceingressesResource, c.ns, name), &modelzetesv1.InferenceIngress{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*modelzetesv1.InferenceIngress), err\n}\n\n// List takes label and field selectors, and returns the list of InferenceIngresses that match those selectors.\nfunc (c *FakeInferenceIngresses) List(ctx context.Context, opts v1.ListOptions) (result *modelzetesv1.InferenceIngressList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(inferenceingressesResource, inferenceingressesKind, c.ns, opts), &modelzetesv1.InferenceIngressList{})\n\n\tif obj == nil {\n\t\treturn 
nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &modelzetesv1.InferenceIngressList{ListMeta: obj.(*modelzetesv1.InferenceIngressList).ListMeta}\n\tfor _, item := range obj.(*modelzetesv1.InferenceIngressList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}\n\n// Watch returns a watch.Interface that watches the requested inferenceIngresses.\nfunc (c *FakeInferenceIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(inferenceingressesResource, c.ns, opts))\n\n}\n\n// Create takes the representation of a inferenceIngress and creates it.  Returns the server's representation of the inferenceIngress, and an error, if there is any.\nfunc (c *FakeInferenceIngresses) Create(ctx context.Context, inferenceIngress *modelzetesv1.InferenceIngress, opts v1.CreateOptions) (result *modelzetesv1.InferenceIngress, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(inferenceingressesResource, c.ns, inferenceIngress), &modelzetesv1.InferenceIngress{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*modelzetesv1.InferenceIngress), err\n}\n\n// Update takes the representation of a inferenceIngress and updates it. 
Returns the server's representation of the inferenceIngress, and an error, if there is any.\nfunc (c *FakeInferenceIngresses) Update(ctx context.Context, inferenceIngress *modelzetesv1.InferenceIngress, opts v1.UpdateOptions) (result *modelzetesv1.InferenceIngress, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(inferenceingressesResource, c.ns, inferenceIngress), &modelzetesv1.InferenceIngress{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*modelzetesv1.InferenceIngress), err\n}\n\n// Delete takes name of the inferenceIngress and deletes it. Returns an error if one occurs.\nfunc (c *FakeInferenceIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(inferenceingressesResource, c.ns, name), &modelzetesv1.InferenceIngress{})\n\n\treturn err\n}\n\n// DeleteCollection deletes a collection of objects.\nfunc (c *FakeInferenceIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {\n\taction := testing.NewDeleteCollectionAction(inferenceingressesResource, c.ns, listOpts)\n\n\t_, err := c.Fake.Invokes(action, &modelzetesv1.InferenceIngressList{})\n\treturn err\n}\n\n// Patch applies the patch and returns the patched inferenceIngress.\nfunc (c *FakeInferenceIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *modelzetesv1.InferenceIngress, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(inferenceingressesResource, c.ns, name, pt, data, subresources...), &modelzetesv1.InferenceIngress{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*modelzetesv1.InferenceIngress), err\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/fake/fake_modelzetes_client.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1\"\n\trest \"k8s.io/client-go/rest\"\n\ttesting \"k8s.io/client-go/testing\"\n)\n\ntype FakeTensorchordV1 struct {\n\t*testing.Fake\n}\n\nfunc (c *FakeTensorchordV1) InferenceIngresses(namespace string) v1.InferenceIngressInterface {\n\treturn &FakeInferenceIngresses{c, namespace}\n}\n\n// RESTClient returns a RESTClient that is used to communicate\n// with API server by this client implementation.\nfunc (c *FakeTensorchordV1) RESTClient() rest.Interface {\n\tvar ret *rest.RESTClient\n\treturn ret\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/generated_expansion.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage v1\n\ntype InferenceIngressExpansion interface{}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/inferenceingress.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage v1\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tscheme \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\ttypes \"k8s.io/apimachinery/pkg/types\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\trest \"k8s.io/client-go/rest\"\n)\n\n// InferenceIngressesGetter has a method to return a InferenceIngressInterface.\n// A group's client should implement this interface.\ntype InferenceIngressesGetter interface {\n\tInferenceIngresses(namespace string) InferenceIngressInterface\n}\n\n// InferenceIngressInterface has methods to work with InferenceIngress resources.\ntype InferenceIngressInterface interface {\n\tCreate(ctx context.Context, inferenceIngress *v1.InferenceIngress, opts metav1.CreateOptions) (*v1.InferenceIngress, error)\n\tUpdate(ctx context.Context, inferenceIngress *v1.InferenceIngress, opts metav1.UpdateOptions) (*v1.InferenceIngress, error)\n\tDelete(ctx context.Context, name string, opts metav1.DeleteOptions) error\n\tDeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error\n\tGet(ctx context.Context, name string, opts metav1.GetOptions) (*v1.InferenceIngress, error)\n\tList(ctx context.Context, opts metav1.ListOptions) (*v1.InferenceIngressList, error)\n\tWatch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)\n\tPatch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.InferenceIngress, err error)\n\tInferenceIngressExpansion\n}\n\n// inferenceIngresses implements InferenceIngressInterface\ntype inferenceIngresses struct 
{\n\tclient rest.Interface\n\tns     string\n}\n\n// newInferenceIngresses returns a InferenceIngresses\nfunc newInferenceIngresses(c *TensorchordV1Client, namespace string) *inferenceIngresses {\n\treturn &inferenceIngresses{\n\t\tclient: c.RESTClient(),\n\t\tns:     namespace,\n\t}\n}\n\n// Get takes name of the inferenceIngress, and returns the corresponding inferenceIngress object, and an error if there is any.\nfunc (c *inferenceIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.InferenceIngress, err error) {\n\tresult = &v1.InferenceIngress{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// List takes label and field selectors, and returns the list of InferenceIngresses that match those selectors.\nfunc (c *inferenceIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.InferenceIngressList, err error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\tresult = &v1.InferenceIngressList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// Watch returns a watch.Interface that watches the requested inferenceIngresses.\nfunc (c *inferenceIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}\n\n// Create takes the representation of a 
inferenceIngress and creates it.  Returns the server's representation of the inferenceIngress, and an error, if there is any.\nfunc (c *inferenceIngresses) Create(ctx context.Context, inferenceIngress *v1.InferenceIngress, opts metav1.CreateOptions) (result *v1.InferenceIngress, err error) {\n\tresult = &v1.InferenceIngress{}\n\terr = c.client.Post().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(inferenceIngress).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// Update takes the representation of a inferenceIngress and updates it. Returns the server's representation of the inferenceIngress, and an error, if there is any.\nfunc (c *inferenceIngresses) Update(ctx context.Context, inferenceIngress *v1.InferenceIngress, opts metav1.UpdateOptions) (result *v1.InferenceIngress, err error) {\n\tresult = &v1.InferenceIngress{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tName(inferenceIngress.Name).\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(inferenceIngress).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// Delete takes name of the inferenceIngress and deletes it. 
Returns an error if one occurs.\nfunc (c *inferenceIngresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tName(name).\n\t\tBody(&opts).\n\t\tDo(ctx).\n\t\tError()\n}\n\n// DeleteCollection deletes a collection of objects.\nfunc (c *inferenceIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {\n\tvar timeout time.Duration\n\tif listOpts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second\n\t}\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tVersionedParams(&listOpts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tBody(&opts).\n\t\tDo(ctx).\n\t\tError()\n}\n\n// Patch applies the patch and returns the patched inferenceIngress.\nfunc (c *inferenceIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.InferenceIngress, err error) {\n\tresult = &v1.InferenceIngress{}\n\terr = c.client.Patch(pt).\n\t\tNamespace(c.ns).\n\t\tResource(\"inferenceingresses\").\n\t\tName(name).\n\t\tSubResource(subresources...).\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(data).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/clientset/versioned/typed/modelzetes/v1/modelzetes_client.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage v1\n\nimport (\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/scheme\"\n\trest \"k8s.io/client-go/rest\"\n)\n\ntype TensorchordV1Interface interface {\n\tRESTClient() rest.Interface\n\tInferenceIngressesGetter\n}\n\n// TensorchordV1Client is used to interact with features provided by the tensorchord.ai group.\ntype TensorchordV1Client struct {\n\trestClient rest.Interface\n}\n\nfunc (c *TensorchordV1Client) InferenceIngresses(namespace string) InferenceIngressInterface {\n\treturn newInferenceIngresses(c, namespace)\n}\n\n// NewForConfig creates a new TensorchordV1Client for the given config.\nfunc NewForConfig(c *rest.Config) (*TensorchordV1Client, error) {\n\tconfig := *c\n\tif err := setConfigDefaults(&config); err != nil {\n\t\treturn nil, err\n\t}\n\tclient, err := rest.RESTClientFor(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TensorchordV1Client{client}, nil\n}\n\n// NewForConfigOrDie creates a new TensorchordV1Client for the given config and\n// panics if there is an error in the config.\nfunc NewForConfigOrDie(c *rest.Config) *TensorchordV1Client {\n\tclient, err := NewForConfig(c)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn client\n}\n\n// New creates a new TensorchordV1Client for the given RESTClient.\nfunc New(c rest.Interface) *TensorchordV1Client {\n\treturn &TensorchordV1Client{c}\n}\n\nfunc setConfigDefaults(config *rest.Config) error {\n\tgv := v1.SchemeGroupVersion\n\tconfig.GroupVersion = &gv\n\tconfig.APIPath = \"/apis\"\n\tconfig.NegotiatedSerializer = scheme.Codecs.WithoutConversion()\n\n\tif config.UserAgent == \"\" {\n\t\tconfig.UserAgent = 
rest.DefaultKubernetesUserAgent()\n\t}\n\n\treturn nil\n}\n\n// RESTClient returns a RESTClient that is used to communicate\n// with API server by this client implementation.\nfunc (c *TensorchordV1Client) RESTClient() rest.Interface {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.restClient\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/informers/externalversions/factory.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage externalversions\n\nimport (\n\treflect \"reflect\"\n\tsync \"sync\"\n\ttime \"time\"\n\n\tversioned \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions/internalinterfaces\"\n\tmodelzetes \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions/modelzetes\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// SharedInformerOption defines the functional option type for SharedInformerFactory.\ntype SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory\n\ntype sharedInformerFactory struct {\n\tclient           versioned.Interface\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n\tlock             sync.Mutex\n\tdefaultResync    time.Duration\n\tcustomResync     map[reflect.Type]time.Duration\n\n\tinformers map[reflect.Type]cache.SharedIndexInformer\n\t// startedInformers is used for tracking which informers have been started.\n\t// This allows Start() to be called multiple times safely.\n\tstartedInformers map[reflect.Type]bool\n}\n\n// WithCustomResyncConfig sets a custom resync period for the specified informer types.\nfunc WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {\n\treturn func(factory *sharedInformerFactory) *sharedInformerFactory {\n\t\tfor k, v := range resyncConfig {\n\t\t\tfactory.customResync[reflect.TypeOf(k)] = v\n\t\t}\n\t\treturn factory\n\t}\n}\n\n// WithTweakListOptions sets a custom filter on all listers of the 
configured SharedInformerFactory.\nfunc WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {\n\treturn func(factory *sharedInformerFactory) *sharedInformerFactory {\n\t\tfactory.tweakListOptions = tweakListOptions\n\t\treturn factory\n\t}\n}\n\n// WithNamespace limits the SharedInformerFactory to the specified namespace.\nfunc WithNamespace(namespace string) SharedInformerOption {\n\treturn func(factory *sharedInformerFactory) *sharedInformerFactory {\n\t\tfactory.namespace = namespace\n\t\treturn factory\n\t}\n}\n\n// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.\nfunc NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {\n\treturn NewSharedInformerFactoryWithOptions(client, defaultResync)\n}\n\n// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.\n// Listers obtained via this SharedInformerFactory will be subject to the same filters\n// as specified here.\n// Deprecated: Please use NewSharedInformerFactoryWithOptions instead\nfunc NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {\n\treturn NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))\n}\n\n// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.\nfunc NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {\n\tfactory := &sharedInformerFactory{\n\t\tclient:           client,\n\t\tnamespace:        v1.NamespaceAll,\n\t\tdefaultResync:    defaultResync,\n\t\tinformers:        make(map[reflect.Type]cache.SharedIndexInformer),\n\t\tstartedInformers: 
make(map[reflect.Type]bool),\n\t\tcustomResync:     make(map[reflect.Type]time.Duration),\n\t}\n\n\t// Apply all options\n\tfor _, opt := range options {\n\t\tfactory = opt(factory)\n\t}\n\n\treturn factory\n}\n\n// Start initializes all requested informers.\nfunc (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tfor informerType, informer := range f.informers {\n\t\tif !f.startedInformers[informerType] {\n\t\t\tgo informer.Run(stopCh)\n\t\t\tf.startedInformers[informerType] = true\n\t\t}\n\t}\n}\n\n// WaitForCacheSync waits for all started informers' cache were synced.\nfunc (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {\n\tinformers := func() map[reflect.Type]cache.SharedIndexInformer {\n\t\tf.lock.Lock()\n\t\tdefer f.lock.Unlock()\n\n\t\tinformers := map[reflect.Type]cache.SharedIndexInformer{}\n\t\tfor informerType, informer := range f.informers {\n\t\t\tif f.startedInformers[informerType] {\n\t\t\t\tinformers[informerType] = informer\n\t\t\t}\n\t\t}\n\t\treturn informers\n\t}()\n\n\tres := map[reflect.Type]bool{}\n\tfor informType, informer := range informers {\n\t\tres[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)\n\t}\n\treturn res\n}\n\n// InternalInformerFor returns the SharedIndexInformer for obj using an internal\n// client.\nfunc (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tinformerType := reflect.TypeOf(obj)\n\tinformer, exists := f.informers[informerType]\n\tif exists {\n\t\treturn informer\n\t}\n\n\tresyncPeriod, exists := f.customResync[informerType]\n\tif !exists {\n\t\tresyncPeriod = f.defaultResync\n\t}\n\n\tinformer = newFunc(f.client, resyncPeriod)\n\tf.informers[informerType] = informer\n\n\treturn informer\n}\n\n// SharedInformerFactory provides shared informers for resources in all known\n// 
API group versions.\ntype SharedInformerFactory interface {\n\tinternalinterfaces.SharedInformerFactory\n\tForResource(resource schema.GroupVersionResource) (GenericInformer, error)\n\tWaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool\n\n\tTensorchord() modelzetes.Interface\n}\n\nfunc (f *sharedInformerFactory) Tensorchord() modelzetes.Interface {\n\treturn modelzetes.New(f, f.namespace, f.tweakListOptions)\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/informers/externalversions/generic.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage externalversions\n\nimport (\n\t\"fmt\"\n\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// GenericInformer is type of SharedIndexInformer which will locate and delegate to other\n// sharedInformers based on type\ntype GenericInformer interface {\n\tInformer() cache.SharedIndexInformer\n\tLister() cache.GenericLister\n}\n\ntype genericInformer struct {\n\tinformer cache.SharedIndexInformer\n\tresource schema.GroupResource\n}\n\n// Informer returns the SharedIndexInformer.\nfunc (f *genericInformer) Informer() cache.SharedIndexInformer {\n\treturn f.informer\n}\n\n// Lister returns the GenericLister.\nfunc (f *genericInformer) Lister() cache.GenericLister {\n\treturn cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)\n}\n\n// ForResource gives generic access to a shared informer of the matching type\n// TODO extend this to unknown resources with a client pool\nfunc (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {\n\tswitch resource {\n\t// Group=tensorchord.ai, Version=v1\n\tcase v1.SchemeGroupVersion.WithResource(\"inferenceingresses\"):\n\t\treturn &genericInformer{resource: resource.GroupResource(), informer: f.Tensorchord().V1().InferenceIngresses().Informer()}, nil\n\n\t}\n\n\treturn nil, fmt.Errorf(\"no informer found for %v\", resource)\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage internalinterfaces\n\nimport (\n\ttime \"time\"\n\n\tversioned \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.\ntype NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer\n\n// SharedInformerFactory a small interface to allow for adding an informer without an import cycle\ntype SharedInformerFactory interface {\n\tStart(stopCh <-chan struct{})\n\tInformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer\n}\n\n// TweakListOptionsFunc is a function that transforms a v1.ListOptions.\ntype TweakListOptionsFunc func(*v1.ListOptions)\n"
  },
  {
    "path": "ingress-operator/pkg/client/informers/externalversions/modelzetes/interface.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage modelzetes\n\nimport (\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions/internalinterfaces\"\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions/modelzetes/v1\"\n)\n\n// Interface provides access to each of this group's versions.\ntype Interface interface {\n\t// V1 provides access to shared informers for resources in V1.\n\tV1() v1.Interface\n}\n\ntype group struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n}\n\n// New returns a new Interface.\nfunc New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {\n\treturn &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}\n}\n\n// V1 returns a new v1.Interface.\nfunc (g *group) V1() v1.Interface {\n\treturn v1.New(g.factory, g.namespace, g.tweakListOptions)\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/informers/externalversions/modelzetes/v1/inferenceingress.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage v1\n\nimport (\n\t\"context\"\n\ttime \"time\"\n\n\tmodelzetesv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tversioned \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions/internalinterfaces\"\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/listers/modelzetes/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// InferenceIngressInformer provides access to a shared informer and lister for\n// InferenceIngresses.\ntype InferenceIngressInformer interface {\n\tInformer() cache.SharedIndexInformer\n\tLister() v1.InferenceIngressLister\n}\n\ntype inferenceIngressInformer struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n\tnamespace        string\n}\n\n// NewInferenceIngressInformer constructs a new informer for InferenceIngress type.\n// Always prefer using an informer factory to get a shared informer instead of getting an independent\n// one. 
This reduces memory footprint and number of connections to the server.\nfunc NewInferenceIngressInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {\n\treturn NewFilteredInferenceIngressInformer(client, namespace, resyncPeriod, indexers, nil)\n}\n\n// NewFilteredInferenceIngressInformer constructs a new informer for InferenceIngress type.\n// Always prefer using an informer factory to get a shared informer instead of getting an independent\n// one. This reduces memory footprint and number of connections to the server.\nfunc NewFilteredInferenceIngressInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {\n\treturn cache.NewSharedIndexInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\tif tweakListOptions != nil {\n\t\t\t\t\ttweakListOptions(&options)\n\t\t\t\t}\n\t\t\t\treturn client.TensorchordV1().InferenceIngresses(namespace).List(context.TODO(), options)\n\t\t\t},\n\t\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\tif tweakListOptions != nil {\n\t\t\t\t\ttweakListOptions(&options)\n\t\t\t\t}\n\t\t\t\treturn client.TensorchordV1().InferenceIngresses(namespace).Watch(context.TODO(), options)\n\t\t\t},\n\t\t},\n\t\t&modelzetesv1.InferenceIngress{},\n\t\tresyncPeriod,\n\t\tindexers,\n\t)\n}\n\nfunc (f *inferenceIngressInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n\treturn NewFilteredInferenceIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)\n}\n\nfunc (f *inferenceIngressInformer) Informer() cache.SharedIndexInformer {\n\treturn f.factory.InformerFor(&modelzetesv1.InferenceIngress{}, 
f.defaultInformer)\n}\n\nfunc (f *inferenceIngressInformer) Lister() v1.InferenceIngressLister {\n\treturn v1.NewInferenceIngressLister(f.Informer().GetIndexer())\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/informers/externalversions/modelzetes/v1/interface.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage v1\n\nimport (\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions/internalinterfaces\"\n)\n\n// Interface provides access to all the informers in this group version.\ntype Interface interface {\n\t// InferenceIngresses returns a InferenceIngressInformer.\n\tInferenceIngresses() InferenceIngressInformer\n}\n\ntype version struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n}\n\n// New returns a new Interface.\nfunc New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {\n\treturn &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}\n}\n\n// InferenceIngresses returns a InferenceIngressInformer.\nfunc (v *version) InferenceIngresses() InferenceIngressInformer {\n\treturn &inferenceIngressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}\n"
  },
  {
    "path": "ingress-operator/pkg/client/listers/modelzetes/v1/expansion_generated.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by lister-gen. DO NOT EDIT.\n\npackage v1\n\n// InferenceIngressListerExpansion allows custom methods to be added to\n// InferenceIngressLister.\ntype InferenceIngressListerExpansion interface{}\n\n// InferenceIngressNamespaceListerExpansion allows custom methods to be added to\n// InferenceIngressNamespaceLister.\ntype InferenceIngressNamespaceListerExpansion interface{}\n"
  },
  {
    "path": "ingress-operator/pkg/client/listers/modelzetes/v1/inferenceingress.go",
    "content": "/*\nCopyright 2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by lister-gen. DO NOT EDIT.\n\npackage v1\n\nimport (\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/client-go/tools/cache\"\n)\n\n// InferenceIngressLister helps list InferenceIngresses.\n// All objects returned here must be treated as read-only.\ntype InferenceIngressLister interface {\n\t// List lists all InferenceIngresses in the indexer.\n\t// Objects returned here must be treated as read-only.\n\tList(selector labels.Selector) (ret []*v1.InferenceIngress, err error)\n\t// InferenceIngresses returns an object that can list and get InferenceIngresses.\n\tInferenceIngresses(namespace string) InferenceIngressNamespaceLister\n\tInferenceIngressListerExpansion\n}\n\n// inferenceIngressLister implements the InferenceIngressLister interface.\ntype inferenceIngressLister struct {\n\tindexer cache.Indexer\n}\n\n// NewInferenceIngressLister returns a new InferenceIngressLister.\nfunc NewInferenceIngressLister(indexer cache.Indexer) InferenceIngressLister {\n\treturn &inferenceIngressLister{indexer: indexer}\n}\n\n// List lists all InferenceIngresses in the indexer.\nfunc (s *inferenceIngressLister) List(selector labels.Selector) (ret []*v1.InferenceIngress, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.InferenceIngress))\n\t})\n\treturn ret, err\n}\n\n// InferenceIngresses returns an object that can list and get InferenceIngresses.\nfunc (s *inferenceIngressLister) InferenceIngresses(namespace string) InferenceIngressNamespaceLister {\n\treturn inferenceIngressNamespaceLister{indexer: s.indexer, namespace: namespace}\n}\n\n// InferenceIngressNamespaceLister helps list and get InferenceIngresses.\n// All 
objects returned here must be treated as read-only.\ntype InferenceIngressNamespaceLister interface {\n\t// List lists all InferenceIngresses in the indexer for a given namespace.\n\t// Objects returned here must be treated as read-only.\n\tList(selector labels.Selector) (ret []*v1.InferenceIngress, err error)\n\t// Get retrieves the InferenceIngress from the indexer for a given namespace and name.\n\t// Objects returned here must be treated as read-only.\n\tGet(name string) (*v1.InferenceIngress, error)\n\tInferenceIngressNamespaceListerExpansion\n}\n\n// inferenceIngressNamespaceLister implements the InferenceIngressNamespaceLister\n// interface.\ntype inferenceIngressNamespaceLister struct {\n\tindexer   cache.Indexer\n\tnamespace string\n}\n\n// List lists all InferenceIngresses in the indexer for a given namespace.\nfunc (s inferenceIngressNamespaceLister) List(selector labels.Selector) (ret []*v1.InferenceIngress, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.InferenceIngress))\n\t})\n\treturn ret, err\n}\n\n// Get retrieves the InferenceIngress from the indexer for a given namespace and name.\nfunc (s inferenceIngressNamespaceLister) Get(name string) (*v1.InferenceIngress, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"inferenceingress\"), name)\n\t}\n\treturn obj.(*v1.InferenceIngress), nil\n}\n"
  },
  {
    "path": "ingress-operator/pkg/config/config.go",
    "content": "package config\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"time\"\n)\n\ntype Config struct {\n\tKubeConfig KubeConfig       `json:\"kube_config,omitempty\"`\n\tController ControllerConfig `json:\"controller,omitempty\"`\n}\n\ntype ControllerConfig struct {\n\tThreadCount int    `json:\"thread_count,omitempty\"`\n\tNamespace   string `json:\"namespace,omitempty\"`\n\tHost        string `json:\"host,omitempty\"`\n}\n\ntype KubeConfig struct {\n\tKubeconfig   string        `json:\"kubeconfig,omitempty\"`\n\tMasterURL    string        `json:\"master_url,omitempty\"`\n\tQPS          int           `json:\"qps,omitempty\"`\n\tBurst        int           `json:\"burst,omitempty\"`\n\tResyncPeriod time.Duration `json:\"resync_period,omitempty\"`\n}\n\nfunc New() Config {\n\treturn Config{}\n}\n\nfunc (c Config) GetString() (string, error) {\n\tbytes, err := json.Marshal(c)\n\treturn string(bytes), err\n}\n\nfunc (c Config) Validate() error {\n\tif c.KubeConfig.QPS == 0 ||\n\t\tc.KubeConfig.Burst == 0 ||\n\t\tc.KubeConfig.ResyncPeriod == 0 {\n\t\treturn errors.New(\"invalid kubeconfig\")\n\t}\n\n\tif c.Controller.ThreadCount == 0 || c.Controller.Namespace == \"\" ||\n\t\tc.Controller.Host == \"\" {\n\t\treturn errors.New(\"invalid controller config\")\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "ingress-operator/pkg/consts/consts.go",
    "content": "package consts\n\nconst (\n\tKeyCert           = \"cert\"\n\tEnvironmentPrefix = \"MODELZ\"\n)\n"
  },
  {
    "path": "ingress-operator/pkg/controller/core.go",
    "content": "package controller\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n\tkubeinformers \"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/kubernetes\"\n\ttypedcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/client-go/tools/record\"\n\t\"k8s.io/client-go/util/workqueue\"\n\tklog \"k8s.io/klog\"\n\n\tfaasv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/scheme\"\n\tfaasscheme \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned/scheme\"\n\tv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions/modelzetes/v1\"\n\tlisters \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/listers/modelzetes/v1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\nconst AgentName = \"ingress-operator\"\nconst FaasIngressKind = \"InferenceIngress\"\nconst OpenfaasWorkloadPort = 8080\n\nconst (\n\t// SuccessSynced is used as part of the Event 'reason' when a Function is synced\n\tSuccessSynced = \"Synced\"\n\t// ErrResourceExists is used as part of the Event 'reason' when a Function fails\n\t// to sync due to a Deployment of the same name already existing.\n\tErrResourceExists = \"ErrResourceExists\"\n\t// MessageResourceExists is the message used for Events when a resource\n\t// fails to sync due to a Deployment already existing\n\tMessageResourceExists = \"Resource %q already exists and is not managed by controller\"\n\t// MessageResourceSynced is the message used for an Event fired when a Function\n\t// is synced 
successfully\n\tMessageResourceSynced = \"FunctionIngress synced successfully\"\n)\n\n// BaseController is the controller contains the common function ingress\n// implementation that is shared between the various versions of k8s.\ntype BaseController struct {\n\tFunctionsLister listers.InferenceIngressLister\n\tFunctionsSynced cache.InformerSynced\n\n\t// Workqueue is a rate limited work queue. This is used to queue work to be\n\t// processed instead of performing it as soon as a change happens. This\n\t// means we can ensure we only process a fixed amount of resources at a\n\t// time, and makes it easy to ensure we are never processing the same item\n\t// simultaneously in two different workers.\n\tWorkqueue workqueue.RateLimitingInterface\n\n\tSyncHandler func(ctx context.Context, key string) error\n}\n\nfunc (c BaseController) Run(threadiness int, stopCh <-chan struct{}) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer runtime.HandleCrash()\n\tdefer c.Workqueue.ShutDown()\n\tdefer cancel()\n\n\t// Start the informer factories to begin populating the informer caches\n\t// Wait for the caches to be synced before starting workers\n\tklog.Info(\"Waiting for informer caches to sync\")\n\tif ok := cache.WaitForCacheSync(stopCh, c.FunctionsSynced); !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tklog.Info(\"Starting workers\")\n\t// Launch two workers to process Function resources\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker(ctx), time.Second, stopCh)\n\t}\n\n\tklog.Info(\"Started workers\")\n\t<-stopCh\n\tklog.Info(\"Shutting down workers\")\n\n\treturn nil\n}\n\n// runWorker is a long-running function that will continually call the\n// processNextWorkItem function in order to read and process a message on the workqueue.\nfunc (c BaseController) runWorker(ctx context.Context) func() {\n\treturn func() {\n\t\tfor c.processNextWorkItem(ctx) {\n\t\t}\n\t}\n}\n\n// processNextWorkItem will 
read a single work item off the workqueue and\n// attempt to process it, by calling the syncHandler.\nfunc (c BaseController) processNextWorkItem(ctx context.Context) bool {\n\tobj, shutdown := c.Workqueue.Get()\n\n\tif shutdown {\n\t\treturn false\n\t}\n\n\terr := func(obj interface{}) error {\n\t\tdefer c.Workqueue.Done(obj)\n\t\tvar key string\n\t\tvar ok bool\n\t\tif key, ok = obj.(string); !ok {\n\t\t\tc.Workqueue.Forget(obj)\n\t\t\truntime.HandleError(fmt.Errorf(\"expected string in workqueue but got %#v\", obj))\n\t\t\treturn nil\n\t\t}\n\t\tif err := c.SyncHandler(ctx, key); err != nil {\n\t\t\treturn fmt.Errorf(\"error syncing '%s': %s\", key, err.Error())\n\t\t}\n\t\tc.Workqueue.Forget(obj)\n\t\treturn nil\n\t}(obj)\n\n\tif err != nil {\n\t\truntime.HandleError(err)\n\t\treturn true\n\t}\n\n\treturn true\n}\n\n// enqueueFunction takes a fni resource and converts it into a namespace/name\n// string which is then put onto the work queue. This method should *not* be\n// passed resources of any type other than fni.\nfunc (c *BaseController) EnqueueFunction(obj interface{}) {\n\tvar key string\n\tvar err error\n\tif key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {\n\t\truntime.HandleError(err)\n\t\treturn\n\t}\n\tc.Workqueue.AddRateLimited(key)\n}\n\n// handleObject will take any resource implementing metav1.Object and attempt\n// to find the fni resource that 'owns' it. It does this by looking at the\n// objects metadata.ownerReferences field for an appropriate OwnerReference.\n// It then enqueues that fni resource to be processed. 
If the object does not\n// have an appropriate OwnerReference, it will simply be skipped.\nfunc (c BaseController) HandleObject(obj interface{}) {\n\tvar object metav1.Object\n\tvar ok bool\n\tif object, ok = obj.(metav1.Object); !ok {\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\truntime.HandleError(fmt.Errorf(\"error decoding object, invalid type\"))\n\t\t\treturn\n\t\t}\n\t\tobject, ok = tombstone.Obj.(metav1.Object)\n\t\tif !ok {\n\t\t\truntime.HandleError(fmt.Errorf(\"error decoding object tombstone, invalid type\"))\n\t\t\treturn\n\t\t}\n\t\tklog.V(4).Infof(\"Recovered deleted object '%s' from tombstone\", object.GetName())\n\t}\n\n\tklog.V(4).Infof(\"Processing object: %s\", object.GetName())\n\tif ownerRef := metav1.GetControllerOf(object); ownerRef != nil {\n\t\t// If this object is not owned by a fni, we should not do anything more\n\t\t// with it.\n\t\tif ownerRef.Kind != FaasIngressKind {\n\t\t\treturn\n\t\t}\n\n\t\tfni, err := c.FunctionsLister.InferenceIngresses(object.GetNamespace()).Get(ownerRef.Name)\n\t\tif err != nil {\n\t\t\tklog.Infof(\"FunctionIngress '%s' deleted. 
Ignoring orphaned object '%s': %v\", ownerRef.Name, object.GetSelfLink(), err)\n\t\t\treturn\n\t\t}\n\n\t\tc.EnqueueFunction(fni)\n\t\treturn\n\t}\n}\n\nfunc (c BaseController) SetupEventHandlers(\n\tfunctionIngress v1.InferenceIngressInformer,\n\tkubeInformerFactory kubeinformers.SharedInformerFactory,\n) {\n\tfunctionIngress.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.EnqueueFunction,\n\t\tUpdateFunc: func(old, new interface{}) {\n\n\t\t\toldFn, ok := CheckCustomResourceType(old)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tnewFn, ok := CheckCustomResourceType(new)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdiffSpec := cmp.Diff(oldFn.Spec, newFn.Spec)\n\t\t\tdiffAnnotations := cmp.Diff(oldFn.ObjectMeta.Annotations, newFn.ObjectMeta.Annotations)\n\n\t\t\tif diffSpec != \"\" || diffAnnotations != \"\" {\n\t\t\t\tc.EnqueueFunction(new)\n\t\t\t}\n\t\t},\n\t})\n\n\t// Set up an event handler for when functions related resources like pods, deployments, replica sets\n\t// can't be materialized. 
This logs abnormal events like ImagePullBackOff, back-off restarting failed container,\n\t// failed to start container, oci runtime errors, etc\n\t// Enable this with -v=3\n\tkubeInformerFactory.Core().V1().Events().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tkey, err := cache.MetaNamespaceKeyFunc(obj)\n\t\t\tif err == nil {\n\t\t\t\tevent := obj.(*corev1.Event)\n\t\t\t\tsince := time.Since(event.LastTimestamp.Time)\n\t\t\t\t// log abnormal events occurred in the last minute\n\t\t\t\tif since.Seconds() < 61 && strings.Contains(event.Type, \"Warning\") {\n\t\t\t\t\tklog.V(3).Infof(\"Abnormal event detected on %s %s: %s\", event.LastTimestamp, key, event.Message)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n}\n\nfunc GetClass(ingressType string) string {\n\tswitch ingressType {\n\tcase \"\":\n\tcase \"nginx\":\n\t\treturn \"nginx\"\n\tdefault:\n\t\treturn ingressType\n\t}\n\n\treturn \"nginx\"\n}\n\nfunc GetIssuerKind(issuerType string) string {\n\tswitch issuerType {\n\tcase \"ClusterIssuer\":\n\t\treturn \"cert-manager.io/cluster-issuer\"\n\tdefault:\n\t\treturn \"cert-manager.io/issuer\"\n\t}\n}\n\nfunc MakeAnnotations(fni *faasv1.InferenceIngress, host string) map[string]string {\n\tcontrolPlane, exist := fni.Annotations[consts.AnnotationControlPlaneKey]\n\tclass := GetClass(fni.Spec.IngressType)\n\tspecJSON, _ := json.Marshal(fni)\n\tannotations := make(map[string]string)\n\n\tannotations[\"ai.tensorchord.spec\"] = string(specJSON)\n\tinferenceNamespace := fni.Labels[consts.LabelInferenceNamespace]\n\n\tif !fni.Spec.BypassGateway {\n\t\tswitch class {\n\t\tcase \"nginx\":\n\t\t\tswitch host {\n\t\t\t// TODO: make this configurable\n\t\t\tcase \"apiserver\":\n\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/rewrite-target\"] = \"/api/v1/\" + fni.Spec.Framework +\n\t\t\t\t\t\"/\" + fni.Spec.Function + \"/$1\"\n\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/use-regex\"] = \"true\"\n\t\t\tdefault:\n\t\t\t\t// 
for inference created by modelz apiserver\n\t\t\t\tif exist && controlPlane == consts.ModelzAnnotationValue {\n\t\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/rewrite-target\"] = \"/api/v1/\" + fni.Spec.Framework +\n\t\t\t\t\t\t\"/\" + fni.Spec.Function + \"/$1\"\n\t\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/use-regex\"] = \"true\"\n\t\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/ssl-redirect\"] = \"false\"\n\t\t\t\t} else {\n\t\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/rewrite-target\"] = \"/inference/\" + fni.Name + \".\" + inferenceNamespace + \"/$1\"\n\t\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/ssl-redirect\"] = \"false\"\n\t\t\t\t\tannotations[\"nginx.ingress.kubernetes.io/use-regex\"] = \"true\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tannotations[\"nginx.ingress.kubernetes.io/proxy-send-timeout\"] = \"300\"\n\tannotations[\"nginx.ingress.kubernetes.io/proxy-read-timeout\"] = \"300\"\n\tannotations[\"nginx.ingress.kubernetes.io/proxy-body-size\"] = \"16m\"\n\n\t// We use the default certificate for now.\n\t// if fni.Spec.UseTLS() {\n\t// \tissuerType := GetIssuerKind(fni.Spec.TLS.IssuerRef.Kind)\n\t// \tannotations[issuerType] = fni.Spec.TLS.IssuerRef.Name\n\t// }\n\n\t// Set annotations with overrides from FunctionIngress\n\t// annotations\n\tfor k, v := range fni.ObjectMeta.Annotations {\n\t\tannotations[k] = v\n\t}\n\n\treturn annotations\n}\n\nfunc MakeOwnerRef(fni *faasv1.InferenceIngress) []metav1.OwnerReference {\n\tref := []metav1.OwnerReference{\n\t\t*metav1.NewControllerRef(fni, schema.GroupVersionKind{\n\t\t\tGroup:   faasv1.SchemeGroupVersion.Group,\n\t\t\tVersion: faasv1.SchemeGroupVersion.Version,\n\t\t\tKind:    FaasIngressKind,\n\t\t}),\n\t}\n\treturn ref\n}\n\nfunc CheckCustomResourceType(obj interface{}) (faasv1.InferenceIngress, bool) {\n\tvar fn *faasv1.InferenceIngress\n\tvar ok bool\n\tif fn, ok = obj.(*faasv1.InferenceIngress); !ok {\n\t\tklog.Errorf(\"Event Watch received an invalid object: %#v\", 
obj)\n\t\treturn faasv1.InferenceIngress{}, false\n\t}\n\treturn *fn, true\n}\n\nfunc IngressNeedsUpdate(old, fni *faasv1.InferenceIngress) bool {\n\treturn !cmp.Equal(old.Spec, fni.Spec) ||\n\t\t!cmp.Equal(old.ObjectMeta.Annotations, fni.ObjectMeta.Annotations)\n}\n\nfunc EventRecorder(client kubernetes.Interface) record.EventRecorder {\n\t// Create event broadcaster\n\t// Add o6s types to the default Kubernetes Scheme so Events can be\n\t// logged for faas-controller types.\n\tfaasscheme.AddToScheme(scheme.Scheme)\n\tklog.V(4).Info(\"Creating event broadcaster\")\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(klog.V(4).Infof)\n\teventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events(\"\")})\n\treturn eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: AgentName})\n}\n"
  },
  {
    "path": "ingress-operator/pkg/controller/core_test.go",
    "content": "package controller\n\nimport (\n\t\"testing\"\n\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\tfaasv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n)\n\nfunc TestMakeAnnotations(t *testing.T) {\n\tcases := []struct {\n\t\tname     string\n\t\tingress  faasv1.InferenceIngress\n\t\texpected map[string]string\n\t\texcluded []string\n\t}{\n\t\t{\n\t\t\tname: \"can override ingress class value\",\n\t\t\tingress: faasv1.InferenceIngress{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"kubernetes.io/ingress.class\": \"awesome-nginx\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\t\t\tIngressType: \"awesome-nginx\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: map[string]string{\n\t\t\t\t\"kubernetes.io/ingress.class\": \"awesome-nginx\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"bypass removes rewrite target\",\n\t\t\tingress: faasv1.InferenceIngress{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"kubernetes.io/ingress.class\": \"nginx\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\t\t\tIngressType:   \"nginx\",\n\t\t\t\t\tFunction:      \"nodeinfo\",\n\t\t\t\t\tBypassGateway: true,\n\t\t\t\t\tDomain:        \"nodeinfo.example.com\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texcluded: []string{\"nginx.ingress.kubernetes.io/rewrite-target\"},\n\t\t},\n\t\t{\n\t\t\tname: \"default annotations includes a rewrite-target\",\n\t\t\tingress: faasv1.InferenceIngress{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{},\n\t\t\t\t},\n\t\t\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\t\t\tIngressType: \"nginx\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: map[string]string{\n\t\t\t\t\"nginx.ingress.kubernetes.io/rewrite-target\": \"/api/v1///$1\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"creates required nginx annotations\",\n\t\t\tingress: 
faasv1.InferenceIngress{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"kubernetes.io/ingress.class\": \"nginx\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\t\t\tIngressType: \"nginx\",\n\t\t\t\t\tFramework:   \"mosec\",\n\t\t\t\t\tFunction:    \"main\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: map[string]string{\n\t\t\t\t\"nginx.ingress.kubernetes.io/rewrite-target\": \"/api/v1/mosec/main/$1\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"creates required skipper annotations\",\n\t\t\tingress: faasv1.InferenceIngress{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"kubernetes.io/ingress.class\": \"skipper\",\n\t\t\t\t\t\t\"zalando.org/skipper-filter\":  `setPath(\"/function/nodeinfo\")`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\t\t\tIngressType:   \"skipper\",\n\t\t\t\t\tFunction:      \"nodeinfo\",\n\t\t\t\t\tBypassGateway: false,\n\t\t\t\t\tDomain:        \"nodeinfo.example.com\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: map[string]string{\n\t\t\t\t\"kubernetes.io/ingress.class\": \"skipper\",\n\t\t\t\t\"zalando.org/skipper-filter\":  `setPath(\"/function/nodeinfo\")`,\n\t\t\t},\n\t\t},\n\t\t// {\n\t\t// \tname: \"creates tls issuer annotation\",\n\t\t// \tingress: faasv1.InferenceIngress{\n\t\t// \t\tObjectMeta: metav1.ObjectMeta{\n\t\t// \t\t\tAnnotations: map[string]string{\n\t\t// \t\t\t\t\"kubernetes.io/ingress.class\": \"nginx\",\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\tSpec: faasv1.InferenceIngressSpec{\n\t\t// \t\t\tIngressType:   \"nginx\",\n\t\t// \t\t\tFunction:      \"nodeinfo\",\n\t\t// \t\t\tBypassGateway: false,\n\t\t// \t\t\tDomain:        \"nodeinfo.example.com\",\n\t\t// \t\t\tTLS: &faasv1.InferenceIngressTLS{\n\t\t// \t\t\t\tIssuerRef: faasv1.ObjectReference{\n\t\t// \t\t\t\t\tName: \"clusterFoo\",\n\t\t// \t\t\t\t\tKind: \"ClusterIssuer\",\n\t\t// \t\t\t\t},\n\t\t// \t\t\t\tEnabled: 
true,\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t},\n\t\t// \texpected: map[string]string{\n\t\t// \t\t\"cert-manager.io/cluster-issuer\": \"clusterFoo\",\n\t\t// \t},\n\t\t// },\n\t\t// {\n\t\t// \tname: \"default tls issuer is local\",\n\t\t// \tingress: faasv1.InferenceIngress{\n\t\t// \t\tObjectMeta: metav1.ObjectMeta{\n\t\t// \t\t\tAnnotations: map[string]string{\n\t\t// \t\t\t\t\"kubernetes.io/ingress.class\": \"nginx\",\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\tSpec: faasv1.InferenceIngressSpec{\n\t\t// \t\t\tIngressType:   \"nginx\",\n\t\t// \t\t\tFunction:      \"nodeinfo\",\n\t\t// \t\t\tBypassGateway: false,\n\t\t// \t\t\tDomain:        \"nodeinfo.example.com\",\n\t\t// \t\t\tTLS: &faasv1.InferenceIngressTLS{\n\t\t// \t\t\t\tIssuerRef: faasv1.ObjectReference{\n\t\t// \t\t\t\t\tName: \"clusterFoo\",\n\t\t// \t\t\t\t},\n\t\t// \t\t\t\tEnabled: true,\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t},\n\t\t// \texpected: map[string]string{\n\t\t// \t\t\"cert-manager.io/issuer\": \"clusterFoo\",\n\t\t// \t},\n\t\t// },\n\t}\n\n\tfor _, tc := range cases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult := MakeAnnotations(&tc.ingress, \"apiserver\")\n\t\t\tfor key, value := range tc.expected {\n\t\t\t\tfound, ok := result[key]\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Fatalf(\"Failed to find expected annotation: %q\", key)\n\t\t\t\t}\n\n\t\t\t\tif found != value {\n\t\t\t\t\tt.Fatalf(\"expected annotation value %q, got %q\", value, found)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, key := range tc.excluded {\n\t\t\t\tvalue, ok := result[key]\n\t\t\t\tif ok {\n\t\t\t\t\tt.Fatalf(\"annotations should not include %q, but it was found with value %q\", key, value)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "ingress-operator/pkg/controller/v1/controller.go",
    "content": "package v1\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\tpkgerrors \"github.com/pkg/errors\"\n\t\"github.com/sirupsen/logrus\"\n\n\tfaasv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\tclientset \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\tinformers \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions\"\n\tlisters \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/listers/modelzetes/v1\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/config\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/controller\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tnetv1 \"k8s.io/api/networking/v1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/runtime\"\n\tkubeinformers \"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/kubernetes\"\n\tnetworkingv1 \"k8s.io/client-go/listers/networking/v1\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/client-go/tools/record\"\n\t\"k8s.io/client-go/util/workqueue\"\n)\n\n// SyncHandler is the controller implementation for Function resources\ntype SyncHandler struct {\n\tconfig config.Config\n\n\t// kubeclientset is a standard kubernetes clientset\n\tkubeclientset kubernetes.Interface\n\n\tfunctionsLister listers.InferenceIngressLister\n\n\tingressLister networkingv1.IngressLister\n\n\t// recorder is an event recorder for recording Event resources to the\n\t// Kubernetes API.\n\trecorder record.EventRecorder\n}\n\n// NewController returns a new OpenFaaS controller\nfunc NewController(\n\tcfg config.Config,\n\tkubeclientset kubernetes.Interface,\n\tfaasclientset clientset.Interface,\n\tkubeInformerFactory kubeinformers.SharedInformerFactory,\n\tfunctionIngressFactory informers.SharedInformerFactory,\n) controller.BaseController {\n\n\trecorder := 
controller.EventRecorder(kubeclientset)\n\tfunctionIngress := functionIngressFactory.Tensorchord().V1().InferenceIngresses()\n\tingressInformer := kubeInformerFactory.Networking().V1().Ingresses()\n\tingressLister := ingressInformer.Lister()\n\n\tsyncer := SyncHandler{\n\t\tconfig:          cfg,\n\t\tkubeclientset:   kubeclientset,\n\t\tfunctionsLister: functionIngress.Lister(),\n\t\tingressLister:   ingressLister,\n\t\trecorder:        recorder,\n\t}\n\n\tctrl := controller.BaseController{\n\t\tFunctionsLister: functionIngress.Lister(),\n\t\tFunctionsSynced: functionIngress.Informer().HasSynced,\n\t\tWorkqueue:       workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"FunctionIngresses\"),\n\t\tSyncHandler:     syncer.handler,\n\t}\n\tlogrus.Info(\"Setting up event handlers\")\n\tctrl.SetupEventHandlers(functionIngress, kubeInformerFactory)\n\tingressInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tDeleteFunc: ctrl.HandleObject,\n\t})\n\n\treturn ctrl\n}\n\n// handler compares the actual state with the desired, and attempts to\n// converge the two. 
It then updates the Status block of the fni resource\n// with the current status of the resource.\nfunc (h SyncHandler) handler(ctx context.Context, key string) error {\n\t// Convert the namespace/name string into a distinct namespace and name\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\truntime.HandleError(fmt.Errorf(\"invalid resource key: %s\", key))\n\t\treturn nil\n\t}\n\n\t// Get the fni resource with this namespace/name\n\tfni, err := h.functionsLister.InferenceIngresses(namespace).Get(name)\n\tif err != nil {\n\t\t// The fni resource may no longer exist, in which case we stop processing.\n\t\tif errors.IsNotFound(err) {\n\t\t\truntime.HandleError(fmt.Errorf(\"function ingress '%s' in work queue no longer exists\", key))\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\tlogger := logrus.WithFields(logrus.Fields{\n\t\t\"inference\": fni.Name,\n\t\t\"namespace\": fni.Namespace,\n\t})\n\n\tingresses := h.ingressLister.Ingresses(namespace)\n\tingress, getIngressErr := ingresses.Get(fni.Name)\n\tcreateIngress := errors.IsNotFound(getIngressErr)\n\n\tif !createIngress && ingress == nil {\n\t\tlogrus.Errorf(\"cannot get ingress: %s in %s, error: %s\", fni.Name, namespace, getIngressErr.Error())\n\t}\n\n\tlogger.Debugf(\"createIngress: %v\", createIngress)\n\n\tif createIngress {\n\t\thost := h.config.Controller.Host\n\n\t\trules := makeRules(fni, host)\n\t\ttls := makeTLS(fni)\n\n\t\tns := h.config.Controller.Namespace\n\n\t\tnewIngress := netv1.Ingress{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName:            name,\n\t\t\t\tNamespace:       ns,\n\t\t\t\tAnnotations:     controller.MakeAnnotations(fni, host),\n\t\t\t\tOwnerReferences: controller.MakeOwnerRef(fni),\n\t\t\t},\n\t\t\tSpec: netv1.IngressSpec{\n\t\t\t\tRules:            rules,\n\t\t\t\tIngressClassName: &fni.Spec.IngressType,\n\t\t\t\tTLS:              tls,\n\t\t\t},\n\t\t}\n\n\t\t_, createErr := h.kubeclientset.NetworkingV1().Ingresses(ns).Create(ctx, 
&newIngress, metav1.CreateOptions{})\n\t\tif createErr != nil {\n\t\t\tlogger.Errorf(\"cannot create ingress: %v in %v, error: %v\", name, namespace, createErr.Error())\n\t\t}\n\n\t\th.recorder.Event(fni, corev1.EventTypeNormal, controller.SuccessSynced, controller.MessageResourceSynced)\n\t\treturn nil\n\t}\n\n\told := faasv1.InferenceIngress{}\n\n\tif val, ok := ingress.Annotations[\"ai.tensorchord.spec\"]; ok && len(val) > 0 {\n\t\tunmarshalErr := json.Unmarshal([]byte(val), &old)\n\t\tif unmarshalErr != nil {\n\t\t\treturn pkgerrors.Wrap(unmarshalErr, \"unable to unmarshal from field inference\")\n\t\t}\n\t}\n\n\t// Update the Deployment resource if the fni definition differs\n\tif controller.IngressNeedsUpdate(&old, fni) {\n\t\tlogger.Debugf(\"updating ingress: %s in %s\", fni.Name, namespace)\n\n\t\tif old.ObjectMeta.Name != fni.ObjectMeta.Name {\n\t\t\treturn fmt.Errorf(\"cannot rename object\")\n\t\t}\n\n\t\tupdated := ingress.DeepCopy()\n\n\t\trules := makeRules(fni, h.config.Controller.Host)\n\n\t\tannotations := controller.MakeAnnotations(fni,\n\t\t\th.config.Controller.Host)\n\t\tfor k, v := range annotations {\n\t\t\tupdated.Annotations[k] = v\n\t\t}\n\n\t\tupdated.Spec.Rules = rules\n\t\tupdated.Spec.TLS = makeTLS(fni)\n\n\t\t_, updateErr := h.kubeclientset.NetworkingV1().Ingresses(namespace).Update(ctx, updated, metav1.UpdateOptions{})\n\t\tif updateErr != nil {\n\t\t\tlogrus.Errorf(\"error updating ingress: %v\", updateErr)\n\t\t\treturn updateErr\n\t\t}\n\t}\n\n\t// If an error occurs during Get/Create, we'll requeue the item so we can\n\t// attempt processing again later. 
This could have been caused by a\n\t// temporary network failure, or any other transient reason.\n\tif err != nil {\n\t\treturn fmt.Errorf(\"transient error: %v\", err)\n\t}\n\n\th.recorder.Event(fni, corev1.EventTypeNormal, controller.SuccessSynced, controller.MessageResourceSynced)\n\treturn nil\n}\n\nfunc makeRules(fni *faasv1.InferenceIngress, host string) []netv1.IngressRule {\n\tpath := \"/(.*)\"\n\n\tif fni.Spec.BypassGateway {\n\t\tpath = \"/\"\n\t}\n\n\tif len(fni.Spec.Path) > 0 {\n\t\tpath = fni.Spec.Path\n\t}\n\n\tif controller.GetClass(fni.Spec.IngressType) == \"traefik\" {\n\t\t// We have to trim the regex and the trailing slash for Traefik,\n\t\t// otherwise routing won't work\n\t\tpath = strings.TrimRight(path, \"/(.*)\")\n\t\tif len(path) == 0 {\n\t\t\tpath = \"/\"\n\t\t}\n\t}\n\n\tpathType := netv1.PathTypeImplementationSpecific\n\n\treturn []netv1.IngressRule{\n\t\t{\n\t\t\tHost: fni.Spec.Domain,\n\t\t\tIngressRuleValue: netv1.IngressRuleValue{\n\t\t\t\tHTTP: &netv1.HTTPIngressRuleValue{\n\t\t\t\t\tPaths: []netv1.HTTPIngressPath{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath:     path,\n\t\t\t\t\t\t\tPathType: &pathType,\n\t\t\t\t\t\t\tBackend: netv1.IngressBackend{\n\t\t\t\t\t\t\t\tService: &netv1.IngressServiceBackend{\n\t\t\t\t\t\t\t\t\tName: host,\n\t\t\t\t\t\t\t\t\tPort: netv1.ServiceBackendPort{\n\t\t\t\t\t\t\t\t\t\tNumber: controller.OpenfaasWorkloadPort,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc makeTLS(fni *faasv1.InferenceIngress) []netv1.IngressTLS {\n\tif !fni.Spec.UseTLS() {\n\t\treturn []netv1.IngressTLS{}\n\t}\n\n\treturn []netv1.IngressTLS{\n\t\t{\n\t\t\t// Use default secret name, thus no need to specify SecretName.\n\t\t\tHosts: []string{\n\t\t\t\tfni.Spec.Domain,\n\t\t\t},\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "ingress-operator/pkg/controller/v1/controller_factory.go",
    "content": "package v1\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n\tkubeinformers \"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\n\tclientset \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/clientset/versioned\"\n\tinformers \"github.com/tensorchord/openmodelz/ingress-operator/pkg/client/informers/externalversions\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/config\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/controller\"\n)\n\nfunc New(c config.Config, stopCh <-chan struct{}) (*controller.BaseController, error) {\n\tclientCmdConfig, err := clientcmd.BuildConfigFromFlags(\n\t\tc.KubeConfig.MasterURL, c.KubeConfig.Kubeconfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building kubeconfig: %s\", err.Error())\n\t}\n\n\tclientCmdConfig.QPS = float32(c.KubeConfig.QPS)\n\tclientCmdConfig.Burst = c.KubeConfig.Burst\n\n\tkubeClient, err := kubernetes.NewForConfig(clientCmdConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building Kubernetes clientset: %s\", err.Error())\n\t}\n\n\tingressClient, err := clientset.NewForConfig(clientCmdConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building Inference clientset: %s\", err.Error())\n\t}\n\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, c.KubeConfig.ResyncPeriod)\n\n\tingressInformerFactory := informers.NewSharedInformerFactoryWithOptions(ingressClient, c.KubeConfig.ResyncPeriod)\n\n\tcapabilities, err := getPreferredAvailableAPIs(kubeClient, \"Ingress\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error retrieving Kubernetes cluster capabilities: %s\", err.Error())\n\t}\n\tlogrus.Infof(\"cluster supports ingress in: %s\", capabilities)\n\n\tif !capabilities.Has(\"networking.k8s.io/v1\") {\n\t\treturn nil, errors.New(\"networking.k8s.io/v1 is not 
available\")\n\t}\n\n\tinferenceIngresses := ingressInformerFactory.Tensorchord().V1().InferenceIngresses()\n\tgo inferenceIngresses.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:inferenceingresses\", \"tensorchord\"),\n\t\tstopCh, inferenceIngresses.Informer().HasSynced); !ok {\n\t\treturn nil, errors.New(\"failed to wait for inferenceingresses caches to sync\")\n\t}\n\tingresses := kubeInformerFactory.Networking().V1().Ingresses()\n\tgo ingresses.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:ingresses\", \"networking\"),\n\t\tstopCh, ingresses.Informer().HasSynced); !ok {\n\t\treturn nil, errors.New(\"failed to wait for ingresses caches to sync\")\n\t}\n\n\tctr := NewController(c,\n\t\tkubeClient, ingressClient, kubeInformerFactory,\n\t\tingressInformerFactory)\n\treturn &ctr, nil\n}\n\n// getPreferredAvailableAPIs queries the cluster for the preferred resources information and returns a Capabilities\n// instance containing those api groups that support the specified kind.\n//\n// kind should be the title case singular name of the kind. 
For example, \"Ingress\" is the kind for a resource \"ingress\".\nfunc getPreferredAvailableAPIs(client kubernetes.Interface, kind string) (Capabilities, error) {\n\tdiscoveryclient := client.Discovery()\n\tlists, err := discoveryclient.ServerPreferredResources()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcaps := Capabilities{}\n\tfor _, list := range lists {\n\t\tif len(list.APIResources) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, resource := range list.APIResources {\n\t\t\tif len(resource.Verbs) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif resource.Kind == kind {\n\t\t\t\tcaps[list.GroupVersion] = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn caps, nil\n}\n\ntype Capabilities map[string]bool\n\nfunc (c Capabilities) Has(wanted string) bool {\n\treturn c[wanted]\n}\n\nfunc (c Capabilities) String() string {\n\tkeys := make([]string, 0, len(c))\n\tfor k := range c {\n\t\tkeys = append(keys, k)\n\t}\n\treturn strings.Join(keys, \", \")\n}\n"
  },
  {
    "path": "ingress-operator/pkg/controller/v1/controller_test.go",
    "content": "package v1\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\tnetv1 \"k8s.io/api/networking/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\tfaasv1 \"github.com/tensorchord/openmodelz/ingress-operator/pkg/apis/modelzetes/v1\"\n\t\"github.com/tensorchord/openmodelz/ingress-operator/pkg/controller\"\n)\n\nfunc Test_makeRules_Nginx_RootPath_HasRegex(t *testing.T) {\n\tingress := faasv1.InferenceIngress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\tIngressType: \"nginx\",\n\t\t},\n\t}\n\n\trules := makeRules(&ingress, \"apiserver\")\n\n\tif len(rules) == 0 {\n\t\tt.Errorf(\"Ingress should give at least one rule\")\n\t\tt.Fail()\n\t}\n\n\twantPath := \"/(.*)\"\n\tgotPath := rules[0].HTTP.Paths[0].Path\n\n\tif gotPath != wantPath {\n\t\tt.Errorf(\"want path %s, but got %s\", wantPath, gotPath)\n\t}\n\n\tgotPort := rules[0].HTTP.Paths[0].Backend.Service.Port.Number\n\n\tif gotPort != controller.OpenfaasWorkloadPort {\n\t\tt.Errorf(\"want port %d, but got %d\", controller.OpenfaasWorkloadPort, gotPort)\n\t}\n}\n\nfunc Test_makeRules_Nginx_RootPath_IsRootWithBypassMode(t *testing.T) {\n\twantFunction := \"apiserver\"\n\tingress := faasv1.InferenceIngress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\tBypassGateway: true,\n\t\t\tIngressType:   \"nginx\",\n\t\t\tFunction:      \"nodeinfo\",\n\t\t\t// Path:          \"/\",\n\t\t},\n\t}\n\n\trules := makeRules(&ingress, \"apiserver\")\n\n\tif len(rules) == 0 {\n\t\tt.Errorf(\"Ingress should give at least one rule\")\n\t\tt.Fail()\n\t}\n\n\twantPath := \"/\"\n\tgotPath := rules[0].HTTP.Paths[0].Path\n\n\tif gotPath != wantPath {\n\t\tt.Errorf(\"want path %s, but got %s\", wantPath, gotPath)\n\t}\n\n\tgotHost := rules[0].HTTP.Paths[0].Backend.Service.Name\n\n\tif gotHost != wantFunction {\n\t\tt.Errorf(\"want host to be function: %s, 
but got %s\", wantFunction, gotHost)\n\t}\n}\n\nfunc Test_makeRules_Nginx_PathOverride(t *testing.T) {\n\tingress := faasv1.InferenceIngress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\tIngressType: \"nginx\",\n\t\t\tPath:        \"/v1/profiles/view/(.*)\",\n\t\t},\n\t}\n\n\trules := makeRules(&ingress, \"apiserver\")\n\n\tif len(rules) == 0 {\n\t\tt.Errorf(\"Ingress should give at least one rule\")\n\t\tt.Fail()\n\t}\n\n\twantPath := ingress.Spec.Path\n\tgotPath := rules[0].HTTP.Paths[0].Path\n\n\tif gotPath != wantPath {\n\t\tt.Errorf(\"want path %s, but got %s\", wantPath, gotPath)\n\t}\n}\n\nfunc Test_makeRules_Traefik_RootPath_TrimsRegex(t *testing.T) {\n\tingress := faasv1.InferenceIngress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\tIngressType: \"traefik\",\n\t\t},\n\t}\n\n\trules := makeRules(&ingress, \"apiserver\")\n\n\tif len(rules) == 0 {\n\t\tt.Errorf(\"Ingress should give at least one rule\")\n\t\tt.Fail()\n\t}\n\n\twantPath := \"/\"\n\tgotPath := rules[0].HTTP.Paths[0].Path\n\tif gotPath != wantPath {\n\t\tt.Errorf(\"want path %s, but got %s\", wantPath, gotPath)\n\t}\n}\n\nfunc Test_makeRules_Traefik_NestedPath_TrimsRegex_And_TrailingSlash(t *testing.T) {\n\tingress := faasv1.InferenceIngress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\tIngressType: \"traefik\",\n\t\t\tPath:        \"/v1/profiles/view/(.*)\",\n\t\t},\n\t}\n\n\trules := makeRules(&ingress, \"apiserver\")\n\n\tif len(rules) == 0 {\n\t\tt.Errorf(\"Ingress should give at least one rule\")\n\t\tt.Fail()\n\t}\n\n\twantPath := \"/v1/profiles/view\"\n\tgotPath := rules[0].HTTP.Paths[0].Path\n\tif gotPath != wantPath {\n\t\tt.Errorf(\"want path %s, but got %s\", wantPath, gotPath)\n\t}\n}\n\nfunc Test_makeTLS(t *testing.T) 
{\n\n\tcases := []struct {\n\t\tname     string\n\t\tfni      *faasv1.InferenceIngress\n\t\texpected []netv1.IngressTLS\n\t}{\n\t\t{\n\t\t\tname: \"tls disabled results in empty tls config\",\n\t\t\tfni: &faasv1.InferenceIngress{\n\t\t\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\t\t\tTLS: &faasv1.InferenceIngressTLS{\n\t\t\t\t\t\tEnabled: false,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: []netv1.IngressTLS{},\n\t\t},\n\t\t{\n\t\t\tname: \"tls enabled creates TLS object with correct host and secret with matching the host\",\n\t\t\tfni: &faasv1.InferenceIngress{\n\t\t\t\tSpec: faasv1.InferenceIngressSpec{\n\t\t\t\t\tDomain: \"foo.example.com\",\n\t\t\t\t\tTLS: &faasv1.InferenceIngressTLS{\n\t\t\t\t\t\tEnabled: true,\n\t\t\t\t\t\tIssuerRef: faasv1.ObjectReference{\n\t\t\t\t\t\t\tName: \"test-issuer\",\n\t\t\t\t\t\t\tKind: \"ClusterIssuer\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: []netv1.IngressTLS{\n\t\t\t\t{\n\t\t\t\t\tHosts: []string{\n\t\t\t\t\t\t\"foo.example.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgot := makeTLS(tc.fni)\n\t\t\tif !reflect.DeepEqual(tc.expected, got) {\n\t\t\t\tt.Fatalf(\"want tls config %v, got %v\", tc.expected, got)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "ingress-operator/pkg/controller/v1/docs.go",
    "content": "package v1\n\n/* v1 package provides the original ingress controller implementation.\n\nThis provides support for ingress operator on k8s >= 1.19 networking/v1 api group\n*/\n"
  },
  {
    "path": "ingress-operator/pkg/signals/signal.go",
    "content": "package signals\n\nimport (\n\t\"os\"\n\t\"os/signal\"\n)\n\nvar onlyOneSignalHandler = make(chan struct{})\n\n// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned\n// which is closed on one of these signals. If a second signal is caught, the program\n// is terminated with exit code 1.\nfunc SetupSignalHandler() (stopCh <-chan struct{}) {\n\tclose(onlyOneSignalHandler) // panics when called twice\n\n\tstop := make(chan struct{})\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, shutdownSignals...)\n\tgo func() {\n\t\t<-c\n\t\tclose(stop)\n\t\t<-c\n\t\tos.Exit(1) // second signal. Exit directly.\n\t}()\n\n\treturn stop\n}\n"
  },
  {
    "path": "ingress-operator/pkg/signals/signal_posix.go",
    "content": "//go:build !windows\n// +build !windows\n\npackage signals\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nvar shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}\n"
  },
  {
    "path": "ingress-operator/pkg/signals/signal_windows.go",
    "content": "package signals\n\nimport (\n\t\"os\"\n)\n\nvar shutdownSignals = []os.Signal{os.Interrupt}\n"
  },
  {
    "path": "ingress-operator/pkg/version/version.go",
    "content": "/*\n   Copyright The TensorChord Inc.\n   Copyright The BuildKit Authors.\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\t// Package is filled at linking time\n\tPackage = \"github.com/tensorchord/openmodelz/modelzetes\"\n\n\t// Revision is filled with the VCS (e.g. git) revision being used to build\n\t// the program at linking time.\n\tRevision = \"\"\n\n\tversion         = \"0.0.0+unknown\"\n\tbuildDate       = \"1970-01-01T00:00:00Z\" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'`\n\tgitCommit       = \"\"                     // output from `git rev-parse HEAD`\n\tgitTag          = \"\"                     // output from `git describe --exact-match --tags HEAD` (if clean tree state)\n\tgitTreeState    = \"\"                     // determined from `git status --porcelain`. 
either 'clean' or 'dirty'\n\tdevelopmentFlag = \"false\"\n)\n\n// Version contains envd version information\ntype Version struct {\n\tVersion      string\n\tBuildDate    string\n\tGitCommit    string\n\tGitTag       string\n\tGitTreeState string\n\tGoVersion    string\n\tCompiler     string\n\tPlatform     string\n}\n\nfunc (v Version) String() string {\n\treturn v.Version\n}\n\n// SetGitTagForE2ETest sets the gitTag for test purpose.\nfunc SetGitTagForE2ETest(tag string) {\n\tgitTag = tag\n}\n\n// GetEnvdVersion gets Envd version information\nfunc GetEnvdVersion() string {\n\tvar versionStr string\n\n\tif gitCommit != \"\" && gitTag != \"\" &&\n\t\tgitTreeState == \"clean\" && developmentFlag == \"false\" {\n\t\t// if we have a clean tree state and the current commit is tagged,\n\t\t// this is an official release.\n\t\tversionStr = gitTag\n\t} else {\n\t\t// otherwise formulate a version string based on as much metadata\n\t\t// information we have available.\n\t\tif strings.HasPrefix(version, \"v\") {\n\t\t\tversionStr = version\n\t\t} else {\n\t\t\tversionStr = \"v\" + version\n\t\t}\n\t\tif len(gitCommit) >= 7 {\n\t\t\tversionStr += \"+\" + gitCommit[0:7]\n\t\t\tif gitTreeState != \"clean\" {\n\t\t\t\tversionStr += \".dirty\"\n\t\t\t}\n\t\t} else {\n\t\t\tversionStr += \"+unknown\"\n\t\t}\n\t}\n\treturn versionStr\n}\n\n// GetVersion returns the version information\nfunc GetVersion() Version {\n\treturn Version{\n\t\tVersion:      GetEnvdVersion(),\n\t\tBuildDate:    buildDate,\n\t\tGitCommit:    gitCommit,\n\t\tGitTag:       gitTag,\n\t\tGitTreeState: gitTreeState,\n\t\tGoVersion:    runtime.Version(),\n\t\tCompiler:     runtime.Compiler,\n\t\tPlatform:     fmt.Sprintf(\"%s/%s\", runtime.GOOS, runtime.GOARCH),\n\t}\n}\n\nvar (\n\treRelease *regexp.Regexp\n\treDev     *regexp.Regexp\n\treOnce    sync.Once\n)\n\nfunc UserAgent() string {\n\tversion := GetVersion().String()\n\n\treOnce.Do(func() {\n\t\treRelease = 
regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+$`)\n\t\treDev = regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+`)\n\t})\n\n\tif matches := reRelease.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1]\n\t} else if matches := reDev.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1] + \"-dev\"\n\t}\n\n\treturn \"envd/\" + version\n}\n"
  },
  {
    "path": "ingress-operator/vendor.go",
    "content": "//go:build vendor\n\npackage main\n\n// This file exists to trick \"go mod vendor\" to include \"main\" packages.\n// It is not expected to build, the build tag above is only to prevent this\n// file from being included in builds.\n\nimport (\n\t_ \"k8s.io/code-generator/cmd/client-gen\"\n\t_ \"k8s.io/code-generator/cmd/deepcopy-gen\"\n\t_ \"k8s.io/code-generator/cmd/defaulter-gen\"\n\t_ \"k8s.io/code-generator/cmd/informer-gen\"\n\t_ \"k8s.io/code-generator/cmd/lister-gen\"\n\t_ \"k8s.io/code-generator/cmd/openapi-gen\"\n)\n\nfunc main() {}\n"
  },
  {
    "path": "mdz/.gitignore",
    "content": "# Binaries for programs and plugins\n*.exe\n*.dll\n*.so\n*.dylib\n\n# Test binary, build with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736\n.glide/\n.idea\n\nbin/\ndebug-bin/\n**/password.txt\n**/gateway-password.txt\n\n.vscode\nof_kind_portforward.pid\n/kind*\n/kubectl\n/yaml_armhf\n/yaml_arm64\n\n/broker-*\n\n/chart/pro-builder/out\n/chart/pro-builder/payload.txt\n/pgconnector.yaml\n\njwt_key\njwt_key.pub\n/*.pid\n.tools/\n"
  },
  {
    "path": "mdz/Makefile",
    "content": "# Copyright 2022 TensorChord Inc.\n#\n# The old school Makefile, following are required targets. The Makefile is written\n# to allow building multiple binaries. You are free to add more targets or change\n# existing implementations, as long as the semantics are preserved.\n#\n#   make              - default to 'build' target\n#   make lint         - code analysis\n#   make test         - run unit test (or plus integration test)\n#   make build        - alias to build-local target\n#   make build-local  - build local binary targets\n#   make build-linux  - build linux binary targets\n#   make container    - build containers\n#   $ docker login registry -u username -p xxxxx\n#   make push         - push containers\n#   make clean        - clean up targets\n#\n# Not included but recommended targets:\n#   make e2e-test\n#\n# The makefile is also responsible to populate project version information.\n#\n\n#\n# Tweak the variables based on your project.\n#\n\n# This repo's root import path (under GOPATH).\nROOT := github.com/tensorchord/openmodelz/mdz\n\n# Target binaries. 
You can build multiple binaries for a single project.\nTARGETS := mdz\n\n# Container image prefix and suffix added to targets.\n# The final built images are:\n#   $[REGISTRY]/$[IMAGE_PREFIX]$[TARGET]$[IMAGE_SUFFIX]:$[VERSION]\n# $[REGISTRY] is an item from $[REGISTRIES], $[TARGET] is an item from $[TARGETS].\nIMAGE_PREFIX ?= $(strip )\nIMAGE_SUFFIX ?= $(strip )\n\n# Container registries.\nREGISTRY ?= ghcr.io/tensorchord\n\n# Container registry for base images.\nBASE_REGISTRY ?= docker.io\nBASE_REGISTRY_USER ?= modelzai\n\n# Disable CGO by default.\nCGO_ENABLED ?= 0\n\nGOOS ?= $(shell go env GOOS)\nGOARCH ?= $(shell go env GOARCH)\n\n#\n# These variables should not need tweaking.\n#\n\n# It's necessary to set this because some environments don't link sh -> bash.\nexport SHELL := bash\n\n# It's necessary to set the errexit flags for the bash shell.\nexport SHELLOPTS := errexit\n\nPACKAGE_NAME := github.com/tensorchord/openmodelz/mdz\nGOLANG_CROSS_VERSION  ?= v1.17.6\n\n# Project main package location (can be multiple ones).\nCMD_DIR := ./cmd\n\n# Project output directory.\nOUTPUT_DIR := ./bin\nDEBUG_DIR := ./debug-bin\n\n# Build directory.\nBUILD_DIR := ./build\n\n# Current version of the project.\nVERSION ?= $(shell git describe --match 'v[0-9]*' --always --tags --abbrev=0)\nBUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\nGIT_COMMIT=$(shell git rev-parse HEAD)\nGIT_TAG=$(shell if [ -z \"`git status --porcelain`\" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)\nGIT_TREE_STATE=$(shell if [ -z \"`git status --porcelain`\" ]; then echo \"clean\" ; else echo \"dirty\"; fi)\nGITSHA ?= $(shell git rev-parse --short HEAD)\nBUILD_FLAGS ?= -s -w \\\n  -X $(ROOT)/pkg/version.version=$(VERSION) \\\n  -X $(ROOT)/pkg/version.buildDate=$(BUILD_DATE) \\\n  -X $(ROOT)/pkg/version.gitCommit=$(GIT_COMMIT) \\\n  -X $(ROOT)/pkg/version.gitTreeState=$(GIT_TREE_STATE)\n\n# Track code version with Docker Label.\nDOCKER_LABELS ?= git-describe=\"$(shell date -u 
+v%Y%m%d)-$(shell git describe --tags --always --dirty)\"\n\n# Golang standard bin directory.\nGOPATH ?= $(shell go env GOPATH)\nGOROOT ?= $(shell go env GOROOT)\nBIN_DIR := $(GOPATH)/bin\nGOLANGCI_LINT := $(BIN_DIR)/golangci-lint\n\n# check if we need embed the dashboard\nDASHBOARD_BUILD ?= debug\n\n# Default golang flags used in build and test\n# -mod=vendor: force go to use the vendor files instead of using the `$GOPATH/pkg/mod`\n# -p: the number of programs that can be run in parallel\n# -count: run each test and benchmark 1 times. Set this flag to disable test cache\nexport GOFLAGS ?= -count=1\n\n#\n# Define all targets. At least the following commands are required:\n#\n\n# All targets.\n.PHONY: help lint test build container push addlicense debug debug-local build-local generate clean test-local addlicense-install release build-image\n\n.DEFAULT_GOAL:=build\n\nbuild: build-release  ## Build the release version\n\nhelp:  ## Display this help\n\t@awk 'BEGIN {FS = \":.*##\"; printf \"\\nUsage:\\n  make \\033[36m<target>\\033[0m\\n\"} /^[a-zA-Z0-9_-]+:.*?##/ { printf \"  \\033[36m%-15s\\033[0m %s\\n\", $$1, $$2 } /^##@/ { printf \"\\n\\033[1m%s\\033[0m\\n\", substr($$0, 5) } ' $(MAKEFILE_LIST)\n\ndebug: debug-local  ## Build the debug version\n\n# more info about `GOGC` env: https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint\nlint: $(GOLANGCI_LINT)  ## Lint GO code\n\t@$(GOLANGCI_LINT) run\n\n$(GOLANGCI_LINT):\n\tcurl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin\n\nmockgen-install:\n\tgo install github.com/golang/mock/mockgen@v1.6.0\n\naddlicense-install:\n\tgo install github.com/google/addlicense@latest\n\nsqlc-install:\n\tgo install github.com/kyleconroy/sqlc/cmd/sqlc@latest\n\n# https://github.com/swaggo/swag/pull/1322, we should use master instead of latest for now.\nswag-install:\n\tgo install github.com/swaggo/swag/cmd/swag@v1.8.7\n\nbuild-local:\n\t@for target in 
$(TARGETS); do \\\n\t  CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build \\\n\t  -trimpath -v -o $(OUTPUT_DIR)/$${target} \\\n      -ldflags \"$(BUILD_FLAGS)\" \\\n      $(CMD_DIR)/$${target}; \\\n\tdone\n\nbuild-release: BUILD_FLAGS += -X $(ROOT)/pkg/version.gitTag=$(GIT_TAG)\nbuild-release:\n\t@for target in $(TARGETS); do \\\n\t  CGO_ENABLED=$(CGO_ENABLED) go build -trimpath -o $(OUTPUT_DIR)/$${target} \\\n\t\t-ldflags \"$(BUILD_FLAGS)\" \\\n\t\t$(CMD_DIR)/$${target}; \\\n\tdone\n\n# It is used by vscode to attach into the process.\ndebug-local:\n\t@for target in $(TARGETS); do \\\n\t  CGO_ENABLED=$(CGO_ENABLED) go build -tags $(DASHBOARD_BUILD) -trimpath \\\n\t\t-v -o $(DEBUG_DIR)/$${target} \\\n\t\t-ldflags \"$(BUILD_FLAGS)\" \\\n\t\t-gcflags='all=-N -l' \\\n\t\t$(CMD_DIR)/$${target}; \\\n\tdone\n\naddlicense: addlicense-install  ## Add license to GO code files\n\taddlicense -l mpl -c \"TensorChord Inc.\" $$(find . -type f -name '*.go' | grep -v pkg/docs/docs.go)\n\ntest-local:\n\t@go test -tags=$(DASHBOARD_BUILD) -v -race -coverprofile=coverage.out ./...\n\ntest:  ## Run the tests\n\t@go test -tags=$(DASHBOARD_BUILD) -race -coverpkg=./pkg/... -coverprofile=coverage.out ./...\n\t@go tool cover -func coverage.out | tail -n 1 | awk '{ print \"Total coverage: \" $$3 }'\n\nclean:  ## Clean the outputs and artifacts\n\t@-rm -vrf ${OUTPUT_DIR}\n\t@-rm -vrf ${DEBUG_DIR}\n\t@-rm -vrf build dist .eggs *.egg-info\n\nfmt: swag-install ## Run go fmt against code.\n\tgo fmt ./...\n\tswag fmt\n\nvet: ## Run go vet against code.\n\tgo vet ./...\n\nbuild-image: build-local\n\tdocker build -t ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/modelz-autoscaler:dev -f Dockerfile ./bin\n\tdocker push ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/autoscaler:dev\n\nrelease:\n\t@if [ ! 
-f \".release-env\" ]; then \\\n\t\techo \"\\033[91m.release-env is required for release\\033[0m\";\\\n\t\texit 1;\\\n\tfi\n\tdocker run \\\n\t\t--rm \\\n\t\t--privileged \\\n\t\t-e CGO_ENABLED=1 \\\n\t\t--env-file .release-env \\\n\t\t-v /var/run/docker.sock:/var/run/docker.sock \\\n\t\t-v `pwd`:/go/src/$(PACKAGE_NAME) \\\n\t\t-v `pwd`/sysroot:/sysroot \\\n\t\t-w /go/src/$(PACKAGE_NAME) \\\n\t\tgoreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \\\n\t\trelease --rm-dist\n\ntsschema: swag\n\t@cd dashboard; pnpm tsschema\n\ngenerate: mockgen-install sqlc-install swag tsschema\n\t@mockgen -source pkg/query/querier.go -destination pkg/query/mock/mock.go -package mock\n\t@sqlc generate\n\ndashboard-build:\n\t@cd dashboard; pnpm build\n"
  },
  {
    "path": "mdz/README.md",
    "content": "<div align=\"center\">\n\n# mdz\n\nCLI for OpenModelZ.\n\n</div>\n\n<p align=center>\n<a href=\"https://discord.gg/KqswhpVgdU\"><img alt=\"discord invitation link\" src=\"https://dcbadge.vercel.app/api/server/KqswhpVgdU?style=flat\"></a>\n<a href=\"https://twitter.com/TensorChord\"><img src=\"https://img.shields.io/twitter/follow/tensorchord?style=social\" alt=\"trackgit-views\" /></a>\n</p>\n\n## Installation\n\n```\npip install openmodelz\n```\n\n## CLI Reference\n\nPlease check out our [CLI Documentation](./docs/cli/mdz.md)\n"
  },
  {
    "path": "mdz/cmd/mdz/main.go",
    "content": "/*\nCopyright © 2023 NAME HERE <EMAIL ADDRESS>\n*/\npackage main\n\nimport \"github.com/tensorchord/openmodelz/mdz/pkg/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n"
  },
  {
    "path": "mdz/docs/cli/mdz.md",
    "content": "## mdz\n\nmdz manages your deployments\n\n### Synopsis\n\nmdz helps you deploy applications, manage servers, and troubleshoot issues.\n\n### Examples\n\n```\n  mdz server start\n  mdz deploy --image modelzai/llm-bloomz-560m:23.06.13 --name llm\n  mdz list\n  mdz logs llm\n  mdz port-forward llm 7860\n  mdz exec llm ps\n  mdz exec llm --tty bash\n  mdz delete llm\n\n```\n\n### Options\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -h, --help                help for mdz\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz delete](mdz_delete.md)\t - Delete OpenModelz inferences\n* [mdz deploy](mdz_deploy.md)\t - Deploy a new deployment\n* [mdz exec](mdz_exec.md)\t - Execute a command in a deployment\n* [mdz list](mdz_list.md)\t - List the deployments\n* [mdz logs](mdz_logs.md)\t - Print the logs for a deployment\n* [mdz port-forward](mdz_port-forward.md)\t - Forward one local port to a deployment\n* [mdz scale](mdz_scale.md)\t - Scale a deployment\n* [mdz server](mdz_server.md)\t - Manage the servers\n* [mdz version](mdz_version.md)\t - Print the client and agent version information\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_delete.md",
    "content": "## mdz delete\n\nDelete OpenModelz inferences\n\n### Synopsis\n\nDeletes OpenModelZ inferences\n\n```\nmdz delete [flags]\n```\n\n### Examples\n\n```\n  mdz delete bloomz-560m\n```\n\n### Options\n\n```\n  -h, --help   help for delete\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_deploy.md",
    "content": "## mdz deploy\n\nDeploy a new deployment\n\n### Synopsis\n\nDeploys a new deployment directly via flags.\n\n```\nmdz deploy [flags]\n```\n\n### Examples\n\n```\n  mdz deploy --image=modelzai/llm-bloomz-560m:23.06.13\n  mdz deploy --image=modelzai/llm-bloomz-560m:23.06.13 --name bloomz-560m --node-labels gpu=true,name=node-name\n```\n\n### Options\n\n```\n      --command string        Command to run\n      --gpu int               Number of GPUs\n  -h, --help                  help for deploy\n      --image string          Image to deploy\n      --max-replicas int32    Maximum number of replicas (default 1)\n      --min-replicas int32    Minimum number of replicas (can be 0) (default 1)\n      --name string           Name of inference\n  -l, --node-labels strings   Node labels\n      --port int32            Port to deploy on (default 8080)\n      --probe-path string     HTTP Health probe path\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_exec.md",
    "content": "## mdz exec\n\nExecute a command in a deployment\n\n### Synopsis\n\nExecute a command in a deployment. If no instance is specified, the first instance is used.\n\n```\nmdz exec [flags]\n```\n\n### Examples\n\n```\n  mdz exec bloomz-560m ps\n  mdz exec bloomz-560m --instance bloomz-560m-abcde-abcde ps\n  mdz exec bloomz-560m -ti bash\n  mdz exec bloomz-560m --instance bloomz-560m-abcde-abcde -ti bash\n```\n\n### Options\n\n```\n  -h, --help              help for exec\n  -s, --instance string   Instance name\n  -i, --interactive       Keep stdin open even if not attached\n  -t, --tty               Allocate a TTY for the container\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_list.md",
    "content": "## mdz list\n\nList the deployments\n\n### Synopsis\n\nList the deployments\n\n```\nmdz list [flags]\n```\n\n### Examples\n\n```\n  mdz list\n  mdz list -v\n  mdz list -q\n```\n\n### Options\n\n```\n  -h, --help      help for list\n  -q, --quiet     Quiet mode - print out only the inference names\n  -v, --verbose   Verbose mode - print out all inference details\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n* [mdz list instance](mdz_list_instance.md)\t - List all instances for the given deployment\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_list_instance.md",
    "content": "## mdz list instance\n\nList all instances for the given deployment\n\n### Synopsis\n\nList all instances for the given deployment\n\n```\nmdz list instance [flags]\n```\n\n### Examples\n\n```\n  mdz list instance bloomz-560m\n  mdz list instance bloomz-560m -v\n  mdz list instance bloomz-560m -q\n```\n\n### Options\n\n```\n  -h, --help      help for instance\n  -q, --quiet     Quiet mode - print out only the instance names\n  -v, --verbose   Verbose mode - print out all instance details\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz list](mdz_list.md)\t - List the deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_logs.md",
    "content": "## mdz logs\n\nPrint the logs for a deployment\n\n### Synopsis\n\nPrint the logs for a deployment\n\n```\nmdz logs [flags]\n```\n\n### Examples\n\n```\n  mdz logs bloomz-560m\n```\n\n### Options\n\n```\n  -e, --end string     Only return logs before this timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)\n  -f, --follow         Follow log output\n  -h, --help           help for logs\n  -s, --since string   Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes) (default \"2006-01-02T15:04:05Z\")\n  -t, --tail int       Number of lines to show from the end of the logs\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_port-forward.md",
    "content": "## mdz port-forward\n\nForward one local port to a deployment\n\n### Synopsis\n\nForward one local port to a deployment\n\n```\nmdz port-forward [flags]\n```\n\n### Examples\n\n```\n  mdz port-forward bloomz-560m 7860\n```\n\n### Options\n\n```\n  -h, --help   help for port-forward\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_scale.md",
    "content": "## mdz scale\n\nScale a deployment\n\n### Synopsis\n\nScale a deployment\n\n```\nmdz scale [flags]\n```\n\n### Examples\n\n```\n  mdz scale bloomz-560m --replicas 3\n```\n\n### Options\n\n```\n  -h, --help             help for scale\n  -r, --replicas int32   Number of replicas to scale to\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server.md",
    "content": "## mdz server\n\nManage the servers\n\n### Synopsis\n\nManage the servers\n\n### Examples\n\n```\n  mdz server start\n```\n\n### Options\n\n```\n  -h, --help      help for server\n  -v, --verbose   Verbose output\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n* [mdz server delete](mdz_server_delete.md)\t - Delete a node from the cluster\n* [mdz server destroy](mdz_server_destroy.md)\t - Destroy the cluster\n* [mdz server join](mdz_server_join.md)\t - Join to the cluster\n* [mdz server label](mdz_server_label.md)\t - Update the labels on a server\n* [mdz server list](mdz_server_list.md)\t - List all servers in the cluster\n* [mdz server start](mdz_server_start.md)\t - Start the server\n* [mdz server stop](mdz_server_stop.md)\t - Stop the server\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server_delete.md",
    "content": "## mdz server delete\n\nDelete a node from the cluster\n\n### Synopsis\n\nDelete a node from the cluster\n\n```\nmdz server delete [flags]\n```\n\n### Examples\n\n```\n  mdz server delete gpu-node-1\n```\n\n### Options\n\n```\n  -h, --help   help for delete\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n  -v, --verbose             Verbose output\n```\n\n### SEE ALSO\n\n* [mdz server](mdz_server.md)\t - Manage the servers\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server_destroy.md",
    "content": "## mdz server destroy\n\nDestroy the cluster\n\n### Synopsis\n\nDestroy the cluster\n\n```\nmdz server destroy [flags]\n```\n\n### Examples\n\n```\n  mdz server destroy\n```\n\n### Options\n\n```\n  -h, --help   help for destroy\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n  -v, --verbose             Verbose output\n```\n\n### SEE ALSO\n\n* [mdz server](mdz_server.md)\t - Manage the servers\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server_join.md",
    "content": "## mdz server join\n\nJoin to the cluster\n\n### Synopsis\n\nJoin to the cluster\n\n```\nmdz server join [flags]\n```\n\n### Examples\n\n```\n  mdz server join 192.168.31.192\n```\n\n### Options\n\n```\n  -h, --help                               help for join\n      --mirror-endpoints https://quay.io   Mirror URL endpoints of the registry like https://quay.io\n      --mirror-name string                 Mirror domain name of the registry (default \"docker.io\")\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n  -v, --verbose             Verbose output\n```\n\n### SEE ALSO\n\n* [mdz server](mdz_server.md)\t - Manage the servers\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server_label.md",
    "content": "## mdz server label\n\nUpdate the labels on a server\n\n### Synopsis\n\nUpdate the labels on a server\n\n  *  A label key and value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 63 characters each.\n  *  Optionally, the key can begin with a DNS subdomain prefix and a single '/', like example.com/my-app.\n\t\n\n```\nmdz server label [flags]\n```\n\n### Examples\n\n```\n  mdz server label node-name key=value [key=value...]\n```\n\n### Options\n\n```\n  -h, --help   help for label\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n  -v, --verbose             Verbose output\n```\n\n### SEE ALSO\n\n* [mdz server](mdz_server.md)\t - Manage the servers\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server_list.md",
    "content": "## mdz server list\n\nList all servers in the cluster\n\n### Synopsis\n\nList all servers in the cluster\n\n```\nmdz server list [flags]\n```\n\n### Examples\n\n```\n  mdz server list\n```\n\n### Options\n\n```\n  -h, --help      help for list\n  -q, --quiet     Quiet mode - print out only the server names\n  -v, --verbose   Verbose mode - print out all server details\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz server](mdz_server.md)\t - Manage the servers\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server_start.md",
    "content": "## mdz server start\n\nStart the server\n\n### Synopsis\n\nStart the server with the public IP of the machine. If not provided, the internal IP will be used automatically.\n\n```\nmdz server start [flags]\n```\n\n### Examples\n\n```\n  mdz server start\n  mdz server start -v\n  mdz server start 1.2.3.4\n```\n\n### Options\n\n```\n  -g, --force-gpu                          Start the server with GPU support (ignore the GPU detection)\n  -h, --help                               help for start\n      --mirror-endpoints https://quay.io   Mirror URL endpoints of the registry like https://quay.io\n      --mirror-name string                 Mirror domain name of the registry (default \"docker.io\")\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n  -v, --verbose             Verbose output\n```\n\n### SEE ALSO\n\n* [mdz server](mdz_server.md)\t - Manage the servers\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_server_stop.md",
    "content": "## mdz server stop\n\nStop the server\n\n### Synopsis\n\nStop the server\n\n```\nmdz server stop [flags]\n```\n\n### Examples\n\n```\n  mdz server stop\n```\n\n### Options\n\n```\n  -h, --help   help for stop\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n  -v, --verbose             Verbose output\n```\n\n### SEE ALSO\n\n* [mdz server](mdz_server.md)\t - Manage the servers\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/cli/mdz_version.md",
    "content": "## mdz version\n\nPrint the client and agent version information\n\n### Synopsis\n\nPrint the client and server version information\n\n```\nmdz version [flags]\n```\n\n### Examples\n\n```\n  mdz version\n```\n\n### Options\n\n```\n  -h, --help   help for version\n```\n\n### Options inherited from parent commands\n\n```\n      --debug               Enable debug logging\n      --disable-telemetry   Disable anonymous telemetry\n  -u, --url string          URL to use for the server (MDZ_URL) (default http://localhost:80)\n```\n\n### SEE ALSO\n\n* [mdz](mdz.md)\t - mdz manages your deployments\n\n###### Auto generated by spf13/cobra on 11-Aug-2023\n"
  },
  {
    "path": "mdz/docs/macOS-quickstart.md",
    "content": "# `mdz` Quick Start Guide for macOS\n\n`mdz` only runs on Linux. To use it on macOS, you'll need a Linux VM. This guide will show you how to set up a Linux VM on macOS, install and use `mdz` on it.\n\n- [`mdz` Quick Start Guide for macOS](#mdz-quick-start-guide-for-macos)\n  - [VM Setup](#vm-setup)\n  - [Install `mdz`](#install-mdz)\n    - [Drop into the VM shell](#drop-into-the-vm-shell)\n    - [Install build dependencies](#install-build-dependencies)\n    - [Clone the repository and build](#clone-the-repository-and-build)\n  - [`mdz` Usage](#mdz-usage)\n\n## VM Setup\n\nI use [OrbStack](https://docs.orbstack.dev/machines/) to create and manage my VMs. \n\n![](images/orbstack-vm-create.png)\n\nOpen OrbStack and go to the `Linux Machines` tab, then click `Create` button to create a new VM if you don't already have one.\n\nThis guide uses Archlinux, but you're free to use any distribution you like.\n\n## Install `mdz`\n\n### Drop into the VM shell\n\n```\norb\n# now you're in the Linux VM's shell\n```\n\n### Install build dependencies\n\n```\n# or use your favorite package manager\nsudo pacman -Sy go git\n```\n\n### Clone the repository and build\n\n```\ngit clone https://github.com/tensorchord/OpenModelZ.git\ncd OpenModelZ/mdz && make\n# `make` will build the `mdz` binary under `bin/` for you\n```\n\n## `mdz` Usage\n\n```\n# replace with your `mdz` path, typically it's `./bin/mdz`\nmdz --help\n```\n\n\n\n"
  },
  {
    "path": "mdz/examples/bloomz-560m-openai/README.md",
    "content": "# Bloomz 560M OpenAI Compatible API\n\nThis is a simple example that allows you to use the Bloomz 560M model through an OpenAI-compatible API.\n\n## Deploy\n\n```bash\n$ mdz deploy --image modelzai/llm-bloomz-560m:23.06.13 --name llm\n```\n\n### Get the deployment\n\n```bash\n$ mdz list\n NAME  ENDPOINT                                      STATUS  REPLICAS \n llm   http://localhost:31112/inference/llm.default  Ready   1/1      \n```\n\n### Test the deployment\n\n```python\nimport openai\nopenai.api_base=\"http://localhost:31112/inference/llm.default\"\nopenai.api_key=\"any\"\nopenai.debug = True\n\n# create a chat completion\nchat_completion = openai.ChatCompletion.create(model=\"\", messages=[\n    {\"role\": \"user\", \"content\": \"Who are you?\"},\n    {\"role\": \"assistant\", \"content\": \"I am a student\"},\n    {\"role\": \"user\", \"content\": \"What do you learn?\"},\n    {\"role\": \"assistant\", \"content\": \"I learn math\"},\n    {\"role\": \"user\", \"content\": \"Do you like english?\"}\n], max_tokens=100)\n```\n"
  },
  {
    "path": "mdz/hack/cli-doc-gen/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\n\tcmd \"github.com/tensorchord/openmodelz/mdz/pkg/cmd\"\n)\n\nfunc main() {\n\tpath, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Generating docs in\", filepath.Join(path, \"docs\", \"cli\"))\n\tif err := cmd.GenMarkdownTree(filepath.Join(path, \"docs\", \"cli\")); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/runtime/create.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\tdockertypes \"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/go-connections/nat\"\n\t\"github.com/moby/moby/pkg/jsonmessage\"\n\t\"github.com/moby/term\"\n\t\"github.com/phayes/freeport\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc (r *Runtime) InferenceCreate(ctx context.Context, req types.InferenceDeployment) error {\n\tcfg := &container.Config{\n\t\tImage:        req.Spec.Image,\n\t\tExposedPorts: nat.PortSet{},\n\t}\n\n\tvar port int32 = 8080\n\tif req.Spec.Port != nil {\n\t\tport = *req.Spec.Port\n\t}\n\n\tnow := time.Now()\n\treq.Status = types.InferenceDeploymentStatus{\n\t\tPhase:     types.PhaseNotReady,\n\t\tReplicas:  1,\n\t\tCreatedAt: &now,\n\t}\n\t// Lock the mutex and set cache\n\tr.mutex.Lock()\n\tr.cache[req.Spec.Name] = req\n\tr.mutex.Unlock()\n\n\tgo func() error {\n\t\tbody, err := r.cli.ImagePull(context.TODO(), req.Spec.Image, dockertypes.ImagePullOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer body.Close()\n\n\t\ttermFd, isTerm := term.GetFdInfo(os.Stdout)\n\t\terr = jsonmessage.DisplayJSONMessagesStream(body, os.Stdout, termFd, isTerm, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thostCfg := &container.HostConfig{\n\t\t\tRestartPolicy: container.RestartPolicy{\n\t\t\t\tName: \"always\",\n\t\t\t},\n\t\t\tPortBindings: nat.PortMap{},\n\t\t}\n\n\t\tnatPort := nat.Port(fmt.Sprintf(\"%d/tcp\", port))\n\n\t\thostPort, err := freeport.GetFreePort()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thostCfg.PortBindings[natPort] = []nat.PortBinding{\n\t\t\t{\n\t\t\t\tHostIP:   Localhost,\n\t\t\t\tHostPort: fmt.Sprintf(\"%d\", hostPort),\n\t\t\t},\n\t\t}\n\t\tcfg.ExposedPorts[natPort] = struct{}{}\n\n\t\tcfg.Labels = expectedLabels(req)\n\n\t\tctr, err := r.cli.ContainerCreate(context.TODO(), cfg, hostCfg, nil, nil, 
req.Spec.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := r.cli.ContainerStart(context.TODO(), ctr.ID, dockertypes.ContainerStartOptions{}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr.mutex.Lock()\n\t\tnew := r.cache[req.Spec.Name]\n\t\tnew.Status = types.InferenceDeploymentStatus{\n\t\t\tPhase:             types.PhaseReady,\n\t\t\tReplicas:          1,\n\t\t\tAvailableReplicas: 1,\n\t\t\tCreatedAt:         &now,\n\t\t}\n\t\tr.mutex.Unlock()\n\t\treturn nil\n\t}()\n\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/runtime/delete.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\tdockertypes \"github.com/docker/docker/api/types\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n)\n\nfunc (r *Runtime) InferenceDelete(ctx context.Context, name string) error {\n\tdefer func() {\n\t\tr.mutex.Lock()\n\t\tdelete(r.cache, name)\n\t\tr.mutex.Unlock()\n\t}()\n\n\tctr, err := r.cli.ContainerInspect(ctx, name)\n\tif err != nil {\n\t\tif !client.IsErrNotFound(err) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif ctr.Config.Labels[labelVendor] != valueVendor {\n\t\treturn nil\n\t}\n\n\tif err := r.cli.ContainerRemove(ctx, name, dockertypes.ContainerRemoveOptions{\n\t\tForce: true,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/runtime/label.go",
    "content": "package runtime\n\nimport \"github.com/tensorchord/openmodelz/agent/api/types\"\n\nconst (\n\tlabelVendor = \"ai.modelz.open.vendor\"\n\tvalueVendor = \"openmodelz\"\n\n\tlabelName = \"ai.modelz.open.name\"\n)\n\nfunc expectedLabels(inf types.InferenceDeployment) map[string]string {\n\treturn map[string]string{\n\t\tlabelVendor: valueVendor,\n\t\tlabelName:   inf.Spec.Name,\n\t}\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/runtime/list.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\n\tdockertypes \"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/filters\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nfunc (r *Runtime) InferenceList(ns string) ([]types.InferenceDeployment, error) {\n\tr.mutex.Lock()\n\tres := r.cache\n\tr.mutex.Unlock()\n\n\tctrs, err := r.cli.ContainerList(context.TODO(), dockertypes.ContainerListOptions{\n\t\tFilters: filters.NewArgs(filters.Arg(\"label\", labelVendor+\"=\"+valueVendor)),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ctr := range ctrs {\n\t\tinf := types.InferenceDeployment{\n\t\t\tSpec: types.InferenceDeploymentSpec{\n\t\t\t\tName:      ctr.Labels[labelName],\n\t\t\t\tImage:     ctr.Image,\n\t\t\t\tNamespace: \"default\",\n\t\t\t},\n\t\t\tStatus: types.InferenceDeploymentStatus{},\n\t\t}\n\t\tif ctr.State == \"running\" {\n\t\t\tinf.Status.Phase = types.PhaseReady\n\t\t\tinf.Status.AvailableReplicas = 1\n\t\t\tinf.Status.Replicas = 1\n\t\t} else {\n\t\t\tinf.Status.Phase = types.PhaseNotReady\n\t\t\tinf.Status.Replicas = 1\n\t\t}\n\n\t\tres[inf.Spec.Name] = inf\n\t}\n\n\tl := []types.InferenceDeployment{}\n\tfor _, inf := range res {\n\t\tl = append(l, inf)\n\t}\n\treturn l, nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/runtime/proxy.go",
    "content": "package runtime\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\nfunc (r *Runtime) InfereceProxy(c *gin.Context, name string) error {\n\tctr, err := r.cli.ContainerInspect(c.Request.Context(), name)\n\tif err != nil {\n\t\treturn errdefs.System(err)\n\t}\n\n\tif ctr.Config.Labels[labelVendor] != valueVendor {\n\t\treturn errdefs.NotFound(errors.New(\"container not found\"))\n\t}\n\n\tport := \"\"\n\tfor _, c := range ctr.HostConfig.PortBindings {\n\t\tif len(c) > 0 {\n\t\t\tport = c[0].HostPort\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif port == \"\" {\n\t\treturn errdefs.NotFound(errors.New(\"port not found\"))\n\t}\n\n\turl, err := url.Parse(\"http://\" + Localhost + \":\" + port)\n\tif err != nil {\n\t\treturn errdefs.System(err)\n\t}\n\tproxyServer := httputil.NewSingleHostReverseProxy(url)\n\n\tproxyServer.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   time.Minute * 5,\n\t\t\tKeepAlive: time.Minute * 5,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t}\n\tproxyServer.Director = func(req *http.Request) {\n\t\ttargetQuery := url.RawQuery\n\t\treq.URL.Scheme = url.Scheme\n\t\treq.URL.Host = url.Host\n\t\t// req.URL.Path, req.URL.RawPath = joinURLPath(target, req.URL)\n\t\tif targetQuery == \"\" || req.URL.RawQuery == \"\" {\n\t\t\treq.URL.RawQuery = targetQuery + req.URL.RawQuery\n\t\t} else {\n\t\t\treq.URL.RawQuery = targetQuery + \"&\" + req.URL.RawQuery\n\t\t}\n\t\treq.URL.Path = c.Param(\"proxyPath\")\n\t\tif req.URL.Path == \"\" {\n\t\t\treq.URL.Path = \"/\"\n\t\t}\n\t}\n\n\tproxyServer.ServeHTTP(c.Writer, c.Request)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/runtime/runtime.go",
    "content": "package runtime\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"github.com/docker/docker/client\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\ntype Runtime struct {\n\tcli   client.APIClient\n\tcache map[string]types.InferenceDeployment\n\tmutex sync.Mutex\n}\n\nconst (\n\tLocalhost = \"127.0.0.1\"\n)\n\nfunc New() (*Runtime, error) {\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcli.NegotiateAPIVersion(context.Background())\n\n\treturn &Runtime{\n\t\tcli:   cli,\n\t\tcache: map[string]types.InferenceDeployment{},\n\t}, nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/error.go",
    "content": "package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/tensorchord/openmodelz/agent/errdefs\"\n)\n\n// Error defines a standard application error.\ntype Error struct {\n\t// Machine-readable error code.\n\tHTTPStatusCode int `json:\"http_status_code,omitempty\"`\n\n\t// Human-readable message.\n\tMessage string `json:\"message,omitempty\"`\n\tRequest string `json:\"request,omitempty\"`\n\n\t// Logical operation and nested error.\n\tOp  string `json:\"op,omitempty\"`\n\tErr error  `json:\"error,omitempty\"`\n}\n\n// Error returns the string representation of the error message.\nfunc (e *Error) Error() string {\n\tvar buf bytes.Buffer\n\n\t// Print the current operation in our stack, if any.\n\tif e.Op != \"\" {\n\t\tfmt.Fprintf(&buf, \"%s: \", e.Op)\n\t}\n\n\t// If wrapping an error, print its Error() message.\n\t// Otherwise print the error code & message.\n\tif e.Err != nil {\n\t\tbuf.WriteString(e.Err.Error())\n\t} else {\n\t\tif e.HTTPStatusCode != 0 {\n\t\t\tfmt.Fprintf(&buf, \"<%s> \", http.StatusText(e.HTTPStatusCode))\n\t\t}\n\t\tbuf.WriteString(e.Message)\n\t}\n\treturn buf.String()\n}\n\nfunc NewError(code int, err error, op string) error {\n\treturn &Error{\n\t\tHTTPStatusCode: code,\n\t\tErr:            err,\n\t\tMessage:        err.Error(),\n\t\tOp:             op,\n\t}\n}\n\nfunc errFromErrDefs(err error, op string) error {\n\tif errdefs.IsCancelled(err) {\n\t\treturn NewError(http.StatusRequestTimeout, err, op)\n\t} else if errdefs.IsConflict(err) {\n\t\treturn NewError(http.StatusConflict, err, op)\n\t} else if errdefs.IsDataLoss(err) {\n\t\treturn NewError(http.StatusInternalServerError, err, op)\n\t} else if errdefs.IsDeadline(err) {\n\t\treturn NewError(http.StatusRequestTimeout, err, op)\n\t} else if errdefs.IsForbidden(err) {\n\t\treturn NewError(http.StatusForbidden, err, op)\n\t} else if errdefs.IsInvalidParameter(err) {\n\t\treturn NewError(http.StatusBadRequest, err, op)\n\t} else if 
errdefs.IsNotFound(err) {\n\t\treturn NewError(http.StatusNotFound, err, op)\n\t} else if errdefs.IsNotImplemented(err) {\n\t\treturn NewError(http.StatusNotImplemented, err, op)\n\t} else if errdefs.IsNotModified(err) {\n\t\treturn NewError(http.StatusNotModified, err, op)\n\t} else if errdefs.IsSystem(err) {\n\t\treturn NewError(http.StatusInternalServerError, err, op)\n\t} else if errdefs.IsUnauthorized(err) {\n\t\treturn NewError(http.StatusUnauthorized, err, op)\n\t} else if errdefs.IsUnavailable(err) {\n\t\treturn NewError(http.StatusServiceUnavailable, err, op)\n\t} else if errdefs.IsUnknown(err) {\n\t\treturn NewError(http.StatusInternalServerError, err, op)\n\t}\n\treturn NewError(http.StatusInternalServerError, err, op)\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_healthz.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n)\n\n// @Summary     Healthz\n// @Description Healthz\n// @Tags        system\n// @Accept      json\n// @Produce     json\n// @Success     200\n// @Router      /healthz [get]\nfunc (s *Server) handleHealthz(c *gin.Context) error {\n\tc.Status(http.StatusOK)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_inference_create.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Create the inferences.\n// @Description Create the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       request body     types.InferenceDeployment true \"query params\"\n// @Success     201     {object} types.InferenceDeployment\n// @Router      /system/inferences [post]\nfunc (s *Server) handleInferenceCreate(c *gin.Context) error {\n\tevent := types.DeploymentCreateEvent\n\n\tvar req types.InferenceDeployment\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\t// Set the default values.\n\ts.validator.DefaultDeployRequest(&req)\n\n\t// Validate the request.\n\tif err := s.validator.ValidateDeployRequest(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\t// Create the inference.\n\tif err := s.runtime.InferenceCreate(c.Request.Context(), req); err != nil {\n\t\treturn errFromErrDefs(err, event)\n\t}\n\tc.JSON(http.StatusCreated, req)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_inference_delete.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Delete the inferences.\n// @Description Delete the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       request   body     types.DeleteFunctionRequest true \"query params\"\n// @Param       namespace query    string                      true \"Namespace\"  example(\"modelz-d3524a71-c17c-4c92-8faf-8603f02f4713\")\n// @Success     202       {object} types.DeleteFunctionRequest\n// @Router      /system/inferences [delete]\nfunc (s *Server) handleInferenceDelete(c *gin.Context) error {\n\tevent := types.DeploymentDeleteEvent\n\tvar req types.DeleteFunctionRequest\n\tif err := c.ShouldBindJSON(&req); err != nil {\n\t\treturn NewError(http.StatusBadRequest, err, event)\n\t}\n\n\tif req.FunctionName == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest,\n\t\t\terrors.New(\"function name is required\"), event)\n\t}\n\n\tif err := s.runtime.InferenceDelete(c.Request.Context(),\n\t\treq.FunctionName); err != nil {\n\t\treturn errFromErrDefs(err, event)\n\t}\n\n\tc.JSON(http.StatusAccepted, req)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_inference_get.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     Get the inference by name.\n// @Description Get the inference by name.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"  example(\"modelz-d3524a71-c17c-4c92-8faf-8603f02f4713\")\n// @Param       name      path     string true \"inference id\" example(\"e50886f3-caa6-449f-9fa8-7849c6ba2e08\")\n// @Success     200       {object} types.InferenceDeployment\n// @Router      /system/inference/{name} [get]\nfunc (s *Server) handleInferenceGet(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-get\")\n\t}\n\tname := c.Param(\"name\")\n\tif name == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"name is required\"), \"inference-get\")\n\t}\n\n\t// function, err := s.runtime.InferenceGet(namespace, name)\n\t// if err != nil {\n\t// \treturn errFromErrDefs(err, \"inference-get\")\n\t// }\n\n\t// c.JSON(http.StatusOK, function)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_inference_list.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t_ \"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\n// @Summary     List the inferences.\n// @Description List the inferences.\n// @Tags        inference\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true \"Namespace\"  example(\"modelz-d3524a71-c17c-4c92-8faf-8603f02f4713\")\n// @Success     200       {object} []types.InferenceDeployment\n// @Router      /system/inferences [get]\nfunc (s *Server) handleInferenceList(c *gin.Context) error {\n\tnamespace := c.Query(\"namespace\")\n\tif namespace == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"namespace is required\"), \"inference-list\")\n\t}\n\n\tinferenes, err := s.runtime.InferenceList(namespace)\n\tif err != nil {\n\t\treturn errFromErrDefs(err, \"inference-list\")\n\t}\n\n\tc.JSON(http.StatusOK, inferenes)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_inference_logs.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-gonic/gin\"\n)\n\n// @Summary     Get the inference logs.\n// @Description Get the inference logs.\n// @Tags        log\n// @Accept      json\n// @Produce     json\n// @Param       namespace query    string true  \"Namespace\" example(\"modelz-d3524a71-c17c-4c92-8faf-8603f02f4713\")\n// @Param       name      query    string true  \"Name\"\n// @Param       instance  query    string false \"Instance\"\n// @Param       tail      query    int    false \"Tail\"\n// @Param       follow    query    bool   false \"Follow\"\n// @Param       since     query    string false \"Since\" example(\"2023-04-01T00:06:31+08:00\")\n// @Param       end       query    string false \"End\"   example(\"2023-05-31T00:06:31+08:00\")\n// @Success     200       {object} []types.Message\n// @Router      /system/logs/inference [get]\nfunc (s *Server) handleInferenceLogs(c *gin.Context) error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_inference_proxy.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"github.com/gin-gonic/gin\"\n)\n\n// @Summary     Inference.\n// @Description Inference proxy.\n// @Tags        inference-proxy\n// @Accept      json\n// @Produce     json\n// @Router      /inference/{name} [post]\n// @Router      /inference/{name} [get]\n// @Router      /inference/{name} [put]\n// @Router      /inference/{name} [delete]\nfunc (s *Server) handleInferenceProxy(c *gin.Context) error {\n\tnamespacedName := c.Param(\"name\")\n\tif namespacedName == \"\" {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, errors.New(\"name is required\"), \"inference-proxy\")\n\t}\n\n\t_, name, err := getNamespaceAndName(namespacedName)\n\tif err != nil {\n\t\treturn NewError(\n\t\t\thttp.StatusBadRequest, err, \"inference-proxy\")\n\t}\n\n\treturn s.runtime.InfereceProxy(c, name)\n}\n\nfunc getNamespaceAndName(name string) (string, string, error) {\n\tif !strings.Contains(name, \".\") {\n\t\treturn \"\", \"\", fmt.Errorf(\"name is not namespaced\")\n\t}\n\tnamespace := name[strings.LastIndexAny(name, \".\")+1:]\n\tinfName := strings.TrimSuffix(name, \".\"+namespace)\n\n\tif namespace == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"namespace is empty\")\n\t}\n\n\tif infName == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"inference name is empty\")\n\t}\n\treturn namespace, infName, nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/handler_info.go",
    "content": "package server\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/version\"\n)\n\n// @Summary     Get system info.\n// @Description Get system info.\n// @Tags        system\n// @Accept      json\n// @Produce     json\n// @Success     200 {object} types.ProviderInfo\n// @Router      /system/info [get]\nfunc (s *Server) handleInfo(c *gin.Context) error {\n\tv := version.GetVersion()\n\tc.JSON(http.StatusOK, types.ProviderInfo{\n\t\tName:          \"local agent\",\n\t\tOrchestration: \"docker\",\n\t\tVersion: &types.VersionInfo{\n\t\t\tVersion:      v.Version,\n\t\t\tBuildDate:    v.BuildDate,\n\t\t\tGitCommit:    v.GitCommit,\n\t\t\tGitTag:       v.GitTag,\n\t\t\tGitTreeState: v.GitTreeState,\n\t\t\tGoVersion:    v.GoVersion,\n\t\t\tCompiler:     v.Compiler,\n\t\t\tPlatform:     v.Platform,\n\t\t},\n\t})\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/middleware_callid.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/google/uuid\"\n)\n\nfunc (s Server) middlewareCallID(c *gin.Context) error {\n\tstart := time.Now()\n\tif len(c.Request.Header.Get(\"X-Call-Id\")) == 0 {\n\t\tcallID := uuid.New().String()\n\t\tc.Request.Header.Add(\"X-Call-Id\", callID)\n\t\tc.Writer.Header().Add(\"X-Call-Id\", callID)\n\t}\n\n\tc.Request.Header.Add(\"X-Start-Time\", fmt.Sprintf(\"%d\", start.UTC().UnixNano()))\n\tc.Writer.Header().Add(\"X-Start-Time\", fmt.Sprintf(\"%d\", start.UTC().UnixNano()))\n\n\tc.Next()\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/server_factory.go",
    "content": "package server\n\nimport (\n\t\"github.com/gin-contrib/cors\"\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/tensorchord/openmodelz/agent/pkg/server/validator\"\n\tginlogrus \"github.com/toorop/gin-logrus\"\n\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/agentd/runtime\"\n)\n\ntype Server struct {\n\trouter        *gin.Engine\n\tmetricsRouter *gin.Engine\n\tlogger        *logrus.Entry\n\tvalidator     *validator.Validator\n\truntime       *runtime.Runtime\n}\n\nfunc New() (*Server, error) {\n\trouter := gin.New()\n\trouter.Use(ginlogrus.Logger(logrus.StandardLogger(), \"/healthz\"))\n\trouter.Use(gin.Recovery())\n\n\t// metrics server\n\tmetricsRouter := gin.New()\n\tmetricsRouter.Use(gin.Recovery())\n\n\tif gin.Mode() == gin.DebugMode {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\tlogrus.Debug(\"Allow CORS\")\n\t\trouter.Use(cors.New(cors.Config{\n\t\t\tAllowOrigins: []string{\"*\"},\n\t\t\tAllowMethods: []string{\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"},\n\t\t\tAllowHeaders: []string{\"*\"},\n\t\t}))\n\t}\n\n\tlogger := logrus.WithField(\"component\", \"agentd\")\n\n\tr, err := runtime.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts := &Server{\n\t\trouter:        router,\n\t\tmetricsRouter: metricsRouter,\n\t\tlogger:        logger,\n\t\tvalidator:     validator.New(),\n\t\truntime:       r,\n\t}\n\n\ts.registerRoutes()\n\treturn s, nil\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/server_handlerfunc.go",
    "content": "package server\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\n\t\"github.com/gin-gonic/gin\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype HandlerFunc func(c *gin.Context) error\n\nfunc WrapHandler(handler HandlerFunc) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\terr := handler(c)\n\t\tif err != nil {\n\t\t\tvar serverErr *Error\n\t\t\tif !errors.As(err, &serverErr) {\n\t\t\t\tserverErr = &Error{\n\t\t\t\t\tHTTPStatusCode: http.StatusInternalServerError,\n\t\t\t\t\tErr:            err,\n\t\t\t\t\tMessage:        err.Error(),\n\t\t\t\t}\n\t\t\t}\n\t\t\tserverErr.Request = c.Request.Method + \" \" + c.Request.URL.String()\n\n\t\t\tif gin.Mode() == \"debug\" {\n\t\t\t\tlogrus.Debugf(\"error: %+v\", err)\n\t\t\t} else {\n\t\t\t\t// Remove detailed info when in the release mode\n\t\t\t\tserverErr.Op = \"\"\n\t\t\t\tserverErr.Err = nil\n\t\t\t}\n\n\t\t\tc.JSON(serverErr.HTTPStatusCode, serverErr)\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/server_init_route.go",
    "content": "package server\n\nimport (\n\tswaggerfiles \"github.com/swaggo/files\"\n\tginSwagger \"github.com/swaggo/gin-swagger\"\n)\n\nconst (\n\tendpointInferencePlural = \"/inferences\"\n\tendpointInference       = \"/inference\"\n\tendpointScaleInference  = \"/scale-inference\"\n\tendpointInfo            = \"/info\"\n\tendpointLogPlural       = \"/logs\"\n\tendpointNamespacePlural = \"/namespaces\"\n\tendpointHealthz         = \"/healthz\"\n\tendpointBuild           = \"/build\"\n)\n\nfunc (s *Server) registerRoutes() {\n\troot := s.router.Group(\"/\")\n\n\t// swagger\n\troot.GET(\"/swagger/*any\", ginSwagger.WrapHandler(swaggerfiles.Handler))\n\t// dataplane\n\troot.Any(\"/inference/:name\",\n\t\tWrapHandler(s.middlewareCallID),\n\t\tWrapHandler(s.handleInferenceProxy))\n\troot.Any(\"/inference/:name/*proxyPath\",\n\t\tWrapHandler(s.middlewareCallID),\n\t\tWrapHandler(s.handleInferenceProxy))\n\n\t// healthz\n\troot.GET(endpointHealthz, WrapHandler(s.handleHealthz))\n\n\t// control plane\n\tcontrolPlane := root.Group(\"/system\")\n\t// inferences\n\tcontrolPlane.GET(endpointInferencePlural,\n\t\tWrapHandler(s.handleInferenceList))\n\tcontrolPlane.POST(endpointInferencePlural,\n\t\tWrapHandler(s.handleInferenceCreate))\n\t// controlPlane.PUT(endpointInferencePlural,\n\t// \tWrapHandler(s.handleInferenceUpdate))\n\tcontrolPlane.DELETE(endpointInferencePlural,\n\t\tWrapHandler(s.handleInferenceDelete))\n\t// controlPlane.POST(endpointScaleInference,\n\t// \tWrapHandler(s.handleInferenceScale))\n\tcontrolPlane.GET(endpointInference+\"/:name\",\n\t\tWrapHandler(s.handleInferenceGet))\n\n\t// instances\n\t// controlPlane.GET(endpointInference+\"/:name/instances\",\n\t// \tWrapHandler(s.handleInferenceInstance))\n\n\t// info\n\tcontrolPlane.GET(endpointInfo, WrapHandler(s.handleInfo))\n\n\t// logs\n\tcontrolPlane.GET(endpointLogPlural+endpointInference,\n\t\tWrapHandler(s.handleInferenceLogs))\n}\n"
  },
  {
    "path": "mdz/pkg/agentd/server/server_run.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc (s *Server) Run(port int) error {\n\tsrv := &http.Server{\n\t\tAddr:         fmt.Sprintf(\":%d\", port),\n\t\tHandler:      s.router,\n\t\tWriteTimeout: time.Hour * 24,\n\t\tReadTimeout:  time.Hour * 24,\n\t}\n\n\tgo func() {\n\t\tif err := srv.ListenAndServe(); err != nil &&\n\t\t\t!errors.Is(err, http.ErrServerClosed) {\n\t\t\tlogrus.Errorf(\"listen on port %d error: %v\", port, err)\n\t\t}\n\t}()\n\n\tlogrus.WithField(\"port\", port).\n\t\tInfo(\"server is running...\")\n\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)\n\t<-quit\n\tlogrus.Info(\"shutdown server\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\treturn srv.Shutdown(ctx)\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/delete.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/spf13/cobra\"\n)\n\n// deleteCmd represents the delete command\nvar deleteCmd = &cobra.Command{\n\tUse:     \"delete\",\n\tShort:   \"Delete OpenModelz inferences\",\n\tLong:    `Deletes OpenModelZ inferences`,\n\tExample: `  mdz delete blomdz-560m`,\n\tGroupID: \"basic\",\n\tPreRunE: commandInit,\n\tArgs:    cobra.ExactArgs(1),\n\tRunE:    commandDelete,\n}\n\nfunc init() {\n\trootCmd.AddCommand(deleteCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n}\n\nfunc commandDelete(cmd *cobra.Command, args []string) error {\n\tname := args[0]\n\n\tif err := agentClient.InferenceRemove(\n\t\tcmd.Context(), namespace, name); err != nil {\n\t\tcmd.PrintErrf(\"Failed to remove the inference: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\tcmd.Printf(\"Inference %s is deleted\\n\", name)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/deploy.go",
    "content": "package cmd\n\nimport (\n\t\"math/rand\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\tpetname \"github.com/dustinkirkland/golang-petname\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/telemetry\"\n)\n\nvar (\n\t// Used for flags.\n\tdeployImage       string\n\tdeployPort        int32\n\tdeployMinReplicas int32\n\tdeployMaxReplicas int32\n\tdeployName        string\n\tdeployGPU         int\n\tdeployNodeLabel   []string\n\tdeployCommand     string\n\tdeployProbePath   string\n)\n\n// deployCmd represents the deploy command\nvar deployCmd = &cobra.Command{\n\tUse:   \"deploy\",\n\tShort: \"Deploy a new deployment\",\n\tLong:  `Deploys a new deployment directly via flags.`,\n\tExample: `  mdz deploy --image=modelzai/llm-blomdz-560m:23.06.13\n  mdz deploy --image=modelzai/llm-blomdz-560m:23.06.13 --name blomdz-560m --node-labels gpu=true,name=node-name`,\n\tGroupID: \"basic\",\n\tPreRunE: commandInit,\n\tRunE:    commandDeploy,\n}\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\trootCmd.AddCommand(deployCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\t// deployCmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n\tdeployCmd.Flags().StringVar(&deployImage, \"image\", \"\", \"Image to deploy\")\n\tdeployCmd.Flags().Int32Var(&deployPort, \"port\", 8080, \"Port to deploy on\")\n\tdeployCmd.Flags().Int32Var(&deployMinReplicas, \"min-replicas\", 1, \"Minimum number of replicas (can be 0)\")\n\tdeployCmd.Flags().Int32Var(&deployMaxReplicas, \"max-replicas\", 1, \"Maximum number of replicas\")\n\tdeployCmd.Flags().IntVar(&deployGPU, \"gpu\", 0, \"Number of 
GPUs\")\n\tdeployCmd.Flags().StringVar(&deployName, \"name\", \"\", \"Name of inference\")\n\tdeployCmd.Flags().StringSliceVarP(&deployNodeLabel, \"node-labels\", \"l\", []string{}, \"Node labels\")\n\tdeployCmd.Flags().StringVar(&deployCommand, \"command\", \"\", \"Command to run\")\n\tdeployCmd.Flags().StringVar(&deployProbePath, \"probe-path\", \"\", \"HTTP Health probe path\")\n}\n\nfunc commandDeploy(cmd *cobra.Command, args []string) error {\n\tif deployImage == \"\" {\n\t\treturn cmd.Help()\n\t}\n\n\tname := deployName\n\tif name == \"\" {\n\t\tname = petname.Generate(2, \"-\")\n\t}\n\n\tvar typ types.ScalingType = types.ScalingTypeCapacity\n\tinf := types.InferenceDeployment{\n\t\tSpec: types.InferenceDeploymentSpec{\n\t\t\tImage:     deployImage,\n\t\t\tNamespace: namespace,\n\t\t\tName:      name,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"ai.tensorchord.name\": name,\n\t\t\t},\n\t\t\tFramework: types.FrameworkOther,\n\t\t\tScaling: &types.ScalingConfig{\n\t\t\t\tMinReplicas:     int32Ptr(deployMinReplicas),\n\t\t\t\tMaxReplicas:     int32Ptr(deployMaxReplicas),\n\t\t\t\tTargetLoad:      int32Ptr(10),\n\t\t\t\tType:            &typ,\n\t\t\t\tStartupDuration: int32Ptr(600),\n\t\t\t\tZeroDuration:    int32Ptr(600),\n\t\t\t},\n\t\t\tPort: int32Ptr(deployPort),\n\t\t},\n\t}\n\n\tif deployCommand != \"\" {\n\t\tinf.Spec.Command = &deployCommand\n\t}\n\tif deployProbePath != \"\" {\n\t\tinf.Spec.HTTPProbePath = &deployProbePath\n\t}\n\n\tif len(deployNodeLabel) > 0 {\n\t\tinf.Spec.Constraints = []string{}\n\t\tfor _, label := range deployNodeLabel {\n\t\t\tinf.Spec.Constraints = append(inf.Spec.Constraints, \"tensorchord.ai/\"+label)\n\t\t}\n\t}\n\n\tif deployGPU > 0 {\n\t\tGPUNum := types.Quantity(strconv.Itoa(deployGPU))\n\t\tinf.Spec.Resources = &types.ResourceRequirements{\n\t\t\t// no need to set Requests for GPU\n\t\t\tLimits: types.ResourceList{\n\t\t\t\ttypes.ResourceGPU: 
GPUNum,\n\t\t\t},\n\t\t}\n\t}\n\n\ttelemetry.GetTelemetry().Record(\n\t\t\"deploy\",\n\t\ttelemetry.AddField(\"GPU\", deployGPU),\n\t\ttelemetry.AddField(\"FromZero\", deployMinReplicas == 0),\n\t)\n\n\tif _, err := agentClient.InferenceCreate(\n\t\tcmd.Context(), namespace, inf); err != nil {\n\t\tcmd.PrintErrf(\"Failed to create the inference: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\tcmd.Printf(\"Inference %s is created\\n\", inf.Spec.Name)\n\treturn nil\n}\n\nfunc int32Ptr(i int32) *int32 { return &i }\n"
  },
  {
    "path": "mdz/pkg/cmd/exec.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/cmd/streams\"\n\tterminal \"golang.org/x/term\"\n\t\"k8s.io/apimachinery/pkg/util/rand\"\n)\n\nvar (\n\texecInstance    string\n\texecTTY         bool\n\texecInteractive bool\n)\n\n// execCommand represents the exec command\nvar execCommand = &cobra.Command{\n\tUse:   \"exec\",\n\tShort: \"Execute a command in a deployment\",\n\tLong:  `Execute a command in a deployment. If no instance is specified, the first instance is used.`,\n\tExample: `  mdz exec bloomz-560m ps\n  mdz exec bloomz-560m --instance bloomz-560m-abcde-abcde ps\n  mdz exec bllomz-560m -ti bash\n  mdz exec bloomz-560m --instance bloomz-560m-abcde-abcde -ti bash`,\n\tGroupID: \"debug\",\n\tPreRunE: commandInit,\n\tArgs:    cobra.MinimumNArgs(1),\n\tRunE:    commandExec,\n}\n\nfunc init() {\n\trootCmd.AddCommand(execCommand)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\texecCommand.Flags().StringVarP(&execInstance, \"instance\", \"s\", \"\", \"Instance name\")\n\texecCommand.Flags().BoolVarP(&execTTY, \"tty\", \"t\", false, \"Allocate a TTY for the container\")\n\texecCommand.Flags().BoolVarP(&execInteractive, \"interactive\", \"i\", false, \"Keep stdin open even if not attached\")\n}\n\nfunc commandExec(cmd *cobra.Command, args []string) error {\n\tname := args[0]\n\n\tif execInstance == \"\" {\n\t\tinstances, err := agentClient.InstanceList(cmd.Context(), namespace, name)\n\t\tif err != nil {\n\t\t\tcmd.PrintErrf(\"Failed to list instances: %s\\n\", errors.Cause(err))\n\t\t\treturn 
err\n\t\t}\n\t\tif len(instances) == 0 {\n\t\t\tcmd.PrintErrf(\"instance %s not found\\n\", name)\n\t\t\treturn errors.Newf(\"instance %s not found\", name)\n\t\t} else if len(instances) > 1 {\n\t\t\tcmd.PrintErrf(\"inference %s has multiple instances, please specify with --instance\\n\", name)\n\t\t\treturn errors.Newf(\"inference %s has multiple instances, please specify with --instance\", name)\n\t\t}\n\t\texecInstance = instances[0].Spec.Name\n\t}\n\n\tif execTTY {\n\t\tshell := \"sh\"\n\t\tif len(args) > 1 {\n\t\t\tshell = args[1]\n\t\t} else if len(args) > 2 {\n\t\t\tcmd.PrintErrf(\"too many arguments in tty mode, please use a shell program e.g. bash\\n\")\n\t\t\treturn fmt.Errorf(\"too many arguments\")\n\t\t}\n\n\t\tif !isAvailableShell(shell) {\n\t\t\tcmd.PrintErrf(\"shell %s is not available, try `sh` or `bash`\\n\", shell)\n\t\t\treturn fmt.Errorf(\"shell %s is not available, try `sh` or `bash`\", shell)\n\t\t}\n\n\t\tresp, err := agentClient.InstanceExecTTY(cmd.Context(), namespace, name, execInstance, []string{shell})\n\t\tif err != nil {\n\t\t\tcmd.PrintErrf(\"Failed to execute the shell: %s\\n\", errors.Cause(err))\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Conn.Close()\n\t\tc := resp.Conn\n\n\t\tif !terminal.IsTerminal(0) || !terminal.IsTerminal(1) {\n\t\t\tcmd.PrintErrf(\"stdin/stdout should be terminal\\n\")\n\t\t\treturn fmt.Errorf(\"stdin/stdout should be terminal\")\n\t\t}\n\t\t// oldState, err := terminal.MakeRaw(0)\n\t\t// if err != nil {\n\t\t// \tcmd.PrintErrf(\"Failed to make raw terminal: %s\\n\", errors.Cause(err))\n\t\t// \treturn err\n\t\t// }\n\t\t// oldOutState, err := terminal.MakeRaw(1)\n\t\t// if err != nil {\n\t\t// \tcmd.PrintErrf(\"Failed to make raw terminal: %s\\n\", errors.Cause(err))\n\t\t// \treturn err\n\t\t// }\n\t\t// defer func() {\n\t\t// \tterminal.Restore(0, oldState)\n\t\t// \tterminal.Restore(1, oldOutState)\n\t\t// }()\n\n\t\t// Send terminal size.\n\t\tw, h, err := terminal.GetSize(0)\n\t\tif err != nil 
{\n\t\t\tcmd.PrintErrf(\"Failed to get terminal size: %s\\n\", errors.Cause(err))\n\t\t\treturn err\n\t\t}\n\t\tmsg := &client.TerminalMessage{\n\t\t\tID:   rand.String(5),\n\t\t\tOp:   \"resize\",\n\t\t\tData: \"\",\n\t\t\tRows: uint16(h),\n\t\t\tCols: uint16(w),\n\t\t}\n\t\tif err := c.WriteJSON(msg); err != nil {\n\t\t\tcmd.PrintErrf(\"Failed to send terminal message: %s\\n\", errors.Cause(err))\n\t\t\treturn err\n\t\t}\n\n\t\terrCh := make(chan error, 1)\n\t\tcli := newMDZCLI()\n\n\t\tgo func() {\n\t\t\tdefer close(errCh)\n\t\t\terrCh <- func() error {\n\t\t\t\tstreamer := hijackedIOStreamer{\n\t\t\t\t\tstreams:      cli,\n\t\t\t\t\tinputStream:  cli.In(),\n\t\t\t\t\toutputStream: cli.Out(),\n\t\t\t\t\terrorStream:  cli.Err(),\n\t\t\t\t\tresp:         resp,\n\t\t\t\t\ttty:          true,\n\t\t\t\t\tdetachKeys:   \"\",\n\t\t\t\t}\n\n\t\t\t\treturn streamer.stream(cmd.Context())\n\t\t\t}()\n\t\t}()\n\n\t\tif err := <-errCh; err != nil {\n\t\t\tlogrus.Debugf(\"Error hijack: %s\", err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t} else {\n\t\tres, err := agentClient.InstanceExec(cmd.Context(), namespace, name, execInstance, args[1:], false)\n\t\tif err != nil {\n\t\t\tcmd.PrintErrf(\"Failed to execute the command: %s\\n\", errors.Cause(err))\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.Printf(\"%s\", res)\n\t\treturn nil\n\t}\n}\n\nfunc isAvailableShell(shell string) bool {\n\tswitch shell {\n\tcase \"sh\", \"bash\", \"zsh\", \"fish\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\ntype mdzCli struct {\n\tin  *streams.In\n\tout *streams.Out\n\terr io.Writer\n}\n\nfunc newMDZCLI() *mdzCli {\n\treturn &mdzCli{\n\t\tin:  streams.NewIn(os.Stdin),\n\t\tout: streams.NewOut(os.Stdout),\n\t\terr: os.Stderr,\n\t}\n}\n\nfunc (c mdzCli) In() *streams.In {\n\treturn c.in\n}\n\nfunc (c mdzCli) Out() *streams.Out {\n\treturn c.out\n}\n\nfunc (c mdzCli) Err() io.Writer {\n\treturn c.err\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/exec_stream.go",
    "content": "package cmd\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com/moby/term\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/cmd/ioutils\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/cmd/streams\"\n)\n\n// Streams is an interface which exposes the standard input and output streams\ntype Streams interface {\n\tIn() *streams.In\n\tOut() *streams.Out\n\tErr() io.Writer\n}\n\n// The default escape key sequence: ctrl-p, ctrl-q\n// TODO: This could be moved to `pkg/term`.\nvar defaultEscapeKeys = []byte{16, 17}\n\n// A hijackedIOStreamer handles copying input to and output from streams to the\n// connection.\ntype hijackedIOStreamer struct {\n\tstreams      Streams\n\tinputStream  io.ReadCloser\n\toutputStream io.Writer\n\terrorStream  io.Writer\n\n\tresp client.HijackedResponse\n\n\ttty        bool\n\tdetachKeys string\n}\n\n// stream handles setting up the IO and then begins streaming stdin/stdout\n// to/from the hijacked connection, blocking until it is either done reading\n// output, the user inputs the detach key sequence when in TTY mode, or when\n// the given context is cancelled.\nfunc (h *hijackedIOStreamer) stream(ctx context.Context) error {\n\trestoreInput, err := h.setupInput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to setup input stream: %s\", err)\n\t}\n\n\tdefer restoreInput()\n\n\toutputDone := h.beginOutputStream(restoreInput)\n\tinputDone, detached := h.beginInputStream(restoreInput)\n\n\tselect {\n\tcase err := <-outputDone:\n\t\treturn err\n\tcase <-inputDone:\n\t\t// Input stream has closed.\n\t\tif h.outputStream != nil || h.errorStream != nil {\n\t\t\t// Wait for output to complete streaming.\n\t\t\tselect {\n\t\t\tcase err := <-outputDone:\n\t\t\t\treturn err\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase err := <-detached:\n\t\t// Got a detach 
key sequence.\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\nfunc (h *hijackedIOStreamer) setupInput() (restore func(), err error) {\n\tif h.inputStream == nil || !h.tty {\n\t\t// No need to setup input TTY.\n\t\t// The restore func is a nop.\n\t\treturn func() {}, nil\n\t}\n\n\tif err := setRawTerminal(h.streams); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to set IO streams as raw terminal: %s\", err)\n\t}\n\n\t// Use sync.Once so we may call restore multiple times but ensure we\n\t// only restore the terminal once.\n\tvar restoreOnce sync.Once\n\trestore = func() {\n\t\trestoreOnce.Do(func() {\n\t\t\t_ = restoreTerminal(h.streams, h.inputStream)\n\t\t})\n\t}\n\n\t// Wrap the input to detect detach escape sequence.\n\t// Use default escape keys if an invalid sequence is given.\n\tescapeKeys := defaultEscapeKeys\n\tif h.detachKeys != \"\" {\n\t\tcustomEscapeKeys, err := term.ToBytes(h.detachKeys)\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"invalid detach escape keys, using default: %s\", err)\n\t\t} else {\n\t\t\tescapeKeys = customEscapeKeys\n\t\t}\n\t}\n\n\th.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close)\n\n\treturn restore, nil\n}\n\nfunc (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error {\n\tif h.outputStream == nil && h.errorStream == nil {\n\t\t// There is no need to copy output.\n\t\treturn nil\n\t}\n\n\toutputDone := make(chan error)\n\tgo func() {\n\t\tvar err error\n\n\t\t// When TTY is ON, use regular copy\n\t\tif h.outputStream != nil && h.tty {\n\t\t\t_, err = io.Copy(h.outputStream, h.resp)\n\t\t\t// We should restore the terminal as soon as possible\n\t\t\t// once the connection ends so any following print\n\t\t\t// messages will be in normal type.\n\t\t\trestoreInput()\n\t\t}\n\n\t\tlogrus.Debug(\"[hijack] End of stdout\")\n\n\t\tif err != nil {\n\t\t\tlogrus.Debugf(\"Error receiveStdout: %s\", err)\n\t\t}\n\n\t\toutputDone <- 
err\n\t}()\n\n\treturn outputDone\n}\n\nfunc (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) {\n\tinputDone := make(chan struct{})\n\tdetached := make(chan error)\n\n\tgo func() {\n\t\tif h.inputStream != nil {\n\t\t\t_, err := io.Copy(h.resp, h.inputStream)\n\t\t\t// We should restore the terminal as soon as possible\n\t\t\t// once the connection ends so any following print\n\t\t\t// messages will be in normal type.\n\t\t\trestoreInput()\n\n\t\t\tlogrus.Debug(\"[hijack] End of stdin\")\n\n\t\t\tif _, ok := err.(term.EscapeError); ok {\n\t\t\t\tdetached <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\t// This error will also occur on the receive\n\t\t\t\t// side (from stdout) where it will be\n\t\t\t\t// propagated back to the caller.\n\t\t\t\tlogrus.Debugf(\"Error sendStdin: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\t// if err := h.resp.CloseWrite(); err != nil {\n\t\t// \tlogrus.Debugf(\"Couldn't send EOF: %s\", err)\n\t\t// }\n\n\t\tclose(inputDone)\n\t}()\n\n\treturn inputDone, detached\n}\n\nfunc setRawTerminal(streams Streams) error {\n\tif err := streams.In().SetRawTerminal(); err != nil {\n\t\treturn err\n\t}\n\treturn streams.Out().SetRawTerminal()\n}\n\nfunc restoreTerminal(streams Streams, in io.Closer) error {\n\tstreams.In().RestoreTerminal()\n\tstreams.Out().RestoreTerminal()\n\t// WARNING: DO NOT REMOVE THE OS CHECKS !!!\n\t// For some reason this Close call blocks on darwin..\n\t// As the client exits right after, simply discard the close\n\t// until we find a better solution.\n\t//\n\t// This can also cause the client on Windows to get stuck in Win32 CloseHandle()\n\t// in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442\n\t// Tracked internally at Microsoft by VSO #11352156. In the\n\t// Windows case, you hit this if you are using the native/v2 console,\n\t// not the \"legacy\" console, and you start the client in a new window. 
eg\n\t// `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar`\n\t// will hang. Remove start, and it won't repro.\n\tif in != nil && runtime.GOOS != \"darwin\" && runtime.GOOS != \"windows\" {\n\t\treturn in.Close()\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/ioutils/reader.go",
    "content": "package ioutils // import \"github.com/docker/docker/pkg/ioutils\"\n\nimport (\n\t\"context\"\n\t\"io\"\n\n\t// make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered\n\t// TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged.\n\t_ \"crypto/sha256\"\n\t_ \"crypto/sha512\"\n)\n\n// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser\n// It calls the given callback function when closed. It should be constructed\n// with NewReadCloserWrapper\ntype ReadCloserWrapper struct {\n\tio.Reader\n\tcloser func() error\n}\n\n// Close calls back the passed closer function\nfunc (r *ReadCloserWrapper) Close() error {\n\treturn r.closer()\n}\n\n// NewReadCloserWrapper returns a new io.ReadCloser.\nfunc NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {\n\treturn &ReadCloserWrapper{\n\t\tReader: r,\n\t\tcloser: closer,\n\t}\n}\n\ntype readerErrWrapper struct {\n\treader io.Reader\n\tcloser func()\n}\n\nfunc (r *readerErrWrapper) Read(p []byte) (int, error) {\n\tn, err := r.reader.Read(p)\n\tif err != nil {\n\t\tr.closer()\n\t}\n\treturn n, err\n}\n\n// NewReaderErrWrapper returns a new io.Reader.\nfunc NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {\n\treturn &readerErrWrapper{\n\t\treader: r,\n\t\tcloser: closer,\n\t}\n}\n\n// OnEOFReader wraps an io.ReadCloser and a function\n// the function will run at the end of file or close the file.\ntype OnEOFReader struct {\n\tRc io.ReadCloser\n\tFn func()\n}\n\nfunc (r *OnEOFReader) Read(p []byte) (n int, err error) {\n\tn, err = r.Rc.Read(p)\n\tif err == io.EOF {\n\t\tr.runFunc()\n\t}\n\treturn\n}\n\n// Close closes the file and run the function.\nfunc (r *OnEOFReader) Close() error {\n\terr := r.Rc.Close()\n\tr.runFunc()\n\treturn err\n}\n\nfunc (r *OnEOFReader) runFunc() {\n\tif fn := r.Fn; fn != nil {\n\t\tfn()\n\t\tr.Fn = nil\n\t}\n}\n\n// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read\n// 
operations.\ntype cancelReadCloser struct {\n\tcancel func()\n\tpR     *io.PipeReader // Stream to read from\n\tpW     *io.PipeWriter\n}\n\n// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the\n// context is cancelled. The returned io.ReadCloser must be closed when it is\n// no longer needed.\nfunc NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {\n\tpR, pW := io.Pipe()\n\n\t// Create a context used to signal when the pipe is closed\n\tdoneCtx, cancel := context.WithCancel(context.Background())\n\n\tp := &cancelReadCloser{\n\t\tcancel: cancel,\n\t\tpR:     pR,\n\t\tpW:     pW,\n\t}\n\n\tgo func() {\n\t\t_, err := io.Copy(pW, in)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t// If the context was closed, p.closeWithError\n\t\t\t// was already called. Calling it again would\n\t\t\t// change the error that Read returns.\n\t\tdefault:\n\t\t\tp.closeWithError(err)\n\t\t}\n\t\tin.Close()\n\t}()\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tp.closeWithError(ctx.Err())\n\t\t\tcase <-doneCtx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn p\n}\n\n// Read wraps the Read method of the pipe that provides data from the wrapped\n// ReadCloser.\nfunc (p *cancelReadCloser) Read(buf []byte) (n int, err error) {\n\treturn p.pR.Read(buf)\n}\n\n// closeWithError closes the wrapper and its underlying reader. It will\n// cause future calls to Read to return err.\nfunc (p *cancelReadCloser) closeWithError(err error) {\n\tp.pW.CloseWithError(err)\n\tp.cancel()\n}\n\n// Close closes the wrapper its underlying reader. It will cause\n// future calls to Read to return io.EOF.\nfunc (p *cancelReadCloser) Close() error {\n\tp.closeWithError(io.EOF)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/list.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/jedib0t/go-pretty/v6/table\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/telemetry\"\n)\n\nconst (\n\tannotationDomain = \"ai.tensorchord.domain\"\n)\n\nvar (\n\t// Used for flags.\n\tlistQuiet   bool\n\tlistVerbose bool\n)\n\n// listCommand represents the list command\nvar listCommand = &cobra.Command{\n\tUse:   \"list\",\n\tShort: \"List the deployments\",\n\tLong:  `List the deployments`,\n\tExample: `  mdz list\n  mdz list -v\n  mdz list -q`,\n\tGroupID: \"basic\",\n\tPreRunE: commandInit,\n\tRunE:    commandList,\n}\n\nfunc init() {\n\trootCmd.AddCommand(listCommand)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\tlistCommand.Flags().BoolVarP(&listQuiet, \"quiet\", \"q\", false, \"Quiet mode - print out only the inference names\")\n\tlistCommand.Flags().BoolVarP(&listVerbose, \"verbose\", \"v\", false, \"Verbose mode - print out all inference details\")\n}\n\nfunc commandList(cmd *cobra.Command, args []string) error {\n\ttelemetry.GetTelemetry().Record(\"list\")\n\tinfs, err := agentClient.InferenceList(cmd.Context(), namespace)\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to list inferences: %v\\n\", err)\n\t\treturn err\n\t}\n\n\tsort.Sort(byName(infs))\n\tif listQuiet {\n\t\tfor _, inf := range infs {\n\t\t\tcmd.Printf(\"%s\\n\", inf.Spec.Name)\n\t\t}\n\t\treturn nil\n\t} else if listVerbose {\n\t\tt := table.NewWriter()\n\t\tt.SetStyle(table.Style{\n\t\t\tBox:     table.StyleBoxDefault,\n\t\t\tColor:   table.ColorOptionsDefault,\n\t\t\tFormat:  table.FormatOptionsDefault,\n\t\t\tHTML:    table.DefaultHTMLOptions,\n\t\t\tOptions: 
table.OptionsNoBordersAndSeparators,\n\t\t\tTitle:   table.TitleOptionsDefault,\n\t\t})\n\t\tt.AppendHeader(table.Row{\"Name\", \"Endpoint\", \"Image\", \"Status\", \"Invocations\", \"Replicas\", \"CreatedAt\"})\n\n\t\tfor _, inf := range infs {\n\t\t\tfunctionImage := inf.Spec.Image\n\t\t\tcreatedAt := \"\"\n\t\t\tif inf.Status.CreatedAt != nil {\n\t\t\t\tcreatedAt = inf.Status.CreatedAt.String()\n\t\t\t}\n\t\t\tt.AppendRow(table.Row{\n\t\t\t\tinf.Spec.Name,\n\t\t\t\tgetEndpoint(inf),\n\t\t\t\tfunctionImage,\n\t\t\t\tinf.Status.Phase,\n\t\t\t\tint64(inf.Status.InvocationCount),\n\t\t\t\tfmt.Sprintf(\"%d/%d\", inf.Status.AvailableReplicas, inf.Status.Replicas),\n\t\t\t\tcreatedAt,\n\t\t\t})\n\t\t}\n\n\t\tcmd.Println(t.Render())\n\t} else {\n\t\tt := table.NewWriter()\n\t\tt.SetStyle(table.Style{\n\t\t\tBox:     table.StyleBoxDefault,\n\t\t\tColor:   table.ColorOptionsDefault,\n\t\t\tFormat:  table.FormatOptionsDefault,\n\t\t\tHTML:    table.DefaultHTMLOptions,\n\t\t\tOptions: table.OptionsNoBordersAndSeparators,\n\t\t\tTitle:   table.TitleOptionsDefault,\n\t\t})\n\t\tt.AppendHeader(table.Row{\"Name\", \"Endpoint\", \"Status\", \"Invocations\", \"Replicas\"})\n\t\tfor _, inf := range infs {\n\t\t\tt.AppendRow(table.Row{\n\t\t\t\tinf.Spec.Name,\n\t\t\t\tgetEndpoint(inf),\n\t\t\t\tinf.Status.Phase,\n\t\t\t\tint64(inf.Status.InvocationCount),\n\t\t\t\tfmt.Sprintf(\"%d/%d\", inf.Status.AvailableReplicas, inf.Status.Replicas),\n\t\t\t})\n\t\t}\n\t\tcmd.Println(t.Render())\n\t}\n\treturn nil\n}\n\ntype byName []types.InferenceDeployment\n\nfunc (a byName) Len() int           { return len(a) }\nfunc (a byName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a byName) Less(i, j int) bool { return a[i].Spec.Name < a[j].Spec.Name }\n\nfunc getEndpoint(inf types.InferenceDeployment) string {\n\tendpoint := fmt.Sprintf(\"%s/inference/%s.%s\", mdzURL, inf.Spec.Name, inf.Spec.Namespace)\n\tif d, ok := inf.Spec.Annotations[annotationDomain]; ok {\n\t\t// Replace https with 
http now.\n\t\trawHTTPDomain := strings.Replace(d, \"https://\", \"http://\", 1)\n\t\tendpoint = fmt.Sprintf(\"%s\\n%s\", rawHTTPDomain, endpoint)\n\t}\n\treturn endpoint\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/list_instance.go",
    "content": "package cmd\n\nimport (\n\t\"sort\"\n\n\t\"github.com/jedib0t/go-pretty/v6/table\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n)\n\nvar (\n\t// Used for flags.\n\tlistInstanceQuiet   bool\n\tlistInstanceVerbose bool\n)\n\n// listInstanceCmd represents the list instance command\nvar listInstanceCmd = &cobra.Command{\n\tUse:   \"instance\",\n\tShort: \"List all instances for the given deployment\",\n\tLong:  `List all instances for the given deployment`,\n\tExample: `  mdz list instance bloomz-560m\n  mdz list instance bloomz-560m -v\n  mdz list instance bloomz-560m -q`,\n\tArgs:    cobra.ExactArgs(1),\n\tPreRunE: commandInit,\n\tRunE:    commandListInstance,\n}\n\nfunc init() {\n\tlistCommand.AddCommand(listInstanceCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\tlistInstanceCmd.Flags().BoolVarP(&listInstanceQuiet, \"quiet\", \"q\", false, \"Quiet mode - print out only the instance names\")\n\tlistInstanceCmd.Flags().BoolVarP(&listInstanceVerbose, \"verbose\", \"v\", false, \"Verbose mode - print out all instance details\")\n}\n\nfunc commandListInstance(cmd *cobra.Command, args []string) error {\n\tinstances, err := agentClient.InstanceList(cmd.Context(), namespace, args[0])\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to list inference instances: %v\\n\", err)\n\t\treturn err\n\t}\n\n\tsort.Sort(byInstanceName(instances))\n\n\tif listInstanceQuiet {\n\t\tfor _, i := range instances {\n\t\t\tcmd.Printf(\"%s\\n\", i.Spec.Name)\n\t\t}\n\t\treturn nil\n\t} else if listInstanceVerbose {\n\t\tt := table.NewWriter()\n\t\tt.SetStyle(table.Style{\n\t\t\tBox:     table.StyleBoxDefault,\n\t\t\tColor:   table.ColorOptionsDefault,\n\t\t\tFormat:  
table.FormatOptionsDefault,\n\t\t\tHTML:    table.DefaultHTMLOptions,\n\t\t\tOptions: table.OptionsNoBordersAndSeparators,\n\t\t\tTitle:   table.TitleOptionsDefault,\n\t\t})\n\t\tt.AppendHeader(table.Row{\"Name\", \"Status\", \"Reason\", \"Message\", \"CreatedAt\"})\n\t\tfor _, i := range instances {\n\t\t\tt.AppendRow(table.Row{i.Spec.Name, i.Status.Phase,\n\t\t\t\ti.Status.Reason, i.Status.Message, i.Status.StartTime})\n\t\t}\n\t\tcmd.Println(t.Render())\n\t\treturn nil\n\t} else {\n\t\tt := table.NewWriter()\n\t\tt.SetStyle(table.Style{\n\t\t\tBox:     table.StyleBoxDefault,\n\t\t\tColor:   table.ColorOptionsDefault,\n\t\t\tFormat:  table.FormatOptionsDefault,\n\t\t\tHTML:    table.DefaultHTMLOptions,\n\t\t\tOptions: table.OptionsNoBordersAndSeparators,\n\t\t\tTitle:   table.TitleOptionsDefault,\n\t\t})\n\t\tt.AppendHeader(table.Row{\"Name\", \"Status\", \"CreatedAt\"})\n\t\tfor _, i := range instances {\n\t\t\tt.AppendRow(table.Row{i.Spec.Name, i.Status.Phase, i.Status.StartTime})\n\t\t}\n\t\tcmd.Println(t.Render())\n\t\treturn nil\n\t}\n}\n\ntype byInstanceName []types.InferenceDeploymentInstance\n\nfunc (a byInstanceName) Len() int           { return len(a) }\nfunc (a byInstanceName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a byInstanceName) Less(i, j int) bool { return a[i].Spec.Name < a[j].Spec.Name }\n"
  },
  {
    "path": "mdz/pkg/cmd/localagent.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/agentd/server\"\n)\n\nvar (\n\tlocalAgentPort int\n)\n\n// localAgentCmd represents the local-agent command\nvar localAgentCmd = &cobra.Command{\n\tUse:     \"local-agent\",\n\tShort:   \"Start agent with local docker runtime\",\n\tLong:    `Start agent with local docker runtime`,\n\tExample: `  mdz local-agent`,\n\tGroupID: \"basic\",\n\tPreRunE: commandInit,\n\tRunE:    commandLocalAgent,\n\tHidden:  true,\n}\n\nfunc init() {\n\trootCmd.AddCommand(localAgentCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\tlocalAgentCmd.Flags().IntVarP(&localAgentPort, \"port\", \"p\", 31112, \"Port to listen on\")\n}\n\nfunc commandLocalAgent(cmd *cobra.Command, args []string) error {\n\tserver, err := server.New()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn server.Run(localAgentPort)\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/logs.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\nvar (\n\t// Used for flags.\n\ttail   int\n\tsince  string\n\tend    string\n\tfollow bool\n)\n\n// logCmd represents the log command\nvar logsCmd = &cobra.Command{\n\tUse:     \"logs\",\n\tShort:   \"Print the logs for a deployment\",\n\tLong:    `Print the logs for a deployment`,\n\tExample: `  mdz logs blomdz-560m`,\n\tGroupID: \"debug\",\n\tPreRunE: commandInit,\n\tArgs:    cobra.ExactArgs(1),\n\tRunE:    commandLogs,\n}\n\nfunc init() {\n\trootCmd.AddCommand(logsCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\tlogsCmd.Flags().IntVarP(&tail, \"tail\", \"t\", 0, \"Number of lines to show from the end of the logs\")\n\tlogsCmd.Flags().StringVarP(&since, \"since\", \"s\", \"2006-01-02T15:04:05Z\", \"Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)\")\n\tlogsCmd.Flags().StringVarP(&end, \"end\", \"e\", \"\", \"Only return logs before this timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)\")\n\tlogsCmd.Flags().BoolVarP(&follow, \"follow\", \"f\", false, \"Follow log output\")\n}\n\nfunc commandLogs(cmd *cobra.Command, args []string) error {\n\tlogStream, err := agentClient.DeploymentLogGet(cmd.Context(), namespace, args[0], since, tail, end, follow)\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to get logs: %s\\n\", err)\n\t\treturn err\n\t}\n\tfor log := range logStream {\n\t\tcmd.Printf(\"%s: %s\\n\", log.Instance, log.Text)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/portforward.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\n// portForwardCmd represents the port-forward command\nvar portForwardCmd = &cobra.Command{\n\tUse:     \"port-forward\",\n\tShort:   \"Forward one local port to a deployment\",\n\tLong:    `Forward one local port to a deployment`,\n\tExample: `  mdz port-forward blomdz-560m 7860`,\n\tGroupID: \"debug\",\n\tPreRunE: commandInit,\n\tArgs:    cobra.ExactArgs(2),\n\tRunE:    commandForward,\n}\n\nfunc init() {\n\trootCmd.AddCommand(portForwardCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n}\n\nfunc commandForward(cmd *cobra.Command, args []string) error {\n\tname := args[0]\n\tport := args[1]\n\n\tif _, err := agentClient.InferenceGet(cmd.Context(), namespace, name); err != nil {\n\t\tcmd.PrintErrf(\"Failed to get inference: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\turl, err := url.Parse(fmt.Sprintf(\"%s/inference/%s.%s\", mdzURL, name, namespace))\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to parse URL: %s\\n\", errors.Cause(err))\n\t\treturn errors.Newf(\"failed to parse URL: %s\\n\", errors.Cause(err))\n\t}\n\trp := httputil.NewSingleHostReverseProxy(url)\n\n\tcmd.Printf(\"Forwarding inference %s to local port %s\\n\", name, port)\n\tlogrus.WithField(\"url\", url).Debugf(\n\t\t\"Forwarding inference %s to local port %s\\n\", name, port)\n\thandler := func(p *httputil.ReverseProxy) func(http.ResponseWriter, *http.Request) {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tcmd.Printf(\"Handling connection for %s\\n\", port)\n\t\t\tp.ServeHTTP(w, r)\n\t\t}\n\t}\n\thttp.HandleFunc(\"/\", 
handler(rp))\n\terr = http.ListenAndServe(fmt.Sprintf(\":%s\", port), nil)\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to listen and serve: %s\\n\", errors.Cause(err))\n\t\treturn errors.Newf(\"failed to listen and serve: %s\", errors.Cause(err))\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/root.go",
    "content": "package cmd\n\nimport (\n\t\"os\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/cobra/doc\"\n\n\t\"github.com/tensorchord/openmodelz/agent/client\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/telemetry\"\n)\n\nvar (\n\t// Used for flags.\n\tmdzURL           string\n\tnamespace        string\n\tdebug            bool\n\tdisableTelemetry bool\n\n\tagentClient *client.Client\n)\n\n// rootCmd represents the base command when called without any subcommands\nvar rootCmd = &cobra.Command{\n\tUse:   \"mdz\",\n\tShort: \"mdz manages your deployments\",\n\tLong:  `mdz helps you deploy applications, manage servers, and troubleshoot issues.`,\n\tExample: `  mdz server start\n  mdz deploy --image modelzai/llm-bloomz-560m:23.06.13 --name llm\n  mdz list\n  mdz logs llm\n  mdz port-forward llm 7860\n  mdz exec llm ps\n  mdz exec llm --tty bash\n  mdz delete llm\n`,\n\tSilenceUsage: true,\n\t// Uncomment the following line if your bare application\n\t// has an action associated with it:\n\t// Run: func(cmd *cobra.Command, args []string) { },\n}\n\n// Execute adds all child commands to the root command and sets flags appropriately.\n// This is called by main.main(). 
It only needs to happen once to the rootCmd.\nfunc Execute() {\n\terr := rootCmd.Execute()\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc init() {\n\t// Here you will define your flags and configuration settings.\n\t// Cobra supports persistent flags, which, if defined here,\n\t// will be global for your application.\n\n\t// rootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME/.mdz.yaml)\")\n\trootCmd.PersistentFlags().StringVarP(&mdzURL, \"url\", \"u\", \"\", \"URL to use for the server (MDZ_URL) (default http://localhost:80)\")\n\n\trootCmd.PersistentFlags().StringVarP(&namespace, \"namespace\", \"n\", \"default\", \"Namespace to use for OpenModelZ inferences\")\n\trootCmd.PersistentFlags().MarkHidden(\"namespace\")\n\n\trootCmd.PersistentFlags().BoolVarP(&debug, \"debug\", \"\", false, \"Enable debug logging\")\n\n\trootCmd.PersistentFlags().BoolVarP(&disableTelemetry, \"disable-telemetry\", \"\", false, \"Disable anonymous telemetry\")\n\n\t// Cobra also supports local flags, which will only run\n\t// when this action is called directly.\n\trootCmd.AddGroup(&cobra.Group{ID: \"basic\", Title: \"Basic Commands:\"})\n\trootCmd.AddGroup(&cobra.Group{ID: \"debug\", Title: \"Troubleshooting and Debugging Commands:\"})\n\trootCmd.AddGroup(&cobra.Group{ID: \"management\", Title: \"Management Commands:\"})\n\n\t// telemetry\n\tif err := telemetry.Initialize(!disableTelemetry); err != nil {\n\t\tlogrus.WithError(err).Debug(\"Failed to initialize telemetry\")\n\t}\n}\n\nfunc commandInit(cmd *cobra.Command, args []string) error {\n\tif err := commandInitLog(cmd, args); err != nil {\n\t\treturn err\n\t}\n\n\tif agentClient == nil {\n\t\tif mdzURL == \"\" {\n\t\t\t// Checkout environment variable MDZ_URL.\n\t\t\tmdzURL = os.Getenv(\"MDZ_URL\")\n\t\t}\n\t\tif mdzURL == \"\" {\n\t\t\tmdzURL = \"http://localhost:80\"\n\t\t}\n\t\tvar err error\n\t\tagentClient, err = client.NewClientWithOpts(client.WithHost(mdzURL))\n\t\tif err != 
nil {\n\t\t\tcmd.PrintErrf(\"Failed to connect to agent: %s\\n\", errors.Cause(err))\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc commandInitLog(cmd *cobra.Command, args []string) error {\n\tif debug {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\tlogrus.Debug(\"Debug logging enabled\")\n\t\tlogrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})\n\t}\n\treturn nil\n}\n\nfunc GenMarkdownTree(dir string) error {\n\treturn doc.GenMarkdownTree(rootCmd, dir)\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/scale.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/spf13/cobra\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/telemetry\"\n)\n\nvar (\n\t// Used for flags.\n\treplicas               int32\n\tmin                    int32\n\tmax                    int32\n\ttargetInflightRequests int32\n)\n\n// scaleCmd represents the scale command\nvar scaleCmd = &cobra.Command{\n\tUse:     \"scale\",\n\tShort:   \"Scale a deployment\",\n\tLong:    `Scale a deployment`,\n\tExample: `  mdz scale bloomz-560m --replicas 3`,\n\tGroupID: \"basic\",\n\tPreRunE: commandInit,\n\tArgs:    cobra.ExactArgs(1),\n\tRunE:    commandScale,\n}\n\nfunc init() {\n\trootCmd.AddCommand(scaleCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\tscaleCmd.Flags().Int32VarP(&replicas, \"replicas\", \"r\", 0, \"Number of replicas to scale to\")\n\tscaleCmd.Flags().Int32VarP(&min, \"min\", \"m\", 0, \"Minimum number of replicas to scale to\")\n\tscaleCmd.Flags().Int32VarP(&max, \"max\", \"x\", 0, \"Maximum number of replicas to scale to\")\n\tscaleCmd.Flags().Int32VarP(&targetInflightRequests, \"target-inflight-requests\", \"t\", 0, \"Target number of inflight requests per replica\")\n\tscaleCmd.MarkFlagRequired(\"replicas\")\n\tscaleCmd.Flags().MarkHidden(\"min\")\n\tscaleCmd.Flags().MarkHidden(\"max\")\n\tscaleCmd.Flags().MarkHidden(\"target-inflight-requests\")\n}\n\nfunc commandScale(cmd *cobra.Command, args []string) error {\n\tname := args[0]\n\tdeployment, err := agentClient.InferenceGet(cmd.Context(), namespace, name)\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to get deployment: %s\\n\", err)\n\t\treturn err\n\t}\n\n\tif replicas != 0 {\n\t\tdeployment.Spec.Scaling.MinReplicas = int32Ptr(replicas)\n\t\tdeployment.Spec.Scaling.MaxReplicas = 
int32Ptr(replicas)\n\n\t\tif _, err := agentClient.DeploymentUpdate(cmd.Context(), namespace, deployment); err != nil {\n\t\t\tcmd.PrintErrf(\"Failed to update deployment: %s\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif min != 0 {\n\t\tdeployment.Spec.Scaling.MinReplicas = int32Ptr(min)\n\t}\n\tif max != 0 {\n\t\tdeployment.Spec.Scaling.MaxReplicas = int32Ptr(max)\n\t}\n\tif targetInflightRequests != 0 {\n\t\tdeployment.Spec.Scaling.TargetLoad = int32Ptr(targetInflightRequests)\n\t}\n\n\ttelemetry.GetTelemetry().Record(\"scale\")\n\n\tif _, err := agentClient.DeploymentUpdate(cmd.Context(), namespace, deployment); err != nil {\n\t\tcmd.PrintErrf(\"Failed to update deployment: %s\\n\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server.go",
    "content": "package cmd\n\nimport (\n\t\"time\"\n\n\t\"github.com/spf13/cobra\"\n)\n\nvar (\n\tserverVerbose                 bool\n\tserverPollingInterval         time.Duration = 3 * time.Second\n\tserverRegistryMirrorName      string\n\tserverRegistryMirrorEndpoints []string\n)\n\n// serverCmd represents the server command\nvar serverCmd = &cobra.Command{\n\tUse:     \"server\",\n\tShort:   \"Manage the servers\",\n\tLong:    `Manage the servers`,\n\tExample: `  mdz server start`,\n\tGroupID: \"management\",\n\tPreRunE: commandInitLog,\n}\n\nfunc init() {\n\trootCmd.AddCommand(serverCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\tserverCmd.PersistentFlags().BoolVarP(&serverVerbose, \"verbose\", \"v\", false, \"Verbose output\")\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server_delete.go",
    "content": "package cmd\n\nimport \"github.com/spf13/cobra\"\n\nvar serverDeleteCmd = &cobra.Command{\n\tUse:     \"delete\",\n\tShort:   \"Delete a node from the cluster\",\n\tLong:    `Delete a node from the cluster`,\n\tExample: `  mdz server delete gpu-node-1`,\n\tPreRunE: commandInit,\n\tArgs:    cobra.MinimumNArgs(1),\n\tRunE:    commandServerDelete,\n}\n\nfunc init() {\n\tserverCmd.AddCommand(serverDeleteCmd)\n}\n\nfunc commandServerDelete(cmd *cobra.Command, args []string) error {\n\tnodeName := args[0]\n\tif err := agentClient.ServerNodeDelete(cmd.Context(), nodeName); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server_destroy.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/server\"\n)\n\n// serverDestroyCmd represents the server destroy command\nvar serverDestroyCmd = &cobra.Command{\n\tUse:     \"destroy\",\n\tShort:   \"Destroy the cluster\",\n\tLong:    `Destroy the cluster`,\n\tExample: `  mdz server destroy`,\n\tPreRunE: commandInitLog,\n\tRunE:    commandServerDestroy,\n}\n\nfunc init() {\n\tserverCmd.AddCommand(serverDestroyCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n}\n\nfunc commandServerDestroy(cmd *cobra.Command, args []string) error {\n\tengine, err := server.NewDestroy(server.Options{\n\t\tVerbose:       serverVerbose,\n\t\tOutputStream:  cmd.ErrOrStderr(),\n\t\tRetryInternal: serverPollingInterval,\n\t})\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to destroy the server: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\t_, err = engine.Run()\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to destroy the server: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\tcmd.Printf(\"✅ Server destroyed\\n\")\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server_join.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/server\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/telemetry\"\n)\n\n// serverJoinCmd represents the server join command\nvar serverJoinCmd = &cobra.Command{\n\tUse:     \"join\",\n\tShort:   \"Join to the cluster\",\n\tLong:    `Join to the cluster`,\n\tExample: `  mdz server join 192.168.31.192`,\n\tPreRunE: commandInitLog,\n\tArgs:    cobra.ExactArgs(1),\n\tRunE:    commandServerJoin,\n}\n\nfunc init() {\n\tserverCmd.AddCommand(serverJoinCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\tserverJoinCmd.Flags().StringVarP(&serverRegistryMirrorName, \"mirror-name\", \"\",\n\t\t\"docker.io\", \"Mirror domain name of the registry\")\n\tserverJoinCmd.Flags().StringArrayVarP(&serverRegistryMirrorEndpoints, \"mirror-endpoints\", \"\",\n\t\t[]string{}, \"Mirror URL endpoints of the registry like `https://quay.io`\")\n}\n\nfunc commandServerJoin(cmd *cobra.Command, args []string) error {\n\tengine, err := server.NewJoin(server.Options{\n\t\tVerbose:       serverVerbose,\n\t\tOutputStream:  cmd.ErrOrStderr(),\n\t\tRetryInternal: serverPollingInterval,\n\t\tServerIP:      args[0],\n\t\tMirror: server.Mirror{\n\t\t\tName:      serverRegistryMirrorName,\n\t\t\tEndpoints: serverRegistryMirrorEndpoints,\n\t\t},\n\t})\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to configure before join: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\ttelemetry.GetTelemetry().Record(\"server join\")\n\n\t_, err = engine.Run()\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to join the cluster: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\tcmd.Printf(\"✅ Server joined\\n\")\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server_label.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/spf13/cobra\"\n)\n\n// serverLabelCmd represents the server label command\nvar serverLabelCmd = &cobra.Command{\n\tUse:   \"label\",\n\tShort: \"Update the labels on a server\",\n\tLong: `Update the labels on a server\n\n  *  A label key and value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 63 characters each.\n  *  Optionally, the key can begin with a DNS subdomain prefix and a single '/', like example.com/my-app.\n\t`,\n\tExample: `  mdz server label node-name key=value [key=value...]`,\n\tPreRunE: commandInit,\n\tArgs:    cobra.MinimumNArgs(1),\n\tRunE:    commandServerLabel,\n}\n\nfunc init() {\n\tserverCmd.AddCommand(serverLabelCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n}\n\nfunc commandServerLabel(cmd *cobra.Command, args []string) error {\n\tnodeName := args[0]\n\tlabels := args[1:]\n\n\tnodeLabels, err := parseNodeLabels(labels)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := agentClient.ServerLabelCreate(cmd.Context(),\n\t\tnodeName, nodeLabels); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc parseNodeLabels(labels []string) (map[string]string, error) {\n\tres := make(map[string]string)\n\tfor _, label := range labels {\n\t\tif !strings.Contains(label, \"=\") {\n\t\t\treturn nil, fmt.Errorf(\"label must be in the form of key=value\")\n\t\t}\n\t\t// Split the label into key and value\n\t\tparts := strings.SplitN(label, \"=\", 2)\n\t\tkey := parts[0]\n\t\tvalue := parts[1]\n\t\tif len(key) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"label key cannot be empty\")\n\t\t}\n\t\tres[key] = value\n\t}\n\treturn res, nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server_list.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/jedib0t/go-pretty/v6/table\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/telemetry\"\n)\n\nvar (\n\t// Used for flags.\n\tserverListQuiet   bool\n\tserverListVerbose bool\n)\n\n// serverListCmd represents the server list command\nvar serverListCmd = &cobra.Command{\n\tUse:     \"list\",\n\tShort:   \"List all servers in the cluster\",\n\tLong:    `List all servers in the cluster`,\n\tExample: `  mdz server list`,\n\tPreRunE: commandInit,\n\tRunE:    commandServerList,\n}\n\nfunc init() {\n\tserverCmd.AddCommand(serverListCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\tserverListCmd.Flags().BoolVarP(&serverListQuiet, \"quiet\", \"q\", false, \"Quiet mode - print out only the server names\")\n\tserverListCmd.Flags().BoolVarP(&serverListVerbose, \"verbose\", \"v\", false, \"Verbose mode - print out all server details\")\n}\n\nfunc commandServerList(cmd *cobra.Command, args []string) error {\n\ttelemetry.GetTelemetry().Record(\"server list\")\n\tservers, err := agentClient.ServerList(cmd.Context())\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to list servers: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\tif serverListQuiet {\n\t\tfor _, server := range servers {\n\t\t\tcmd.Printf(\"%s\\n\", server.Spec.Name)\n\t\t}\n\t} else if serverListVerbose {\n\t\tt := table.NewWriter()\n\t\tt.SetStyle(table.Style{\n\t\t\tBox:     table.StyleBoxDefault,\n\t\t\tColor:   table.ColorOptionsDefault,\n\t\t\tFormat:  table.FormatOptionsDefault,\n\t\t\tHTML:    
table.DefaultHTMLOptions,\n\t\t\tOptions: table.OptionsNoBordersAndSeparators,\n\t\t\tTitle:   table.TitleOptionsDefault,\n\t\t})\n\t\tt.AppendHeader(table.Row{\"Name\", \"Phase\", \"Allocatable\", \"Capacity\", \"Distribution\", \"OS\", \"Kernel\", \"Labels\"})\n\n\t\tfor _, server := range servers {\n\t\t\tt.AppendRow(table.Row{server.Spec.Name, server.Status.Phase,\n\t\t\t\tresourceListString(server.Status.Allocatable),\n\t\t\t\tresourceListString(server.Status.Capacity),\n\t\t\t\tserver.Status.System.OSImage,\n\t\t\t\tserver.Status.System.OperatingSystem,\n\t\t\t\tserver.Status.System.KernelVersion,\n\t\t\t\tlabelsString(server.Spec.Labels),\n\t\t\t})\n\t\t}\n\t\tcmd.Println(t.Render())\n\t} else {\n\t\tt := table.NewWriter()\n\t\tt.SetStyle(table.Style{\n\t\t\tBox:     table.StyleBoxDefault,\n\t\t\tColor:   table.ColorOptionsDefault,\n\t\t\tFormat:  table.FormatOptionsDefault,\n\t\t\tHTML:    table.DefaultHTMLOptions,\n\t\t\tOptions: table.OptionsNoBordersAndSeparators,\n\t\t\tTitle:   table.TitleOptionsDefault,\n\t\t})\n\t\tt.AppendHeader(table.Row{\"Name\", \"Phase\", \"Allocatable\", \"Capacity\"})\n\n\t\tfor _, server := range servers {\n\t\t\tt.AppendRow(table.Row{server.Spec.Name, server.Status.Phase,\n\t\t\t\tresourceListString(server.Status.Allocatable),\n\t\t\t\tresourceListString(server.Status.Capacity)})\n\t\t}\n\t\tcmd.Println(t.Render())\n\t}\n\treturn nil\n}\n\nfunc labelsString(labels map[string]string) string {\n\tres := \"\"\n\tfor k, v := range labels {\n\t\tres += fmt.Sprintf(\"%s=%s\\n\", k, v)\n\t}\n\tif len(res) == 0 {\n\t\treturn res\n\t}\n\treturn res[:len(res)-1]\n}\n\nfunc prettyByteSize(quantity string) (string, error) {\n\tr, err := resource.ParseQuantity(quantity)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbf := float64(r.Value())\n\tfor _, unit := range []string{\"\", \"Ki\", \"Mi\", \"Gi\", \"Ti\"} {\n\t\tif math.Abs(bf) < 1024.0 {\n\t\t\treturn fmt.Sprintf(\"%3.1f%sB\", bf, unit), nil\n\t\t}\n\t\tbf /= 
1024.0\n\t}\n\treturn fmt.Sprintf(\"%.1fPiB\", bf), nil\n}\n\nfunc resourceListString(l types.ResourceList) string {\n\tres := fmt.Sprintf(\"cpu: %s\", l[types.ResourceCPU])\n\tmemory, ok := l[types.ResourceMemory]\n\tif ok {\n\t\tprettyMem, err := prettyByteSize(string(memory))\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"failed to parse the memory quantity: %s\", memory)\n\t\t} else {\n\t\t\tmemory = types.Quantity(prettyMem)\n\t\t}\n\t}\n\tres += fmt.Sprintf(\"\\nmemory: %s\", memory)\n\tif l[types.ResourceGPU] != \"\" {\n\t\tres += fmt.Sprintf(\"\\ngpu: %s\", l[types.ResourceGPU])\n\t}\n\treturn res\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server_start.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/tensorchord/openmodelz/agent/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/server\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/telemetry\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/version\"\n)\n\nvar (\n\tserverStartRuntime    string\n\tserverStartDomain     string = consts.Domain\n\tserverStartVersion    string\n\tserverStartWithGPU    bool\n\tenableModelZCloud     bool\n\tmodelzCloudUrl        string\n\tmodelzCloudAgentToken string\n\tmodelzCloudRegion     string\n)\n\n// serverStartCmd represents the server start command\nvar serverStartCmd = &cobra.Command{\n\tUse:   \"start\",\n\tShort: \"Start the server\",\n\tLong:  `Start the server with the public IP of the machine. If not provided, the internal IP will be used automatically.`,\n\tExample: `  mdz server start\n  mdz server start -v\n  mdz server start 1.2.3.4`,\n\tPreRunE: preRunE,\n\tArgs:    cobra.RangeArgs(0, 1),\n\tRunE:    commandServerStart,\n}\n\nfunc init() {\n\tserverCmd.AddCommand(serverStartCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n\t// serverStartCmd.Flags().StringVarP(&serverStartRuntime, \"runtime\", \"r\", \"k3s\", \"Runtime to use (k3s, docker) in the started server\")\n\tserverStartCmd.Flags().StringVarP(&serverStartVersion, \"version\", \"\",\n\t\tversion.HelmChartVersion, \"Version of the server to start\")\n\tserverStartCmd.Flags().MarkHidden(\"version\")\n\tserverStartCmd.Flags().BoolVarP(&serverStartWithGPU, \"force-gpu\", \"g\",\n\t\tfalse, \"Start the server with GPU support (ignore the GPU detection)\")\n\tserverStartCmd.Flags().StringVarP(&serverRegistryMirrorName, 
\"mirror-name\", \"\",\n\t\t\"docker.io\", \"Mirror domain name of the registry\")\n\tserverStartCmd.Flags().StringArrayVarP(&serverRegistryMirrorEndpoints, \"mirror-endpoints\", \"\",\n\t\t[]string{}, \"Mirror URL endpoints of the registry like `https://quay.io`\")\n\tserverStartCmd.Flags().BoolVarP(&enableModelZCloud, \"modelzcloud-enabled\", \"\",\n\t\tfalse, \"Enable ModelZ Cloud Management\")\n\tserverStartCmd.Flags().StringVarP(&modelzCloudUrl, \"modelzcloud-url\", \"\",\n\t\t\"https://cloud.modelz.ai\", \"ModelZ Cloud URL\")\n\tserverStartCmd.Flags().StringVarP(&modelzCloudAgentToken, \"modelzcloud-agent-token\", \"\",\n\t\t\"\", \"ModelZ Cloud Agent Token\")\n\tserverStartCmd.Flags().StringVarP(&modelzCloudRegion, \"modelzcloud-region\", \"\",\n\t\t\"on-premises\", \"ModelZ Cloud Region\")\n}\n\nfunc preRunE(cmd *cobra.Command, args []string) error {\n\terr := commandInitLog(cmd, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If enabled modelzcloud control plane, you need make configuration\n\tif enableModelZCloud {\n\t\tif modelzCloudUrl == \"\" || modelzCloudAgentToken == \"\" || modelzCloudRegion == \"\" {\n\t\t\treturn fmt.Errorf(\"modelzcloud configuration is not complete\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc commandServerStart(cmd *cobra.Command, args []string) error {\n\tvar domain *string\n\tif len(args) > 0 {\n\t\tdomainWithSuffix := fmt.Sprintf(\"%s.%s\", args[0], serverStartDomain)\n\t\tdomain = &domainWithSuffix\n\t}\n\tdefer func(start time.Time) {\n\t\ttelemetry.GetTelemetry().Record(\n\t\t\t\"server start\",\n\t\t\ttelemetry.AddField(\"duration\", time.Since(start).Seconds()),\n\t\t)\n\t}(time.Now())\n\tengine, err := server.NewStart(server.Options{\n\t\tVerbose:       serverVerbose,\n\t\tRuntime:       server.Runtime(serverStartRuntime),\n\t\tOutputStream:  cmd.ErrOrStderr(),\n\t\tRetryInternal: serverPollingInterval,\n\t\tDomain:        domain,\n\t\tVersion:       serverStartVersion,\n\t\tForceGPU:      
serverStartWithGPU,\n\t\tMirror: server.Mirror{\n\t\t\tName:      serverRegistryMirrorName,\n\t\t\tEndpoints: serverRegistryMirrorEndpoints,\n\t\t},\n\t\tModelZCloud: server.ModelZCloud{\n\t\t\tEnabled: enableModelZCloud,\n\t\t\tURL:     modelzCloudUrl,\n\t\t\tToken:   modelzCloudAgentToken,\n\t\t\tRegion:  modelzCloudRegion,\n\t\t},\n\t})\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to start the server: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\tresult, err := engine.Run()\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to start the server: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\tmdzURL = result.MDZURL\n\tif err := commandInit(cmd, args); err != nil {\n\t\tcmd.PrintErrf(\"Failed to start the server: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\tcmd.Printf(\"🐋 Checking if the server is running...\\n\")\n\t// Retry until verify success.\n\tticker := time.NewTicker(serverPollingInterval)\n\tfor range ticker.C {\n\t\tif err := printServerVersion(cmd); err != nil {\n\t\t\tcmd.Printf(\"🐋 The server is not ready yet, retrying...\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tcmd.Printf(\"🐳 The server is running at %s\\n\", mdzURL)\n\tcmd.Printf(\"🎉 You could set the environment variable to get started!\\n\\n\")\n\tcmd.Printf(\"export MDZ_URL=%s\\n\", mdzURL)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/server_stop.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/server\"\n)\n\n// serverStopCmd represents the server stop command\nvar serverStopCmd = &cobra.Command{\n\tUse:     \"stop\",\n\tShort:   \"Stop the server\",\n\tLong:    `Stop the server`,\n\tExample: `  mdz server stop`,\n\tPreRunE: commandInitLog,\n\tRunE:    commandServerStop,\n}\n\nfunc init() {\n\tserverCmd.AddCommand(serverStopCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n}\n\nfunc commandServerStop(cmd *cobra.Command, args []string) error {\n\tengine, err := server.NewStop(server.Options{\n\t\tVerbose:       serverVerbose,\n\t\tOutputStream:  cmd.ErrOrStderr(),\n\t\tRetryInternal: serverPollingInterval,\n\t})\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to stop the server: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\n\t_, err = engine.Run()\n\tif err != nil {\n\t\tcmd.PrintErrf(\"Failed to stop the server: %s\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\tcmd.Printf(\"✅ Server stopped\\n\")\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/streams/in.go",
    "content": "package streams\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com/moby/term\"\n)\n\n// In is an input stream to read user input. It implements [io.ReadCloser]\n// with additional utilities, such as putting the terminal in raw mode.\ntype In struct {\n\tcommonStream\n\tin io.ReadCloser\n}\n\n// Read implements the [io.Reader] interface.\nfunc (i *In) Read(p []byte) (int, error) {\n\treturn i.in.Read(p)\n}\n\n// Close implements the [io.Closer] interface.\nfunc (i *In) Close() error {\n\treturn i.in.Close()\n}\n\n// SetRawTerminal sets raw mode on the input terminal. It is a no-op if In\n// is not a TTY, or if the \"NORAW\" environment variable is set to a non-empty\n// value.\nfunc (i *In) SetRawTerminal() (err error) {\n\tif !i.isTerminal || os.Getenv(\"NORAW\") != \"\" {\n\t\treturn nil\n\t}\n\ti.state, err = term.SetRawTerminal(i.fd)\n\treturn err\n}\n\n// CheckTty checks if we are trying to attach to a container TTY\n// from a non-TTY client input stream, and if so, returns an error.\nfunc (i *In) CheckTty(attachStdin, ttyMode bool) error {\n\t// In order to attach to a container tty, input stream for the client must\n\t// be a tty itself: redirecting or piping the client standard input is\n\t// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.\n\tif ttyMode && attachStdin && !i.isTerminal {\n\t\tconst eText = \"the input device is not a TTY\"\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\treturn errors.New(eText + \".  If you are using mintty, try prefixing the command with 'winpty'\")\n\t\t}\n\t\treturn errors.New(eText)\n\t}\n\treturn nil\n}\n\n// NewIn returns a new [In] from an [io.ReadCloser].\nfunc NewIn(in io.ReadCloser) *In {\n\ti := &In{in: in}\n\ti.fd, i.isTerminal = term.GetFdInfo(in)\n\treturn i\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/streams/out.go",
    "content": "package streams\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/moby/term\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Out is an output stream to write normal program output. It implements\n// an [io.Writer], with additional utilities for detecting whether a terminal\n// is connected, getting the TTY size, and putting the terminal in raw mode.\ntype Out struct {\n\tcommonStream\n\tout io.Writer\n}\n\nfunc (o *Out) Write(p []byte) (int, error) {\n\treturn o.out.Write(p)\n}\n\n// SetRawTerminal puts the output of the terminal connected to the stream\n// into raw mode.\n//\n// On UNIX, this does nothing. On Windows, it disables LF -> CRLF/ translation.\n// It is a no-op if Out is not a TTY, or if the \"NORAW\" environment variable is\n// set to a non-empty value.\nfunc (o *Out) SetRawTerminal() (err error) {\n\tif !o.isTerminal || os.Getenv(\"NORAW\") != \"\" {\n\t\treturn nil\n\t}\n\to.state, err = term.SetRawTerminalOutput(o.fd)\n\treturn err\n}\n\n// GetTtySize returns the height and width in characters of the TTY, or\n// zero for both if no TTY is connected.\nfunc (o *Out) GetTtySize() (height uint, width uint) {\n\tif !o.isTerminal {\n\t\treturn 0, 0\n\t}\n\tws, err := term.GetWinsize(o.fd)\n\tif err != nil {\n\t\tlogrus.WithError(err).Debug(\"Error getting TTY size\")\n\t\tif ws == nil {\n\t\t\treturn 0, 0\n\t\t}\n\t}\n\treturn uint(ws.Height), uint(ws.Width)\n}\n\n// NewOut returns a new [Out] from an [io.Writer].\nfunc NewOut(out io.Writer) *Out {\n\to := &Out{out: out}\n\to.fd, o.isTerminal = term.GetFdInfo(out)\n\treturn o\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/streams/stream.go",
    "content": "package streams\n\nimport (\n\t\"github.com/moby/term\"\n)\n\ntype commonStream struct {\n\tfd         uintptr\n\tisTerminal bool\n\tstate      *term.State\n}\n\n// FD returns the file descriptor number for this stream.\nfunc (s *commonStream) FD() uintptr {\n\treturn s.fd\n}\n\n// IsTerminal returns true if this stream is connected to a terminal.\nfunc (s *commonStream) IsTerminal() bool {\n\treturn s.isTerminal\n}\n\n// RestoreTerminal restores normal mode to the terminal.\nfunc (s *commonStream) RestoreTerminal() {\n\tif s.state != nil {\n\t\t_ = term.RestoreTerminal(s.fd, s.state)\n\t}\n}\n\n// SetIsTerminal overrides whether a terminal is connected. It is used to\n// override this property in unit-tests, and should not be depended on for\n// other purposes.\nfunc (s *commonStream) SetIsTerminal(isTerminal bool) {\n\ts.isTerminal = isTerminal\n}\n"
  },
  {
    "path": "mdz/pkg/cmd/version.go",
    "content": "package cmd\n\nimport (\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/version\"\n)\n\n// versionCmd represents the versionCmd\nvar versionCmd = &cobra.Command{\n\tUse:     \"version\",\n\tShort:   \"Print the client and agent version information\",\n\tLong:    `Print the client and server version information`,\n\tExample: `  mdz version`,\n\tPreRunE: commandInit,\n\tRunE:    commandVersion,\n}\n\nfunc init() {\n\trootCmd.AddCommand(versionCmd)\n\n\t// Here you will define your flags and configuration settings.\n\n\t// Cobra supports Persistent Flags which will work for this command\n\t// and all subcommands, e.g.:\n\n\t// Cobra supports local flags which will only run when this command\n\t// is called directly, e.g.:\n}\n\nfunc commandVersion(cmd *cobra.Command, args []string) error {\n\tv := version.GetVersion()\n\tcmd.Println(\"Client:\")\n\tcmd.Printf(\" Version: \\t%s\\n\", v.Version)\n\tcmd.Printf(\" Build Date: \\t%s\\n\", v.BuildDate)\n\tcmd.Printf(\" Git Commit: \\t%s\\n\", v.GitCommit)\n\tcmd.Printf(\" Git State: \\t%s\\n\", v.GitTreeState)\n\tcmd.Printf(\" Go Version: \\t%s\\n\", v.GoVersion)\n\tcmd.Printf(\" Compiler: \\t%s\\n\", v.Compiler)\n\tcmd.Printf(\" Platform: \\t%s\\n\", v.Platform)\n\n\tif err := printServerVersion(cmd); err != nil {\n\t\tcmd.PrintErrf(\"Failed to get server version: %v\\n\", errors.Cause(err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc printServerVersion(cmd *cobra.Command) error {\n\tinfo, err := agentClient.InfoGet(cmd.Context())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Println(\"Server:\")\n\tcmd.Printf(\" Version: \\t%s\\n\", info.Version.Version)\n\tcmd.Printf(\" Build Date: \\t%s\\n\", info.Version.BuildDate)\n\tcmd.Printf(\" Git Commit: \\t%s\\n\", info.Version.GitCommit)\n\tcmd.Printf(\" Git State: \\t%s\\n\", info.Version.GitTreeState)\n\tcmd.Printf(\" Go Version: \\t%s\\n\", info.Version.GoVersion)\n\tcmd.Printf(\" 
Compiler: \\t%s\\n\", info.Version.Compiler)\n\tcmd.Printf(\" Platform: \\t%s\\n\", info.Version.Platform)\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/agentd_run.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"os/exec\"\n\t\"syscall\"\n)\n\ntype agentDRunStep struct {\n\toptions Options\n}\n\n// TODO(gaocegege): There is still a bug, thus it cannot be used actually.\n// The process will exit after the command returns. We need to put it in systemd.\nfunc (s *agentDRunStep) Run() error {\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Running the agent for docker runtime...\\n\")\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"mdz local-agent &\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *agentDRunStep) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/engine.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n)\n\nconst (\n\tAgentPort = 31112\n)\n\ntype Options struct {\n\tVerbose       bool\n\tOutputStream  io.Writer\n\tRuntime       Runtime\n\tMirror        Mirror\n\tRetryInternal time.Duration\n\tServerIP      string\n\tDomain        *string\n\tVersion       string\n\tForceGPU      bool\n\tModelZCloud   ModelZCloud\n}\n\ntype ModelZCloud struct {\n\tEnabled bool\n\tURL     string\n\tToken   string\n\tRegion  string\n}\n\ntype Mirror struct {\n\tName      string\n\tEndpoints []string\n}\n\nfunc (m *Mirror) Configured() bool {\n\treturn m.Name != \"\" && len(m.Endpoints) > 0\n}\n\ntype Runtime string\n\nvar (\n\tRuntimeK3s    Runtime = \"k3s\"\n\tRuntimeDocker Runtime = \"docker\"\n)\n\ntype Engine struct {\n\toptions Options\n\tSteps   []Step\n}\n\ntype Result struct {\n\tMDZURL string\n}\n\nfunc NewStart(o Options) (*Engine, error) {\n\tif o.Verbose {\n\t\tfmt.Fprintf(o.OutputStream, \"Starting the server with config: %+v\\n\", o)\n\t}\n\tvar engine *Engine\n\tswitch o.Runtime {\n\tcase RuntimeDocker:\n\t\tengine = &Engine{\n\t\t\toptions: o,\n\t\t\tSteps: []Step{\n\t\t\t\t&agentDRunStep{\n\t\t\t\t\toptions: o,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\tdefault:\n\t\tengine = &Engine{\n\t\t\toptions: o,\n\t\t\tSteps: []Step{\n\t\t\t\t// Install k3s and related tools.\n\t\t\t\t&k3sPrepare{\n\t\t\t\t\toptions: o,\n\t\t\t\t},\n\t\t\t\t&k3sInstallStep{\n\t\t\t\t\toptions: o,\n\t\t\t\t},\n\t\t\t\t&nginxInstallStep{\n\t\t\t\t\toptions: o,\n\t\t\t\t},\n\t\t\t\t&gpuInstallStep{\n\t\t\t\t\toptions: o,\n\t\t\t\t},\n\t\t\t\t&openModelZInstallStep{\n\t\t\t\t\toptions: o,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\treturn engine, nil\n}\n\nfunc NewStop(o Options) (*Engine, error) {\n\treturn &Engine{\n\t\toptions: o,\n\t\tSteps: []Step{\n\t\t\t// Kill all k3s and related tools.\n\t\t\t&k3sKillAllStep{\n\t\t\t\toptions: o,\n\t\t\t},\n\t\t},\n\t}, nil\n}\n\nfunc NewDestroy(o Options) (*Engine, error) {\n\treturn 
&Engine{\n\t\toptions: o,\n\t\tSteps: []Step{\n\t\t\t// Destroy all k3s and related tools.\n\t\t\t&k3sDestroyAllStep{\n\t\t\t\toptions: o,\n\t\t\t},\n\t\t},\n\t}, nil\n}\n\nfunc NewJoin(o Options) (*Engine, error) {\n\treturn &Engine{\n\t\toptions: o,\n\t\tSteps: []Step{\n\t\t\t&k3sJoinStep{\n\t\t\t\toptions: o,\n\t\t\t},\n\t\t},\n\t}, nil\n}\n\ntype Step interface {\n\tRun() error\n\tVerify() error\n}\n\nfunc (e *Engine) Run() (*Result, error) {\n\tfor _, step := range e.Steps {\n\t\tif err := step.Run(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Retry until verify success.\n\t\tticker := time.NewTicker(e.options.RetryInternal)\n\t\tfor range ticker.C {\n\t\t\tif err := step.Verify(); err == nil {\n\t\t\t\tticker.Stop()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif e.options.Domain != nil {\n\t\treturn &Result{\n\t\t\tMDZURL: fmt.Sprintf(\"http://%s\", *e.options.Domain),\n\t\t}, nil\n\t}\n\t// Get the server IP.\n\tif resultDomain != \"\" {\n\t\treturn &Result{\n\t\t\tMDZURL: fmt.Sprintf(\"http://%s\", resultDomain),\n\t\t}, nil\n\t}\n\treturn &Result{\n\t\tMDZURL: fmt.Sprintf(\"http://0.0.0.0:%d\", AgentPort),\n\t}, nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/gpu-resource.yaml",
    "content": "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: gpu-operator\n---\napiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n  name: nvidia\n  namespace: gpu-operator\nspec:\n  chart: gpu-operator\n  repo: https://helm.ngc.nvidia.com/nvidia\n  targetNamespace: gpu-operator\n  set:\n  valuesContent: |-\n    toolkit:\n      env:\n      - name: CONTAINERD_CONFIG\n        value: /var/lib/rancher/k3s/agent/etc/containerd/config.toml\n      - name: CONTAINERD_SOCKET\n        value: /run/k3s/containerd/containerd.sock\n"
  },
  {
    "path": "mdz/pkg/server/gpu_install.go",
    "content": "package server\n\nimport (\n\t_ \"embed\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"syscall\"\n)\n\n//go:embed gpu-resource.yaml\nvar gpuYamlContent string\n\n// gpuInstallStep installs the GPU related resources.\ntype gpuInstallStep struct {\n\toptions Options\n}\n\n// check if the Nvidia Toolkit is installed on the host\nfunc (s *gpuInstallStep) hasNvidiaToolkit() bool {\n\tlocations := []string{\n\t\t\"/usr/local/nvidia/toolkit\",\n\t\t\"/usr/bin\",\n\t}\n\tbinaryNames := []string{\n\t\t\"nvidia-container-runtime\",\n\t\t\"nvidia-container-runtime-experimental\",\n\t}\n\tfor _, location := range locations {\n\t\tfor _, name := range binaryNames {\n\t\t\tpath := filepath.Join(location, name)\n\t\t\tif _, err := os.Stat(path); err == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (s *gpuInstallStep) hasNvidiaDevice() bool {\n\toutput, err := exec.Command(\"/bin/sh\", \"-c\", \"lspci\").Output()\n\tif err != nil {\n\t\treturn false\n\t}\n\tregexNvidia := regexp.MustCompile(\"(?i)nvidia\")\n\treturn regexNvidia.Match(output)\n}\n\nfunc (s *gpuInstallStep) Run() error {\n\tif !s.options.ForceGPU {\n\t\t// detect GPU\n\t\tif !(s.hasNvidiaDevice() || s.hasNvidiaToolkit()) {\n\t\t\tfmt.Fprintf(s.options.OutputStream, \"🚧 Nvidia Toolkit is missing, skip the GPU initialization.\\n\")\n\t\t\treturn nil\n\t\t}\n\t}\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Initializing the GPU resource...\\n\")\n\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"sudo k3s kubectl apply -f -\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\tif _, err := io.WriteString(stdin, 
gpuYamlContent); err != nil {\n\t\treturn err\n\t}\n\t// Close the input stream to finish the pipe. Then the command will use the\n\t// input from the pipe to start the next process.\n\tstdin.Close()\n\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *gpuInstallStep) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/k3s-install.sh",
    "content": "#!/bin/sh\nset -e\nset -o noglob\n\n# Usage:\n#   curl ... | ENV_VAR=... sh -\n#       or\n#   ENV_VAR=... ./install.sh\n#\n# Example:\n#   Installing a server without traefik:\n#     curl ... | INSTALL_K3S_EXEC=\"--disable=traefik\" sh -\n#   Installing an agent to point at a server:\n#     curl ... | K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh -\n#\n# Environment variables:\n#   - K3S_*\n#     Environment variables which begin with K3S_ will be preserved for the\n#     systemd service to use. Setting K3S_URL without explicitly setting\n#     a systemd exec command will default the command to \"agent\", and we\n#     enforce that K3S_TOKEN is also set.\n#\n#   - INSTALL_K3S_SKIP_DOWNLOAD\n#     If set to true will not download k3s hash or binary.\n#\n#   - INSTALL_K3S_FORCE_RESTART\n#     If set to true will always restart the K3s service\n#\n#   - INSTALL_K3S_SYMLINK\n#     If set to 'skip' will not create symlinks, 'force' will overwrite,\n#     default will symlink if command does not exist in path.\n#\n#   - INSTALL_K3S_SKIP_ENABLE\n#     If set to true will not enable or start k3s service.\n#\n#   - INSTALL_K3S_SKIP_START\n#     If set to true will not start k3s service.\n#\n#   - INSTALL_K3S_VERSION\n#     Version of k3s to download from github. 
Will attempt to download from the\n#     stable channel if not specified.\n#\n#   - INSTALL_K3S_COMMIT\n#     Commit of k3s to download from temporary cloud storage.\n#     * (for developer & QA use)\n#\n#   - INSTALL_K3S_BIN_DIR\n#     Directory to install k3s binary, links, and uninstall script to, or use\n#     /usr/local/bin as the default\n#\n#   - INSTALL_K3S_BIN_DIR_READ_ONLY\n#     If set to true will not write files to INSTALL_K3S_BIN_DIR, forces\n#     setting INSTALL_K3S_SKIP_DOWNLOAD=true\n#\n#   - INSTALL_K3S_SYSTEMD_DIR\n#     Directory to install systemd service and environment files to, or use\n#     /etc/systemd/system as the default\n#\n#   - INSTALL_K3S_EXEC or script arguments\n#     Command with flags to use for launching k3s in the systemd service, if\n#     the command is not specified will default to \"agent\" if K3S_URL is set\n#     or \"server\" if not. The final systemd command resolves to a combination\n#     of EXEC and script args ($@).\n#\n#     The following commands result in the same behavior:\n#       curl ... | INSTALL_K3S_EXEC=\"--disable=traefik\" sh -s -\n#       curl ... | INSTALL_K3S_EXEC=\"server --disable=traefik\" sh -s -\n#       curl ... | INSTALL_K3S_EXEC=\"server\" sh -s - --disable=traefik\n#       curl ... | sh -s - server --disable=traefik\n#       curl ... | sh -s - --disable=traefik\n#\n#   - INSTALL_K3S_NAME\n#     Name of systemd service to create, will default from the k3s exec command\n#     if not specified. 
If specified the name will be prefixed with 'k3s-'.\n#\n#   - INSTALL_K3S_TYPE\n#     Type of systemd service to create, will default from the k3s exec command\n#     if not specified.\n#\n#   - INSTALL_K3S_SELINUX_WARN\n#     If set to true will continue if k3s-selinux policy is not found.\n#\n#   - INSTALL_K3S_SKIP_SELINUX_RPM\n#     If set to true will skip automatic installation of the k3s RPM.\n#\n#   - INSTALL_K3S_CHANNEL_URL\n#     Channel URL for fetching k3s download URL.\n#     Defaults to 'https://update.k3s.io/v1-release/channels'.\n#\n#   - INSTALL_K3S_CHANNEL\n#     Channel to use for fetching k3s download URL.\n#     Defaults to 'stable'.\n\nGITHUB_URL=https://github.com/k3s-io/k3s/releases\nSTORAGE_URL=https://k3s-ci-builds.s3.amazonaws.com\nDOWNLOADER=\n\n# --- helper functions for logs ---\ninfo()\n{\n    echo '[INFO] ' \"$@\"\n}\nwarn()\n{\n    echo '[WARN] ' \"$@\" >&2\n}\nfatal()\n{\n    echo '[ERROR] ' \"$@\" >&2\n    exit 1\n}\n\n# --- fatal if no systemd or openrc ---\nverify_system() {\n    if [ -x /sbin/openrc-run ]; then\n        HAS_OPENRC=true\n        return\n    fi\n    if [ -x /bin/systemctl ] || type systemctl > /dev/null 2>&1; then\n        HAS_SYSTEMD=true\n        return\n    fi\n    fatal 'Can not find systemd or openrc to use as a process supervisor for k3s'\n}\n\n# --- add quotes to command arguments ---\nquote() {\n    for arg in \"$@\"; do\n        printf '%s\\n' \"$arg\" | sed \"s/'/'\\\\\\\\''/g;1s/^/'/;\\$s/\\$/'/\"\n    done\n}\n\n# --- add indentation and trailing slash to quoted args ---\nquote_indent() {\n    printf ' \\\\\\n'\n    for arg in \"$@\"; do\n        printf '\\t%s \\\\\\n' \"$(quote \"$arg\")\"\n    done\n}\n\n# --- escape most punctuation characters, except quotes, forward slash, and space ---\nescape() {\n    printf '%s' \"$@\" | sed -e 's/\\([][!#$%&()*;<=>?\\_`{|}]\\)/\\\\\\1/g;'\n}\n\n# --- escape double quotes ---\nescape_dq() {\n    printf '%s' \"$@\" | sed -e 's/\"/\\\\\"/g'\n}\n\n# --- ensures 
$K3S_URL is empty or begins with https://, exiting fatally otherwise ---\nverify_k3s_url() {\n    case \"${K3S_URL}\" in\n        \"\")\n            ;;\n        https://*)\n            ;;\n        *)\n            fatal \"Only https:// URLs are supported for K3S_URL (have ${K3S_URL})\"\n            ;;\n    esac\n}\n\n# --- define needed environment variables ---\nsetup_env() {\n    # --- use command args if passed or create default ---\n    case \"$1\" in\n        # --- if we only have flags discover if command should be server or agent ---\n        (-*|\"\")\n            if [ -z \"${K3S_URL}\" ]; then\n                CMD_K3S=server\n            else\n                if [ -z \"${K3S_TOKEN}\" ] && [ -z \"${K3S_TOKEN_FILE}\" ]; then\n                    fatal \"Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN or K3S_TOKEN_FILE is not defined.\"\n                fi\n                CMD_K3S=agent\n            fi\n        ;;\n        # --- command is provided ---\n        (*)\n            CMD_K3S=$1\n            shift\n        ;;\n    esac\n\n    verify_k3s_url\n\n    CMD_K3S_EXEC=\"${CMD_K3S}$(quote_indent \"$@\")\"\n\n    # --- use systemd name if defined or create default ---\n    if [ -n \"${INSTALL_K3S_NAME}\" ]; then\n        SYSTEM_NAME=k3s-${INSTALL_K3S_NAME}\n    else\n        if [ \"${CMD_K3S}\" = server ]; then\n            SYSTEM_NAME=k3s\n        else\n            SYSTEM_NAME=k3s-${CMD_K3S}\n        fi\n    fi\n\n    # --- check for invalid characters in system name ---\n    valid_chars=$(printf '%s' \"${SYSTEM_NAME}\" | sed -e 's/[][!#$%&()*;<=>?\\_`{|}/[:space:]]/^/g;' )\n    if [ \"${SYSTEM_NAME}\" != \"${valid_chars}\"  ]; then\n        invalid_chars=$(printf '%s' \"${valid_chars}\" | sed -e 's/[^^]/ /g')\n        fatal \"Invalid characters for system name:\n            ${SYSTEM_NAME}\n            ${invalid_chars}\"\n    fi\n\n    # --- use sudo if we are not already root ---\n    SUDO=sudo\n    if [ $(id -u) -eq 0 ]; 
then\n        SUDO=\n    fi\n\n    # --- use systemd type if defined or create default ---\n    if [ -n \"${INSTALL_K3S_TYPE}\" ]; then\n        SYSTEMD_TYPE=${INSTALL_K3S_TYPE}\n    else\n        SYSTEMD_TYPE=notify\n    fi\n\n    # --- use binary install directory if defined or create default ---\n    if [ -n \"${INSTALL_K3S_BIN_DIR}\" ]; then\n        BIN_DIR=${INSTALL_K3S_BIN_DIR}\n    else\n        # --- use /usr/local/bin if root can write to it, otherwise use /opt/bin if it exists\n        BIN_DIR=/usr/local/bin\n        if ! $SUDO sh -c \"touch ${BIN_DIR}/k3s-ro-test && rm -rf ${BIN_DIR}/k3s-ro-test\"; then\n            if [ -d /opt/bin ]; then\n                BIN_DIR=/opt/bin\n            fi\n        fi\n    fi\n\n    # --- use systemd directory if defined or create default ---\n    if [ -n \"${INSTALL_K3S_SYSTEMD_DIR}\" ]; then\n        SYSTEMD_DIR=\"${INSTALL_K3S_SYSTEMD_DIR}\"\n    else\n        SYSTEMD_DIR=/etc/systemd/system\n    fi\n\n    # --- set related files from system name ---\n    SERVICE_K3S=${SYSTEM_NAME}.service\n    UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh}\n    KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh}\n\n    # --- use service or environment location depending on systemd/openrc ---\n    if [ \"${HAS_SYSTEMD}\" = true ]; then\n        FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S}\n        FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env\n    elif [ \"${HAS_OPENRC}\" = true ]; then\n        $SUDO mkdir -p /etc/rancher/k3s\n        FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME}\n        FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env\n    fi\n\n    # --- get hash of config & exec for currently installed k3s ---\n    PRE_INSTALL_HASHES=$(get_installed_hashes)\n\n    # --- if bin directory is read only skip download ---\n    if [ \"${INSTALL_K3S_BIN_DIR_READ_ONLY}\" = true ]; then\n        INSTALL_K3S_SKIP_DOWNLOAD=true\n    fi\n\n    # --- setup channel values\n    
INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'}\n    INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'}\n}\n\n# --- check if skip download environment variable set ---\ncan_skip_download_binary() {\n    if [ \"${INSTALL_K3S_SKIP_DOWNLOAD}\" != true ] && [ \"${INSTALL_K3S_SKIP_DOWNLOAD}\" != binary ]; then\n        return 1\n    fi\n}\n\ncan_skip_download_selinux() {                                                        \n    if [ \"${INSTALL_K3S_SKIP_DOWNLOAD}\" != true ] && [ \"${INSTALL_K3S_SKIP_DOWNLOAD}\" != selinux ]; then \n        return 1                                                                     \n    fi                                                                               \n}  \n\n# --- verify an executable k3s binary is installed ---\nverify_k3s_is_executable() {\n    if [ ! -x ${BIN_DIR}/k3s ]; then\n        fatal \"Executable k3s binary not found at ${BIN_DIR}/k3s\"\n    fi\n}\n\n# --- set arch and suffix, fatal if architecture not supported ---\nsetup_verify_arch() {\n    if [ -z \"$ARCH\" ]; then\n        ARCH=$(uname -m)\n    fi\n    case $ARCH in\n        amd64)\n            ARCH=amd64\n            SUFFIX=\n            ;;\n        x86_64)\n            ARCH=amd64\n            SUFFIX=\n            ;;\n        arm64)\n            ARCH=arm64\n            SUFFIX=-${ARCH}\n            ;;\n        s390x)\n            ARCH=s390x\n            SUFFIX=-${ARCH}\n            ;;\n        aarch64)\n            ARCH=arm64\n            SUFFIX=-${ARCH}\n            ;;\n        arm*)\n            ARCH=arm\n            SUFFIX=-${ARCH}hf\n            ;;\n        *)\n            fatal \"Unsupported architecture $ARCH\"\n    esac\n}\n\n# --- verify existence of network downloader executable ---\nverify_downloader() {\n    # Return failure if it doesn't exist or is no executable\n    [ -x \"$(command -v $1)\" ] || return 1\n\n    # Set verified executable as our downloader program and return success\n    
DOWNLOADER=$1\n    return 0\n}\n\n# --- create temporary directory and cleanup when done ---\nsetup_tmp() {\n    TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)\n    TMP_HASH=${TMP_DIR}/k3s.hash\n    TMP_BIN=${TMP_DIR}/k3s.bin\n    cleanup() {\n        code=$?\n        set +e\n        trap - EXIT\n        rm -rf ${TMP_DIR}\n        exit $code\n    }\n    trap cleanup INT EXIT\n}\n\n# --- use desired k3s version if defined or find version from channel ---\nget_release_version() {\n    if [ -n \"${INSTALL_K3S_COMMIT}\" ]; then\n        VERSION_K3S=\"commit ${INSTALL_K3S_COMMIT}\"\n    elif [ -n \"${INSTALL_K3S_VERSION}\" ]; then\n        VERSION_K3S=${INSTALL_K3S_VERSION}\n    else\n        info \"Finding release for channel ${INSTALL_K3S_CHANNEL}\"\n        version_url=\"${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}\"\n        case $DOWNLOADER in\n            curl)\n                VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||')\n                ;;\n            wget)\n                VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||')\n                ;;\n            *)\n                fatal \"Incorrect downloader executable '$DOWNLOADER'\"\n                ;;\n        esac\n    fi\n    info \"Using ${VERSION_K3S} as release\"\n}\n\n# --- get k3s-selinux version ---\nget_k3s_selinux_version() {\n    available_version=\"k3s-selinux-1.2-2.${rpm_target}.noarch.rpm\"\n    info \"Finding available k3s-selinux versions\"\n    \n    # run verify_downloader in case it binary installation was skipped\n    verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'\n\n    case $DOWNLOADER in\n        curl)\n            DOWNLOADER_OPTS=\"-s\"\n            ;;\n        wget)\n            DOWNLOADER_OPTS=\"-q -O -\"\n            ;;\n        *)\n            fatal \"Incorrect downloader executable '$DOWNLOADER'\"\n            ;;\n    esac\n  
  for i in {1..3}; do\n        set +e\n        if [ \"${rpm_channel}\" = \"testing\" ]; then\n            version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases |  grep browser_download_url | awk '{ print $2 }' | grep -oE \"[^\\/]+${rpm_target}\\.noarch\\.rpm\" | head -n 1)\n        else\n            version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases/latest |  grep browser_download_url | awk '{ print $2 }' | grep -oE \"[^\\/]+${rpm_target}\\.noarch\\.rpm\")\n        fi\n        set -e\n        if [ \"${version}\" != \"\" ]; then\n            break\n        fi\n        sleep 1\n    done\n    if [ \"${version}\" == \"\" ]; then\n        warn \"Failed to get available versions of k3s-selinux..defaulting to ${available_version}\"\n        return\n    fi\n    available_version=${version}\n}\n\n# --- download from github url ---\ndownload() {\n    [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'\n\n    case $DOWNLOADER in\n        curl)\n            curl -o $1 -skfL $2\n            ;;\n        wget)\n            wget -qO $1 $2\n            ;;\n        *)\n            fatal \"Incorrect executable '$DOWNLOADER'\"\n            ;;\n    esac\n\n    # Abort if download command failed\n    [ $? 
-eq 0 ] || fatal 'Download failed'\n}\n\n# --- download hash from github url ---\ndownload_hash() {\n    if [ -n \"${INSTALL_K3S_COMMIT}\" ]; then\n        HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum\n    else\n        HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt\n    fi\n    info \"Downloading hash ${HASH_URL}\"\n    download ${TMP_HASH} ${HASH_URL}\n    HASH_EXPECTED=$(grep \" k3s${SUFFIX}$\" ${TMP_HASH})\n    HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*}\n}\n\n# --- check hash against installed version ---\ninstalled_hash_matches() {\n    if [ -x ${BIN_DIR}/k3s ]; then\n        HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s)\n        HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*}\n        if [ \"${HASH_EXPECTED}\" = \"${HASH_INSTALLED}\" ]; then\n            return\n        fi\n    fi\n    return 1\n}\n\n# --- download binary from github url ---\ndownload_binary() {\n    if [ -n \"${INSTALL_K3S_COMMIT}\" ]; then\n        BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}\n    else\n        BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX}\n    fi\n    info \"Downloading binary ${BIN_URL}\"\n    download ${TMP_BIN} ${BIN_URL}\n}\n\n# --- verify downloaded binary hash ---\nverify_binary() {\n    info \"Verifying binary download\"\n    HASH_BIN=$(sha256sum ${TMP_BIN})\n    HASH_BIN=${HASH_BIN%%[[:blank:]]*}\n    if [ \"${HASH_EXPECTED}\" != \"${HASH_BIN}\" ]; then\n        fatal \"Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}\"\n    fi\n}\n\n# --- setup permissions and move binary to system directory ---\nsetup_binary() {\n    chmod 755 ${TMP_BIN}\n    info \"Installing k3s to ${BIN_DIR}/k3s\"\n    $SUDO chown root:root ${TMP_BIN}\n    $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s\n}\n\n# --- setup selinux policy ---\nsetup_selinux() {\n    case ${INSTALL_K3S_CHANNEL} in \n        *testing)\n            rpm_channel=testing\n            ;;\n        *latest)\n            rpm_channel=latest\n        
    ;;\n        *)\n            rpm_channel=stable\n            ;;\n    esac\n\n    rpm_site=\"rpm.rancher.io\"\n    if [ \"${rpm_channel}\" = \"testing\" ]; then\n        rpm_site=\"rpm-testing.rancher.io\"\n    fi\n\n    [ -r /etc/os-release ] && . /etc/os-release\n    if [ `expr \"${ID_LIKE}\" : \".*suse.*\"` != 0 ]; then\n        rpm_target=sle\n        rpm_site_infix=microos\n        package_installer=zypper\n        if [ \"${ID_LIKE:-}\" = suse ] && [ \"${VARIANT_ID:-}\" = sle-micro ]; then\n            rpm_target=sle\n            rpm_site_infix=slemicro\n            package_installer=zypper\n        fi\n    elif [ \"${ID_LIKE:-}\" = coreos ] || [ \"${VARIANT_ID:-}\" = coreos ]; then\n        rpm_target=coreos\n        rpm_site_infix=coreos\n        package_installer=rpm-ostree\n    elif [ \"${VERSION_ID%%.*}\" = \"7\" ]; then\n        rpm_target=el7\n        rpm_site_infix=centos/7\n        package_installer=yum\n    elif [ \"${VERSION_ID%%.*}\" = \"8\" ] || [ \"${VERSION_ID%%.*}\" -gt \"36\" ]; then\n        rpm_target=el8\n        rpm_site_infix=centos/8\n        package_installer=yum\n    else\n        rpm_target=el9\n        rpm_site_infix=centos/9\n        package_installer=yum\n    fi\n\n    if [ \"${package_installer}\" = \"rpm-ostree\" ] && [ -x /bin/yum ]; then\n        package_installer=yum\n    fi\n\n    if [ \"${package_installer}\" = \"yum\" ] && [ -x /usr/bin/dnf ]; then\n        package_installer=dnf\n    fi\n\n    policy_hint=\"please install:\n    ${package_installer} install -y container-selinux\n    ${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/${available_version}\n\"\n\n    if [ \"$INSTALL_K3S_SKIP_SELINUX_RPM\" = true ] || can_skip_download_selinux || [ ! 
-d /usr/share/selinux ]; then\n        info \"Skipping installation of SELinux RPM\"\n    else\n        get_k3s_selinux_version\n        install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix}\n    fi\n\n    policy_error=fatal\n    if [ \"$INSTALL_K3S_SELINUX_WARN\" = true ] || [ \"${ID_LIKE:-}\" = coreos ] || [ \"${VARIANT_ID:-}\" = coreos ]; then\n        policy_error=warn\n    fi\n\n    if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then\n        if $SUDO grep '^\\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then\n            $policy_error \"Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}\"\n        fi\n    elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then\n        if [ -x /usr/sbin/transactional-update ] || [ \"${ID_LIKE:-}\" = coreos ] || [ \"${VARIANT_ID:-}\" = coreos ]; then\n            warn \"Please reboot your machine to activate the changes and avoid data loss.\"\n        else\n            $policy_error \"Failed to find the k3s-selinux policy, ${policy_hint}\"\n        fi\n    fi\n}\n\ninstall_selinux_rpm() {\n    if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ -r /etc/fedora-release ] || [ \"${ID_LIKE%%[ ]*}\" = \"suse\" ]; then\n        repodir=/etc/yum.repos.d\n        if [ -d /etc/zypp/repos.d ]; then\n            repodir=/etc/zypp/repos.d\n        fi\n        set +o noglob\n        $SUDO rm -f ${repodir}/rancher-k3s-common*.repo\n        set -o noglob\n        if [ -r /etc/redhat-release ] && [ \"${3}\" = \"el7\" ]; then\n            $SUDO yum install -y yum-utils\n            $SUDO yum-config-manager --enable rhel-7-server-extras-rpms\n        fi\n        $SUDO tee ${repodir}/rancher-k3s-common.repo >/dev/null << EOF\n[rancher-k3s-common-${2}]\nname=Rancher K3s Common 
(${2})\nbaseurl=https://${1}/k3s/${2}/common/${4}/noarch\nenabled=1\ngpgcheck=1\nrepo_gpgcheck=0\ngpgkey=https://${1}/public.key\nEOF\n        case ${3} in\n        sle)\n            rpm_installer=\"zypper --gpg-auto-import-keys\"\n            if [ \"${TRANSACTIONAL_UPDATE=false}\" != \"true\" ] && [ -x /usr/sbin/transactional-update ]; then\n                transactional_update_run=\"transactional-update --no-selfupdate -d run\"\n                rpm_installer=\"transactional-update --no-selfupdate -d run ${rpm_installer}\"\n                : \"${INSTALL_K3S_SKIP_START:=true}\"\n            fi\n            # create the /var/lib/rpm-state in SLE systems to fix the prein selinux macro\n            ${transactional_update_run} mkdir -p /var/lib/rpm-state\n            ;;\n        coreos)\n            rpm_installer=\"rpm-ostree\"\n            # rpm_install_extra_args=\"--apply-live\"\n            : \"${INSTALL_K3S_SKIP_START:=true}\"\n            ;;\n        *)\n            rpm_installer=\"yum\"\n            ;;\n        esac\n        if [ \"${rpm_installer}\" = \"yum\" ] && [ -x /usr/bin/dnf ]; then\n            rpm_installer=dnf\n        fi\n\t    if rpm -q --quiet k3s-selinux; then \n            # remove k3s-selinux module before upgrade to allow container-selinux to upgrade safely\n            if check_available_upgrades container-selinux ${3} && check_available_upgrades k3s-selinux ${3}; then\n                MODULE_PRIORITY=$($SUDO semodule --list=full | grep k3s | cut -f1 -d\" \")\n                if [ -n \"${MODULE_PRIORITY}\" ]; then\n                    $SUDO semodule -X $MODULE_PRIORITY -r k3s || true\n                fi\n            fi\n        fi\n        # shellcheck disable=SC2086\n        $SUDO ${rpm_installer} install -y \"k3s-selinux\"\n    fi\n    return\n}\n\ncheck_available_upgrades() {\n    set +e\n    case ${2} in\n        sle)\n            available_upgrades=$($SUDO zypper -q -t -s 11 se -s -u --type package $1 | tail -n 1 | grep -v \"No matching\" 
| awk '{print $3}')\n            ;;\n        coreos)\n            # currently rpm-ostree does not support search functionality https://github.com/coreos/rpm-ostree/issues/1877\n            ;;\n        *)\n            available_upgrades=$($SUDO yum -q --refresh list $1 --upgrades | tail -n 1 | awk '{print $2}')\n            ;;\n    esac\n    set -e\n    if [ -n \"${available_upgrades}\" ]; then\n        return 0\n    fi\n    return 1\n}\n# --- download and verify k3s ---\ndownload_and_verify() {\n    if can_skip_download_binary; then\n       info 'Skipping k3s download and verify'\n       verify_k3s_is_executable\n       return\n    fi\n\n    setup_verify_arch\n    verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'\n    setup_tmp\n    get_release_version\n    download_hash\n\n    if installed_hash_matches; then\n        info 'Skipping binary downloaded, installed k3s matches hash'\n        return\n    fi\n\n    download_binary\n    verify_binary\n    setup_binary\n}\n\n# --- add additional utility links ---\ncreate_symlinks() {\n    [ \"${INSTALL_K3S_BIN_DIR_READ_ONLY}\" = true ] && return\n    [ \"${INSTALL_K3S_SYMLINK}\" = skip ] && return\n\n    for cmd in kubectl crictl ctr; do\n        if [ ! 
-e ${BIN_DIR}/${cmd} ] || [ \"${INSTALL_K3S_SYMLINK}\" = force ]; then\n            which_cmd=$(command -v ${cmd} 2>/dev/null || true)\n            if [ -z \"${which_cmd}\" ] || [ \"${INSTALL_K3S_SYMLINK}\" = force ]; then\n                info \"Creating ${BIN_DIR}/${cmd} symlink to k3s\"\n                $SUDO ln -sf k3s ${BIN_DIR}/${cmd}\n            else\n                info \"Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}\"\n            fi\n        else\n            info \"Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists\"\n        fi\n    done\n}\n\n# --- create killall script ---\ncreate_killall() {\n    [ \"${INSTALL_K3S_BIN_DIR_READ_ONLY}\" = true ] && return\n    info \"Creating killall script ${KILLALL_K3S_SH}\"\n    $SUDO tee ${KILLALL_K3S_SH} >/dev/null << \\EOF\n#!/bin/sh\n[ $(id -u) -eq 0 ] || exec sudo $0 $@\n\nfor bin in /var/lib/rancher/k3s/data/**/bin/; do\n    [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux\ndone\n\nset -x\n\nfor service in /etc/systemd/system/k3s*.service; do\n    [ -s $service ] && systemctl stop $(basename $service)\ndone\n\nfor service in /etc/init.d/k3s*; do\n    [ -x $service ] && $service stop\ndone\n\npschildren() {\n    ps -e -o ppid= -o pid= | \\\n    sed -e 's/^\\s*//g; s/\\s\\s*/\\t/g;' | \\\n    grep -w \"^$1\" | \\\n    cut -f2\n}\n\npstree() {\n    for pid in $@; do\n        echo $pid\n        for child in $(pschildren $pid); do\n            pstree $child\n        done\n    done\n}\n\nkilltree() {\n    kill -9 $(\n        { set +x; } 2>/dev/null;\n        pstree $@;\n        set -x;\n    ) 2>/dev/null\n}\n\nremove_interfaces() {\n    # Delete network interface(s) that match 'master cni0'\n    ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do\n        iface=${iface%%@*}\n        [ -z \"$iface\" ] || ip link delete $iface\n    done\n\n    # Delete cni related interfaces\n    ip link delete cni0\n    ip link delete flannel.1\n    ip link 
delete flannel-v6.1\n    ip link delete kube-ipvs0\n    ip link delete flannel-wg\n    ip link delete flannel-wg-v6\n\n    # Restart tailscale\n    if [ -n \"$(command -v tailscale)\" ]; then\n        tailscale set --advertise-routes=\n    fi\n}\n\ngetshims() {\n    ps -e -o pid= -o args= | sed -e 's/^ *//; s/\\s\\s*/\\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1\n}\n\nkilltree $({ set +x; } 2>/dev/null; getshims; set -x)\n\ndo_unmount_and_remove() {\n    set +x\n    while read -r _ path _; do\n        case \"$path\" in $1*) echo \"$path\" ;; esac\n    done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount \"$0\" && rm -rf \"$0\"'\n    set -x\n}\n\ndo_unmount_and_remove '/run/k3s'\ndo_unmount_and_remove '/var/lib/rancher/k3s'\ndo_unmount_and_remove '/var/lib/kubelet/pods'\ndo_unmount_and_remove '/var/lib/kubelet/plugins'\ndo_unmount_and_remove '/run/netns/cni-'\n\n# Remove CNI namespaces\nip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete\n\nremove_interfaces\n\nrm -rf /var/lib/cni/\niptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore\nip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore\nEOF\n    $SUDO chmod 755 ${KILLALL_K3S_SH}\n    $SUDO chown root:root ${KILLALL_K3S_SH}\n}\n\n# --- create uninstall script ---\ncreate_uninstall() {\n    [ \"${INSTALL_K3S_BIN_DIR_READ_ONLY}\" = true ] && return\n    info \"Creating uninstall script ${UNINSTALL_K3S_SH}\"\n    $SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF\n#!/bin/sh\nset -x\n[ \\$(id -u) -eq 0 ] || exec sudo \\$0 \\$@\n\n${KILLALL_K3S_SH}\n\nif command -v systemctl; then\n    systemctl disable ${SYSTEM_NAME}\n    systemctl reset-failed ${SYSTEM_NAME}\n    systemctl daemon-reload\nfi\nif command -v rc-update; then\n    rc-update delete ${SYSTEM_NAME} default\nfi\n\nrm -f ${FILE_K3S_SERVICE}\nrm -f ${FILE_K3S_ENV}\n\nremove_uninstall() {\n    rm -f ${UNINSTALL_K3S_SH}\n}\ntrap remove_uninstall 
EXIT\n\nif (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then\n    set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x\n    exit\nfi\n\nfor cmd in kubectl crictl ctr; do\n    if [ -L ${BIN_DIR}/\\$cmd ]; then\n        rm -f ${BIN_DIR}/\\$cmd\n    fi\ndone\n\nrm -rf /etc/rancher/k3s\nrm -rf /run/k3s\nrm -rf /run/flannel\nrm -rf /var/lib/rancher/k3s\nrm -rf /var/lib/kubelet\nrm -f ${BIN_DIR}/k3s\nrm -f ${KILLALL_K3S_SH}\n\nif type yum >/dev/null 2>&1; then\n    yum remove -y k3s-selinux\n    rm -f /etc/yum.repos.d/rancher-k3s-common*.repo\nelif type rpm-ostree >/dev/null 2>&1; then\n    rpm-ostree uninstall k3s-selinux\n    rm -f /etc/yum.repos.d/rancher-k3s-common*.repo\nelif type zypper >/dev/null 2>&1; then\n    uninstall_cmd=\"zypper remove -y k3s-selinux\"\n    if [ \"\\${TRANSACTIONAL_UPDATE=false}\" != \"true\" ] && [ -x /usr/sbin/transactional-update ]; then\n        uninstall_cmd=\"transactional-update --no-selfupdate -d run \\$uninstall_cmd\"\n    fi\n    \\$uninstall_cmd\n    rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo\nfi\nEOF\n    $SUDO chmod 755 ${UNINSTALL_K3S_SH}\n    $SUDO chown root:root ${UNINSTALL_K3S_SH}\n}\n\n# --- disable current service if loaded --\nsystemd_disable() {\n    $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true\n    $SUDO rm -f /etc/systemd/system/${SERVICE_K3S} || true\n    $SUDO rm -f /etc/systemd/system/${SERVICE_K3S}.env || true\n}\n\n# --- capture current env and create file containing k3s_ variables ---\ncreate_env_file() {\n    info \"env: Creating environment file ${FILE_K3S_ENV}\"\n    $SUDO touch ${FILE_K3S_ENV}\n    $SUDO chmod 0600 ${FILE_K3S_ENV}\n    sh -c export | while read x v; do echo $v; done | grep -E '^(K3S|CONTAINERD)_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null\n    sh -c export | while read x v; do echo $v; done | grep -Ei '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null\n}\n\n# --- write systemd service file 
---\ncreate_systemd_service_file() {\n    info \"systemd: Creating service file ${FILE_K3S_SERVICE}\"\n    $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF\n[Unit]\nDescription=Lightweight Kubernetes\nDocumentation=https://k3s.io\nWants=network-online.target\nAfter=network-online.target\n\n[Install]\nWantedBy=multi-user.target\n\n[Service]\nType=${SYSTEMD_TYPE}\nEnvironmentFile=-/etc/default/%N\nEnvironmentFile=-/etc/sysconfig/%N\nEnvironmentFile=-${FILE_K3S_ENV}\nKillMode=process\nDelegate=yes\n# Having non-zero Limit*s causes performance problems due to accounting overhead\n# in the kernel. We recommend using cgroups to do container-local accounting.\nLimitNOFILE=1048576\nLimitNPROC=infinity\nLimitCORE=infinity\nTasksMax=infinity\nTimeoutStartSec=0\nRestart=always\nRestartSec=5s\nExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service'\nExecStartPre=-/sbin/modprobe br_netfilter\nExecStartPre=-/sbin/modprobe overlay\nExecStart=${BIN_DIR}/k3s \\\\\n    ${CMD_K3S_EXEC}\n\nEOF\n}\n\n# --- write openrc service file ---\ncreate_openrc_service_file() {\n    LOG_FILE=/var/log/${SYSTEM_NAME}.log\n\n    info \"openrc: Creating service file ${FILE_K3S_SERVICE}\"\n    $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF\n#!/sbin/openrc-run\n\ndepend() {\n    after network-online\n    want cgroups\n}\n\nstart_pre() {\n    rm -f /tmp/k3s.*\n}\n\nsupervisor=supervise-daemon\nname=${SYSTEM_NAME}\ncommand=\"${BIN_DIR}/k3s\"\ncommand_args=\"$(escape_dq \"${CMD_K3S_EXEC}\")\n    >>${LOG_FILE} 2>&1\"\n\noutput_log=${LOG_FILE}\nerror_log=${LOG_FILE}\n\npidfile=\"/var/run/${SYSTEM_NAME}.pid\"\nrespawn_delay=5\nrespawn_max=0\n\nset -o allexport\nif [ -f /etc/environment ]; then . /etc/environment; fi\nif [ -f ${FILE_K3S_ENV} ]; then . 
${FILE_K3S_ENV}; fi\nset +o allexport\nEOF\n    $SUDO chmod 0755 ${FILE_K3S_SERVICE}\n\n    $SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF\n${LOG_FILE} {\n\tmissingok\n\tnotifempty\n\tcopytruncate\n}\nEOF\n}\n\n# --- write systemd or openrc service file ---\ncreate_service_file() {\n    [ \"${HAS_SYSTEMD}\" = true ] && create_systemd_service_file\n    [ \"${HAS_OPENRC}\" = true ] && create_openrc_service_file\n    return 0\n}\n\n# --- get hashes of the current k3s bin and service files\nget_installed_hashes() {\n    $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true\n}\n\n# --- enable and start systemd service ---\nsystemd_enable() {\n    info \"systemd: Enabling ${SYSTEM_NAME} unit\"\n    $SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null\n    $SUDO systemctl daemon-reload >/dev/null\n}\n\nsystemd_start() {\n    info \"systemd: Starting ${SYSTEM_NAME}\"\n    $SUDO systemctl restart ${SYSTEM_NAME}\n}\n\n# --- enable and start openrc service ---\nopenrc_enable() {\n    info \"openrc: Enabling ${SYSTEM_NAME} service for default runlevel\"\n    $SUDO rc-update add ${SYSTEM_NAME} default >/dev/null\n}\n\nopenrc_start() {\n    info \"openrc: Starting ${SYSTEM_NAME}\"\n    $SUDO ${FILE_K3S_SERVICE} restart\n}\n\n# --- startup systemd or openrc service ---\nservice_enable_and_start() {\n    if [ -f \"/proc/cgroups\" ] && [ \"$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)\" -eq 0 ];\n    then\n        info 'Failed to find memory cgroup, you may need to add \"cgroup_memory=1 cgroup_enable=memory\" to your linux cmdline (/boot/cmdline.txt on a Raspberry Pi)'\n    fi\n\n    [ \"${INSTALL_K3S_SKIP_ENABLE}\" = true ] && return\n\n    [ \"${HAS_SYSTEMD}\" = true ] && systemd_enable\n    [ \"${HAS_OPENRC}\" = true ] && openrc_enable\n\n    [ \"${INSTALL_K3S_SKIP_START}\" = true ] && return\n\n    POST_INSTALL_HASHES=$(get_installed_hashes)\n    if [ \"${PRE_INSTALL_HASHES}\" = 
\"${POST_INSTALL_HASHES}\" ] && [ \"${INSTALL_K3S_FORCE_RESTART}\" != true ]; then\n        info 'No change detected so skipping service start'\n        return\n    fi\n\n    if command -v iptables-save 1> /dev/null && command -v iptables-restore 1> /dev/null\n    then\n\t    $SUDO iptables-save | grep -v KUBE- | grep -iv flannel | $SUDO iptables-restore\n    fi\n    if command -v ip6tables-save 1> /dev/null && command -v ip6tables-restore 1> /dev/null\n    then\n\t    $SUDO ip6tables-save | grep -v KUBE- | grep -iv flannel | $SUDO ip6tables-restore\n    fi\n\n    [ \"${HAS_SYSTEMD}\" = true ] && systemd_start\n    [ \"${HAS_OPENRC}\" = true ] && openrc_start\n    return 0\n}\n\n# --- re-evaluate args to include env command ---\neval set -- $(escape \"${INSTALL_K3S_EXEC}\") $(quote \"$@\")\n\n# --- run the install process --\n{\n    verify_system\n    setup_env \"$@\"\n    download_and_verify\n    setup_selinux\n    create_symlinks\n    create_killall\n    create_uninstall\n    systemd_disable\n    create_env_file\n    create_service_file\n    service_enable_and_start\n}\n"
  },
  {
    "path": "mdz/pkg/server/k3s_destroy.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"os/exec\"\n\t\"syscall\"\n)\n\n// k3sDestroyAllStep installs k3s and related tools.\ntype k3sDestroyAllStep struct {\n\toptions Options\n}\n\nfunc (s *k3sDestroyAllStep) Run() error {\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Destroy the OpenModelz Cluster...\\n\")\n\t// TODO(gaocegege): Embed the script into the binary.\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"/usr/local/bin/k3s-uninstall.sh\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *k3sDestroyAllStep) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/k3s_install.go",
    "content": "package server\n\nimport (\n\t_ \"embed\"\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"syscall\"\n)\n\n//go:embed k3s-install.sh\nvar bashContent string\n\n// k3sInstallStep installs k3s and related tools.\ntype k3sInstallStep struct {\n\toptions Options\n}\n\nfunc (s *k3sInstallStep) Run() error {\n\tcheckCmd := exec.Command(\"/bin/sh\", \"-c\", \"sudo k3s kubectl get nodes\")\n\tcheckCmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\tcheckCmd.Stdout = nil\n\tcheckCmd.Stderr = nil\n\terr := checkCmd.Run()\n\tif err == nil {\n\t\tfmt.Fprintf(s.options.OutputStream, \"🚧 The server is already created, skip...\\n\")\n\t\treturn nil\n\t}\n\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Creating the server...\\n\")\n\t// TODO(gaocegege): Embed the script into the binary.\n\t// Always run start, do not check the hash to decide.\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"INSTALL_K3S_VERSION=v1.27.3+k3s1 INSTALL_K3S_EXEC='--disable=traefik' INSTALL_K3S_FORCE_RESTART=true K3S_KUBECONFIG_MODE=644 K3S_TOKEN=openmodelz sh -\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdin.Close() // the doc says subProcess.Wait will close it, but I'm not sure, so I kept this line\n\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := io.WriteString(stdin, bashContent); err != nil {\n\t\treturn err\n\t}\n\t// Close the input stream to finish the pipe. 
Then the command will use the\n\t// input from the pipe to start the next process.\n\tstdin.Close()\n\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Waiting for the server to be created...\\n\")\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *k3sInstallStep) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/k3s_join.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"syscall\"\n)\n\n// k3sJoinStep installs k3s and related tools.\ntype k3sJoinStep struct {\n\toptions Options\n}\n\nfunc (s *k3sJoinStep) Run() error {\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Joining the cluster...\\n\")\n\t// TODO(gaocegege): Embed the script into the binary.\n\tcmdStr := fmt.Sprintf(\"INSTALL_K3S_FORCE_RESTART=true K3S_KUBECONFIG_MODE=644 K3S_TOKEN=openmodelz K3S_URL=https://%s:6443 sh -\", s.options.ServerIP)\n\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", cmdStr)\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdin.Close() // the doc says subProcess.Wait will close it, but I'm not sure, so I kept this line\n\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := io.WriteString(stdin, bashContent); err != nil {\n\t\treturn err\n\t}\n\t// Close the input stream to finish the pipe. Then the command will use the\n\t// input from the pipe to start the next process.\n\tstdin.Close()\n\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Waiting for the server to be ready...\\n\")\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *k3sJoinStep) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/k3s_killall.go",
    "content": "package server\n\nimport (\n\t\"fmt\"\n\t\"os/exec\"\n\t\"syscall\"\n)\n\n// k3sKillAllStep installs k3s and related tools.\ntype k3sKillAllStep struct {\n\toptions Options\n}\n\nfunc (s *k3sKillAllStep) Run() error {\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Stopping the OpenModelz Cluster...\\n\")\n\t// TODO(gaocegege): Embed the script into the binary.\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"/usr/local/bin/k3s-killall.sh\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *k3sKillAllStep) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/k3s_prepare.go",
    "content": "package server\n\nimport (\n\t_ \"embed\"\n\t\"fmt\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"text/template\"\n)\n\n//go:embed registries.yaml\nvar registriesContent string\n\nconst mirrorPath = \"/etc/rancher/k3s\"\nconst mirrorFile = \"registries.yaml\"\n\n// k3sPrepare install everything required by k3s.\ntype k3sPrepare struct {\n\toptions Options\n}\n\nfunc (s *k3sPrepare) Run() error {\n\tif !s.options.Mirror.Configured() {\n\t\treturn nil\n\t}\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Configure the mirror...\\n\")\n\n\ttmpl, err := template.New(\"registries\").Parse(registriesContent)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbuf := strings.Builder{}\n\terr = tmpl.Execute(&buf, s.options.Mirror)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", fmt.Sprintf(\n\t\t\"sudo mkdir -p %s && sudo tee %s > /dev/null << EOF\\n%s\\nEOF\",\n\t\tmirrorPath,\n\t\tfilepath.Join(mirrorPath, mirrorFile),\n\t\tbuf.String(),\n\t))\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *k3sPrepare) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/nginx-dep.yaml",
    "content": "kind: Namespace\napiVersion: v1\nmetadata:\n  name: ingress-nginx\n---\napiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n  name: ingress-nginx\n  namespace: kube-system\nspec:\n  chart: ingress-nginx\n  repo: https://kubernetes.github.io/ingress-nginx\n  targetNamespace: ingress-nginx\n  version: v4.7.0\n  set:\n  valuesContent: |-\n    fullnameOverride: ingress-nginx\n    controller:\n      kind: DaemonSet\n      hostNetwork: true\n      hostPort:\n        enabled: true\n      service:\n        enabled: true\n        ports:\n          http: 9000\n          https: 9001\n      publishService:\n        enabled: false\n      metrics:\n        enabled: false\n        serviceMonitor:\n          enabled: false\n      config:\n        use-forwarded-headers: \"true\"\n"
  },
  {
    "path": "mdz/pkg/server/nginx_install.go",
    "content": "package server\n\nimport (\n\t_ \"embed\"\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"syscall\"\n)\n\n//go:embed nginx-dep.yaml\nvar nginxYamlContent string\n\n// nginxInstallStep installs the nginx deployment.\ntype nginxInstallStep struct {\n\toptions Options\n}\n\nfunc (s *nginxInstallStep) Run() error {\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Initializing the load balancer...\\n\")\n\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"sudo k3s kubectl apply -f -\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdin.Close() // the doc says subProcess.Wait will close it, but I'm not sure, so I kept this line\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\tif _, err := io.WriteString(stdin, nginxYamlContent); err != nil {\n\t\treturn err\n\t}\n\t// Close the input stream to finish the pipe. Then the command will use the\n\t// input from the pipe to start the next process.\n\tstdin.Close()\n\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *nginxInstallStep) Verify() error {\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/openmodelz.yaml",
    "content": "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: openmodelz\n  labels:\n    name: openmodelz\n---\napiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n  name: openmodelz\n  namespace: kube-system\nspec:\n  chart: openmodelz\n  repo: https://tensorchord.github.io/openmodelz-charts\n  targetNamespace: openmodelz\n  version: {{.Version}}\n  set:\n  valuesContent: |-\n    fullnameOverride: openmodelz\n    agent:\n      ingress:\n        enabled: true\n        ipToDomain: {{.IpToDomain}}\n        domain: \"{{.Domain}}\"\n      modelzCloud:\n        enabled: {{.EnableModelZCloud}}\n        url: {{.ModelZCloudUrl}}\n        token: {{.ModelZCloudAgentToken}}\n        region: {{.ModelZCloudRegion}} \n"
  },
  {
    "path": "mdz/pkg/server/openmodelz_install.go",
    "content": "package server\n\nimport (\n\t_ \"embed\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"io\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\n//go:embed openmodelz.yaml\nvar yamlContent string\n\nvar resultDomain string\n\n// openModelZInstallStep installs the OpenModelZ deployments.\ntype openModelZInstallStep struct {\n\toptions Options\n}\n\nfunc (s *openModelZInstallStep) Run() error {\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Initializing the server...\\n\")\n\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"sudo k3s kubectl apply -f -\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdin.Close() // the doc says subProcess.Wait will close it, but I'm not sure, so I kept this line\n\tif s.options.Verbose {\n\t\tcmd.Stderr = s.options.OutputStream\n\t\tcmd.Stdout = s.options.OutputStream\n\t} else {\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tvariables := struct {\n\t\tDomain                string\n\t\tIpToDomain            bool\n\t\tVersion               string\n\t\tEnableModelZCloud     bool\n\t\tModelZCloudUrl        string\n\t\tModelZCloudAgentToken string\n\t\tModelZCloudRegion     string\n\t}{\n\t\tVersion: s.options.Version,\n\t}\n\tif s.options.Domain != nil {\n\t\tvariables.Domain = *s.options.Domain\n\t\tvariables.IpToDomain = false\n\t} else {\n\t\tfmt.Fprintf(s.options.OutputStream, \"🚧 No domain provided, using the server IP...\\n\")\n\t\tvariables.Domain = \"\"\n\t\tvariables.IpToDomain = true\n\t}\n\n\tif s.options.ModelZCloud.Enabled {\n\t\tvariables.EnableModelZCloud = true\n\t\tvariables.ModelZCloudUrl = s.options.ModelZCloud.URL\n\t\tvariables.ModelZCloudAgentToken = s.options.ModelZCloud.Token\n\t\tvariables.ModelZCloudRegion = s.options.ModelZCloud.Region\n\t} else {\n\t\tvariables.EnableModelZCloud 
= false\n\t\tvariables.ModelZCloudUrl = \"\"\n\t\tvariables.ModelZCloudAgentToken = \"\"\n\t\tvariables.ModelZCloudRegion = \"\"\n\t}\n\ttmpl, err := template.New(\"openmodelz\").Parse(yamlContent)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbuf := strings.Builder{}\n\terr = tmpl.Execute(&buf, variables)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlogrus.WithField(\"variables\", variables).\n\t\tDebugf(\"Deploying OpenModelZ with the following variables\")\n\n\tif _, err := io.WriteString(stdin, buf.String()); err != nil {\n\t\treturn err\n\t}\n\t// Close the input stream to finish the pipe. Then the command will use the\n\t// input from the pipe to start the next process.\n\tstdin.Close()\n\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Waiting for the server to be ready...\\n\")\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *openModelZInstallStep) Verify() error {\n\tfmt.Fprintf(s.options.OutputStream, \"🚧 Verifying the load balancer...\\n\")\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"sudo k3s kubectl get svc -n ingress-nginx ingress-nginx-controller -o jsonpath={@.status.loadBalancer.ingress}\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tPdeathsig: syscall.SIGKILL,\n\t}\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlogrus.Debugf(\"failed to get the ingress ip: %v\", err)\n\t\treturn err\n\t}\n\tlogrus.Debugf(\"kubectl get cmd output: %s\\n\", output)\n\tif len(output) <= 4 {\n\t\treturn fmt.Errorf(\"cannot get the ingress ip: output is empty\")\n\t}\n\n\t// Get the IP from the output lie this: `[{\"ip\":\"192.168.71.93\"}]`\n\tre := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)\n\tfound := re.MatchString(string(output))\n\tif !found {\n\t\treturn fmt.Errorf(\"cannot get the ingress ip\")\n\t}\n\n\tresultDomain = re.FindString(string(output))\n\treturn nil\n}\n"
  },
  {
    "path": "mdz/pkg/server/registries.yaml",
    "content": "mirrors:\n  {{ .Name }}:\n    endpoint:\n      {{ range $endpoint := .Endpoints }}- \"{{ $endpoint }}\"{{ end }}\n"
  },
  {
    "path": "mdz/pkg/telemetry/telemetry.go",
    "content": "package telemetry\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\t\"github.com/google/uuid\"\n\tsegmentio \"github.com/segmentio/analytics-go/v3\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/tensorchord/openmodelz/mdz/pkg/version\"\n)\n\ntype TelemetryField func(*segmentio.Properties)\n\ntype Telemetry interface {\n\tRecord(command string, args ...TelemetryField)\n}\n\ntype defaultTelemetry struct {\n\tclient  segmentio.Client\n\tuid     string\n\tenabled bool\n}\n\nconst telemetryToken = \"65WHA9bxCNX74K3HjgplMOmsio9LkYSI\"\n\nvar (\n\tonce                sync.Once\n\ttelemetry           *defaultTelemetry\n\ttelemetryConfigFile string\n)\n\nfunc init() {\n\thome, err := os.UserHomeDir()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttelemetryConfigFile = filepath.Join(home, \".config\", \"openmodelz\", \"telemetry\")\n}\n\nfunc GetTelemetry() Telemetry {\n\treturn telemetry\n}\n\nfunc Initialize(enabled bool) error {\n\tonce.Do(func() {\n\t\tclient, err := segmentio.NewWithConfig(telemetryToken, segmentio.Config{\n\t\t\tBatchSize: 1,\n\t\t})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttelemetry = &defaultTelemetry{\n\t\t\tclient:  client,\n\t\t\tenabled: enabled,\n\t\t}\n\t})\n\treturn telemetry.init()\n}\n\nfunc (t *defaultTelemetry) init() error {\n\tif !t.enabled {\n\t\treturn nil\n\t}\n\t// detect if the config file already exists\n\t_, err := os.Stat(telemetryConfigFile)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn errors.Wrap(err, \"failed to stat telemetry config file\")\n\t\t}\n\t\tt.uid = uuid.New().String()\n\t\treturn t.dumpConfig()\n\t}\n\tif err = t.loadConfig(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to load telemetry config\")\n\t}\n\tt.Idnetify()\n\treturn nil\n}\n\nfunc (t *defaultTelemetry) dumpConfig() error {\n\tif err := os.MkdirAll(filepath.Dir(telemetryConfigFile), os.ModeDir|0700); err != nil 
{\n\t\treturn errors.Wrap(err, \"failed to create telemetry config directory\")\n\t}\n\tfile, err := os.Create(telemetryConfigFile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create telemetry config file\")\n\t}\n\tdefer file.Close()\n\t_, err = file.WriteString(t.uid)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to write telemetry config file\")\n\t}\n\treturn nil\n}\n\nfunc (t *defaultTelemetry) loadConfig() error {\n\tfile, err := os.Open(telemetryConfigFile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to open telemetry config file\")\n\t}\n\tdefer file.Close()\n\tuid, err := io.ReadAll(file)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to read telemetry config file\")\n\t}\n\tt.uid = string(uid)\n\treturn nil\n}\n\nfunc (t *defaultTelemetry) Idnetify() {\n\tif !t.enabled {\n\t\treturn\n\t}\n\tv := version.GetOpenModelzVersion()\n\tif err := t.client.Enqueue(segmentio.Identify{\n\t\tAnonymousId: t.uid,\n\t\tContext: &segmentio.Context{\n\t\t\tOS: segmentio.OSInfo{\n\t\t\t\tName:    runtime.GOOS,\n\t\t\t\tVersion: runtime.GOARCH,\n\t\t\t},\n\t\t\tApp: segmentio.AppInfo{\n\t\t\t\tName:    \"openmodelz\",\n\t\t\t\tVersion: v,\n\t\t\t},\n\t\t},\n\t\tTimestamp: time.Now(),\n\t\tTraits:    segmentio.NewTraits(),\n\t}); err != nil {\n\t\tlogrus.WithError(err).Debug(\"failed to identify user\")\n\t\treturn\n\t}\n}\n\nfunc AddField(name string, value interface{}) TelemetryField {\n\treturn func(p *segmentio.Properties) {\n\t\tp.Set(name, value)\n\t}\n}\n\nfunc (t *defaultTelemetry) Record(command string, fields ...TelemetryField) {\n\tif !t.enabled {\n\t\treturn\n\t}\n\tlogrus.WithField(\"UID\", t.uid).WithField(\"command\", command).Debug(\"send telemetry\")\n\ttrack := segmentio.Track{\n\t\tAnonymousId: t.uid,\n\t\tEvent:       command,\n\t\tProperties:  segmentio.NewProperties(),\n\t}\n\tfor _, field := range fields {\n\t\tfield(&track.Properties)\n\t}\n\tif err := t.client.Enqueue(track); err != nil 
{\n\t\tlogrus.WithError(err).Debug(\"failed to send telemetry\")\n\t}\n\t// make sure the msg can be sent out\n\tt.client.Close()\n}\n"
  },
  {
    "path": "mdz/pkg/term/interrupt.go",
    "content": "/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage term\n\nimport (\n\t\"os\"\n\t\"os/signal\"\n\t\"sync\"\n\t\"syscall\"\n)\n\n// terminationSignals are signals that cause the program to exit in the\n// supported platforms (linux, darwin, windows).\nvar terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT}\n\n// Handler guarantees execution of notifications after a critical section (the function passed\n// to a Run method), even in the presence of process termination. It guarantees exactly once\n// invocation of the provided notify functions.\ntype Handler struct {\n\tnotify []func()\n\tfinal  func(os.Signal)\n\tonce   sync.Once\n}\n\n// Chain creates a new handler that invokes all notify functions when the critical section exits\n// and then invokes the optional handler's notifications. This allows critical sections to be\n// nested without losing exactly once invocations. Notify functions can invoke any cleanup needed\n// but should not exit (which is the responsibility of the parent handler).\nfunc Chain(handler *Handler, notify ...func()) *Handler {\n\tif handler == nil {\n\t\treturn New(nil, notify...)\n\t}\n\treturn New(handler.Signal, append(notify, handler.Close)...)\n}\n\n// New creates a new handler that guarantees all notify functions are run after the critical\n// section exits (or is interrupted by the OS), then invokes the final handler. 
If no final\n// handler is specified, the default final is `os.Exit(1)`. A handler can only be used for\n// one critical section.\nfunc New(final func(os.Signal), notify ...func()) *Handler {\n\treturn &Handler{\n\t\tfinal:  final,\n\t\tnotify: notify,\n\t}\n}\n\n// Close executes all the notification handlers if they have not yet been executed.\nfunc (h *Handler) Close() {\n\th.once.Do(func() {\n\t\tfor _, fn := range h.notify {\n\t\t\tfn()\n\t\t}\n\t})\n}\n\n// Signal is called when an os.Signal is received, and guarantees that all notifications\n// are executed, then the final handler is executed. This function should only be called once\n// per Handler instance.\nfunc (h *Handler) Signal(s os.Signal) {\n\th.once.Do(func() {\n\t\tfor _, fn := range h.notify {\n\t\t\tfn()\n\t\t}\n\t\tif h.final == nil {\n\t\t\tos.Exit(1)\n\t\t}\n\t\th.final(s)\n\t})\n}\n\n// Run ensures that any notifications are invoked after the provided fn exits (even if the\n// process is interrupted by an OS termination signal). Notifications are only invoked once\n// per Handler instance, so calling Run more than once will not behave as the user expects.\nfunc (h *Handler) Run(fn func() error) error {\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, terminationSignals...)\n\tdefer func() {\n\t\tsignal.Stop(ch)\n\t\tclose(ch)\n\t}()\n\tgo func() {\n\t\tsig, ok := <-ch\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\th.Signal(sig)\n\t}()\n\tdefer h.Close()\n\treturn fn()\n}\n"
  },
  {
    "path": "mdz/pkg/term/term.go",
    "content": "/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage term\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com/moby/term\"\n)\n\n// SafeFunc is a function to be invoked by TTY.\ntype SafeFunc func() error\n\n// TTY helps invoke a function and preserve the state of the terminal, even if the process is\n// terminated during execution. It also provides support for terminal resizing for remote command\n// execution/attachment.\ntype TTY struct {\n\t// In is a reader representing stdin. It is a required field.\n\tIn io.Reader\n\t// Out is a writer representing stdout. It must be set to support terminal resizing. It is an\n\t// optional field.\n\tOut io.Writer\n\t// Raw is true if the terminal should be set raw.\n\tRaw bool\n\t// TryDev indicates the TTY should try to open /dev/tty if the provided input\n\t// is not a file descriptor.\n\tTryDev bool\n\t// Parent is an optional interrupt handler provided to this function - if provided\n\t// it will be invoked after the terminal state is restored. 
If it is not provided,\n\t// a signal received during the TTY will result in os.Exit(0) being invoked.\n\tParent *Handler\n\n\t// sizeQueue is set after a call to MonitorSize() and is used to monitor SIGWINCH signals when the\n\t// user's terminal resizes.\n\t// sizeQueue *sizeQueue\n}\n\nfunc NewTTY() *TTY {\n\ttty := &TTY{}\n\tstdin, stdout, _ := term.StdStreams()\n\ttty.In = stdin\n\ttty.Out = stdout\n\treturn tty\n}\n\n// IsTerminalIn returns true if t.In is a terminal. Does not check /dev/tty\n// even if TryDev is set.\nfunc (t TTY) IsTerminalIn() bool {\n\treturn IsTerminal(t.In)\n}\n\n// IsTerminalOut returns true if t.Out is a terminal. Does not check /dev/tty\n// even if TryDev is set.\nfunc (t TTY) IsTerminalOut() bool {\n\treturn IsTerminal(t.Out)\n}\n\n// IsTerminal returns whether the passed object is a terminal or not\nfunc IsTerminal(i interface{}) bool {\n\t_, terminal := term.GetFdInfo(i)\n\treturn terminal\n}\n\n// AllowsColorOutput returns true if the specified writer is a terminal and\n// the process environment indicates color output is supported and desired.\nfunc AllowsColorOutput(w io.Writer) bool {\n\tif !IsTerminal(w) {\n\t\treturn false\n\t}\n\n\t// https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals\n\tif os.Getenv(\"TERM\") == \"dumb\" {\n\t\treturn false\n\t}\n\n\t// https://no-color.org/\n\tif _, nocolor := os.LookupEnv(\"NO_COLOR\"); nocolor {\n\t\treturn false\n\t}\n\n\t// On Windows WT_SESSION is set by the modern terminal component.\n\t// Older terminals have poor support for UTF-8, VT escape codes, etc.\n\tif runtime.GOOS == \"windows\" && os.Getenv(\"WT_SESSION\") == \"\" {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// Safe invokes the provided function and will attempt to ensure that when the\n// function returns (or a termination signal is sent) that the terminal state\n// is reset to the condition it was in prior to the function being invoked. 
If\n// t.Raw is true the terminal will be put into raw mode prior to calling the function.\n// If the input file descriptor is not a TTY and TryDev is true, the /dev/tty file\n// will be opened (if available).\nfunc (t TTY) Safe(fn SafeFunc) error {\n\tinFd, isTerminal := term.GetFdInfo(t.In)\n\n\tif !isTerminal && t.TryDev {\n\t\tif f, err := os.Open(\"/dev/tty\"); err == nil {\n\t\t\tdefer f.Close()\n\t\t\tinFd = f.Fd()\n\t\t\tisTerminal = term.IsTerminal(inFd)\n\t\t}\n\t}\n\tif !isTerminal {\n\t\treturn fn()\n\t}\n\n\tvar state *term.State\n\tvar err error\n\tif t.Raw {\n\t\tstate, err = term.MakeRaw(inFd)\n\t} else {\n\t\tstate, err = term.SaveState(inFd)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Chain(t.Parent, func() {\n\t\tterm.RestoreTerminal(inFd, state)\n\t}).Run(fn)\n}\n"
  },
  {
    "path": "mdz/pkg/version/version.go",
    "content": "/*\n   Copyright The TensorChord Inc.\n   Copyright The BuildKit Authors.\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\t// Package is filled at linking time\n\tPackage = \"github.com/tensorchord/openmodelz/agent\"\n\n\tHelmChartVersion = \"0.0.15\"\n\n\t// Revision is filled with the VCS (e.g. git) revision being used to build\n\t// the program at linking time.\n\tRevision = \"\"\n\n\tversion         = \"0.0.0+unknown\"\n\tbuildDate       = \"1970-01-01T00:00:00Z\" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'`\n\tgitCommit       = \"\"                     // output from `git rev-parse HEAD`\n\tgitTag          = \"\"                     // output from `git describe --exact-match --tags HEAD` (if clean tree state)\n\tgitTreeState    = \"\"                     // determined from `git status --porcelain`. 
either 'clean' or 'dirty'\n\tdevelopmentFlag = \"false\"\n)\n\n// Version contains OpenModelz version information\ntype Version struct {\n\tVersion      string\n\tBuildDate    string\n\tGitCommit    string\n\tGitTag       string\n\tGitTreeState string\n\tGoVersion    string\n\tCompiler     string\n\tPlatform     string\n}\n\nfunc (v Version) String() string {\n\treturn v.Version\n}\n\n// SetGitTagForE2ETest sets the gitTag for test purpose.\nfunc SetGitTagForE2ETest(tag string) {\n\tgitTag = tag\n}\n\n// GetOpenModelzVersion gets OpenModelz version information\nfunc GetOpenModelzVersion() string {\n\tvar versionStr string\n\n\tif gitCommit != \"\" && gitTag != \"\" &&\n\t\tgitTreeState == \"clean\" && developmentFlag == \"false\" {\n\t\t// if we have a clean tree state and the current commit is tagged,\n\t\t// this is an official release.\n\t\tversionStr = gitTag\n\t} else {\n\t\t// otherwise formulate a version string based on as much metadata\n\t\t// information we have available.\n\t\tif strings.HasPrefix(version, \"v\") {\n\t\t\tversionStr = version\n\t\t} else {\n\t\t\tversionStr = \"v\" + version\n\t\t}\n\t\tif len(gitCommit) >= 7 {\n\t\t\tversionStr += \"+\" + gitCommit[0:7]\n\t\t\tif gitTreeState != \"clean\" {\n\t\t\t\tversionStr += \".dirty\"\n\t\t\t}\n\t\t} else {\n\t\t\tversionStr += \"+unknown\"\n\t\t}\n\t}\n\treturn versionStr\n}\n\n// GetVersion returns the version information\nfunc GetVersion() Version {\n\treturn Version{\n\t\tVersion:      GetOpenModelzVersion(),\n\t\tBuildDate:    buildDate,\n\t\tGitCommit:    gitCommit,\n\t\tGitTag:       gitTag,\n\t\tGitTreeState: gitTreeState,\n\t\tGoVersion:    runtime.Version(),\n\t\tCompiler:     runtime.Compiler,\n\t\tPlatform:     fmt.Sprintf(\"%s/%s\", runtime.GOOS, runtime.GOARCH),\n\t}\n}\n\nvar (\n\treRelease *regexp.Regexp\n\treDev     *regexp.Regexp\n\treOnce    sync.Once\n)\n\nfunc UserAgent() string {\n\tversion := GetVersion().String()\n\n\treOnce.Do(func() {\n\t\treRelease = 
regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+$`)\n\t\treDev = regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+`)\n\t})\n\n\tif matches := reRelease.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1]\n\t} else if matches := reDev.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1] + \"-dev\"\n\t}\n\n\treturn \"modelz/\" + version\n}\n"
  },
  {
    "path": "modelzetes/.dockerignore",
    "content": "./faas-netes\n/yaml\n/yaml_armhf\n/yaml_arm64\n/chart\n/contrib\n/artifacts\n/hack\n/docs\n/.git\n"
  },
  {
    "path": "modelzetes/.gitattributes",
    "content": "yaml/* linguist-generated=true\n"
  },
  {
    "path": "modelzetes/.gitignore",
    "content": "# Binaries for programs and plugins\n*.exe\n*.dll\n*.so\n*.dylib\n\n# Test binary, build with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736\n.glide/\n.idea\n\nbin/\n**/password.txt\n**/gateway-password.txt\n\n.vscode\nof_kind_portforward.pid\n/kind*\n/kubectl\n/yaml_armhf\n/yaml_arm64\n\n/broker-*\n\n/chart/pro-builder/out\n/chart/pro-builder/payload.txt\n/pgconnector.yaml\n\njwt_key\njwt_key.pub\n/*.pid\n.tools/\n"
  },
  {
    "path": "modelzetes/Dockerfile",
    "content": "FROM ubuntu:22.04\n\nLABEL maintainer=\"modelz-support@tensorchord.ai\"\n\nCOPY modelzetes /usr/bin/modelzetes\nENTRYPOINT [\"/usr/bin/modelzetes\"]\n"
  },
  {
    "path": "modelzetes/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 TensorChord Inc.\nCopyright (c) 2020 OpenFaaS Author(s)\nCopyright (c) 2017 Alex Ellis\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "modelzetes/Makefile",
    "content": "# Copyright 2022 TensorChord Inc.\n#\n# The old school Makefile, following are required targets. The Makefile is written\n# to allow building multiple binaries. You are free to add more targets or change\n# existing implementations, as long as the semantics are preserved.\n#\n#   make              - default to 'build' target\n#   make lint         - code analysis\n#   make test         - run unit test (or plus integration test)\n#   make build        - alias to build-local target\n#   make build-local  - build local binary targets\n#   make build-linux  - build linux binary targets\n#   make container    - build containers\n#   $ docker login registry -u username -p xxxxx\n#   make push         - push containers\n#   make clean        - clean up targets\n#\n# Not included but recommended targets:\n#   make e2e-test\n#\n# The makefile is also responsible to populate project version information.\n#\n\n#\n# Tweak the variables based on your project.\n#\n\n# This repo's root import path (under GOPATH).\nROOT := github.com/tensorchord/openmodelz/modelzetes\n\n# Target binaries. 
You can build multiple binaries for a single project.\nTARGETS := modelzetes\n\n# Container image prefix and suffix added to targets.\n# The final built images are:\n#   $[REGISTRY]/$[IMAGE_PREFIX]$[TARGET]$[IMAGE_SUFFIX]:$[VERSION]\n# $[REGISTRY] is an item from $[REGISTRIES], $[TARGET] is an item from $[TARGETS].\nIMAGE_PREFIX ?= $(strip )\nIMAGE_SUFFIX ?= $(strip )\n\n# Container registries.\nREGISTRY ?= ghcr.io/tensorchord\n\n# Container registry for base images.\nBASE_REGISTRY ?= docker.io\nBASE_REGISTRY_USER ?= modelzai\n\n# Disable CGO by default.\nCGO_ENABLED ?= 0\n\n#\n# These variables should not need tweaking.\n#\n\n# It's necessary to set this because some environments don't link sh -> bash.\nexport SHELL := bash\n\n# It's necessary to set the errexit flags for the bash shell.\nexport SHELLOPTS := errexit\n\nPACKAGE_NAME := github.com/tensorchord/openmodelz/modelzetes\nGOLANG_CROSS_VERSION  ?= v1.17.6\n\n# Project main package location (can be multiple ones).\nCMD_DIR := ./cmd\n\n# Project output directory.\nOUTPUT_DIR := ./bin\nDEBUG_DIR := ./debug-bin\n\n# Build directory.\nBUILD_DIR := ./build\n\n# Current version of the project.\nVERSION ?= $(shell git describe --match 'v[0-9]*' --always --tags --abbrev=0)\nBUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\nGIT_COMMIT=$(shell git rev-parse HEAD)\nGIT_TAG=$(shell if [ -z \"`git status --porcelain`\" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)\nGIT_TREE_STATE=$(shell if [ -z \"`git status --porcelain`\" ]; then echo \"clean\" ; else echo \"dirty\"; fi)\nGITSHA ?= $(shell git rev-parse --short HEAD)\n\n# Track code version with Docker Label.\nDOCKER_LABELS ?= git-describe=\"$(shell date -u +v%Y%m%d)-$(shell git describe --tags --always --dirty)\"\n\n# Golang standard bin directory.\nGOPATH ?= $(shell go env GOPATH)\nGOROOT ?= $(shell go env GOROOT)\nBIN_DIR := $(GOPATH)/bin\nGOLANGCI_LINT := $(BIN_DIR)/golangci-lint\n\n# check if we need embed the dashboard\nDASHBOARD_BUILD ?= 
debug\n\n# Default golang flags used in build and test\n# -mod=vendor: force go to use the vendor files instead of using the `$GOPATH/pkg/mod`\n# -p: the number of programs that can be run in parallel\n# -count: run each test and benchmark 1 times. Set this flag to disable test cache\nexport GOFLAGS ?= -count=1\n\n#\n# Define all targets. At least the following commands are required:\n#\n\n# All targets.\n.PHONY: help lint test build container push addlicense debug debug-local build-local generate clean test-local addlicense-install release build-image\n\n.DEFAULT_GOAL:=build\n\nbuild: build-local  ## Build the release version of envd\n\nhelp:  ## Display this help\n\t@awk 'BEGIN {FS = \":.*##\"; printf \"\\nUsage:\\n  make \\033[36m<target>\\033[0m\\n\"} /^[a-zA-Z0-9_-]+:.*?##/ { printf \"  \\033[36m%-15s\\033[0m %s\\n\", $$1, $$2 } /^##@/ { printf \"\\n\\033[1m%s\\033[0m\\n\", substr($$0, 5) } ' $(MAKEFILE_LIST)\n\ndebug: debug-local  ## Build the debug version of envd\n\n# more info about `GOGC` env: https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint\nlint: $(GOLANGCI_LINT)  ## Lint GO code\n\t@$(GOLANGCI_LINT) run\n\n$(GOLANGCI_LINT):\n\tcurl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin\n\nmockgen-install:\n\tgo install github.com/golang/mock/mockgen@v1.6.0\n\naddlicense-install:\n\tgo install github.com/google/addlicense@latest\n\nsqlc-install:\n\tgo install github.com/kyleconroy/sqlc/cmd/sqlc@latest\n\n# https://github.com/swaggo/swag/pull/1322, we should use master instead of latest for now.\nswag-install:\n\tgo install github.com/swaggo/swag/cmd/swag@v1.8.7\n\nbuild-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags $(DASHBOARD_BUILD)  -trimpath -v -o $(OUTPUT_DIR)/$${target}     \\\n\t    -ldflags \"-s -w -X 
$(ROOT)/pkg/version.version=$(VERSION) -X $(ROOT)/pkg/version.buildDate=$(BUILD_DATE) -X $(ROOT)/pkg/version.gitCommit=$(GIT_COMMIT) -X $(ROOT)/pkg/version.gitTreeState=$(GIT_TREE_STATE)\"                     \\\n\t    $(CMD_DIR)/$${target};                                                         \\\n\tdone\n\n# It is used by vscode to attach into the process.\ndebug-local:\n\t@for target in $(TARGETS); do                                                      \\\n\t  CGO_ENABLED=$(CGO_ENABLED) go build -tags $(DASHBOARD_BUILD) -trimpath                                    \\\n\t  \t-v -o $(DEBUG_DIR)/$${target}                                                  \\\n\t  \t-gcflags='all=-N -l'                                                           \\\n\t    $(CMD_DIR)/$${target};                                                         \\\n\tdone\n\naddlicense: addlicense-install  ## Add license to GO code files\n\taddlicense -l mpl -c \"TensorChord Inc.\" $$(find . -type f -name '*.go' | grep -v pkg/docs/docs.go)\n\ntest-local:\n\t@go test -tags=$(DASHBOARD_BUILD) -v -race -coverprofile=coverage.out ./...\n\ntest:  ## Run the tests\n\t@go test -tags=$(DASHBOARD_BUILD) -race -coverpkg=./pkg/... -coverprofile=coverage.out ./...\n\t@go tool cover -func coverage.out | tail -n 1 | awk '{ print \"Total coverage: \" $$3 }'\n\nclean:  ## Clean the outputs and artifacts\n\t@-rm -vrf ${OUTPUT_DIR}\n\t@-rm -vrf ${DEBUG_DIR}\n\t@-rm -vrf build dist .eggs *.egg-info\n\nfmt: swag-install ## Run go fmt against code.\n\tgo fmt ./...\n\tswag fmt\n\nvet: ## Run go vet against code.\n\tgo vet ./...\n\nswag: swag-install\n\tswag init -g ./cmd/modelzetes/main.go --parseDependency --output ./pkg/docs \n\nbuild-image: build-local\n\tdocker build -t ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/modelzetes:dev -f Dockerfile ./bin\n\tdocker push ${BASE_REGISTRY}/${BASE_REGISTRY_USER}/modelzetes:dev\n\nrelease:\n\t@if [ ! 
-f \".release-env\" ]; then \\\n\t\techo \"\\033[91m.release-env is required for release\\033[0m\";\\\n\t\texit 1;\\\n\tfi\n\tdocker run \\\n\t\t--rm \\\n\t\t--privileged \\\n\t\t-e CGO_ENABLED=1 \\\n\t\t--env-file .release-env \\\n\t\t-v /var/run/docker.sock:/var/run/docker.sock \\\n\t\t-v `pwd`:/go/src/$(PACKAGE_NAME) \\\n\t\t-v `pwd`/sysroot:/sysroot \\\n\t\t-w /go/src/$(PACKAGE_NAME) \\\n\t\tgoreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \\\n\t\trelease --rm-dist\n"
  },
  {
    "path": "modelzetes/artifacts/crds/tensorchord.ai_inferences.yaml",
    "content": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  annotations:\n    controller-gen.kubebuilder.io/version: v0.5.0\n  creationTimestamp: null\n  name: inferences.tensorchord.ai\nspec:\n  group: tensorchord.ai\n  names:\n    kind: Inference\n    listKind: InferenceList\n    plural: inferences\n    singular: inference\n  scope: Namespaced\n  versions:\n    - additionalPrinterColumns:\n        - jsonPath: .spec.image\n          name: Image\n          type: string\n      name: v2alpha1\n      schema:\n        openAPIV3Schema:\n          description: Inference describes an Inference\n          type: object\n          required:\n            - spec\n          properties:\n            apiVersion:\n              description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n              type: string\n            kind:\n              description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n              type: string\n            metadata:\n              type: object\n            spec:\n              description: InferenceSpec defines the desired state of Inference\n              type: object\n              required:\n                - image\n                - name\n              properties:\n                annotations:\n                  description: Annotations are metadata for inferences which may be used by the faas-provider or the gateway\n                  type: object\n                  additionalProperties:\n                    type: string\n                command:\n                  description: Command to run when starting the\n                  type: string\n                constraints:\n                  description: Constraints are specific to the operator.\n                  type: array\n                  items:\n                    type: string\n                envVars:\n                  description: EnvVars can be provided to set environment variables for the inference runtime.\n                  type: object\n                  additionalProperties:\n                    type: string\n                framework:\n                  description: Framework is the inference framework.\n                  type: string\n                http_probe_path:\n                  description: HTTPProbePath is the path of the http probe.\n                  type: string\n                image:\n                  type: string\n                labels:\n                  description: Labels are metadata for inferences which may be used by the faas-provider or the gateway\n                  type: object\n                  additionalProperties:\n                    type: string\n                name:\n                  type: string\n                port:\n                  description: Port is the port exposed by the inference.\n                  
type: integer\n                  format: int32\n                resources:\n                  description: Limits for inference\n                  type: object\n                  properties:\n                    claims:\n                      description: \"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \\n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \\n This field is immutable. It can only be set for containers.\"\n                      type: array\n                      items:\n                        description: ResourceClaim references one entry in PodSpec.ResourceClaims.\n                        type: object\n                        required:\n                          - name\n                        properties:\n                          name:\n                            description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.\n                            type: string\n                      x-kubernetes-list-map-keys:\n                        - name\n                      x-kubernetes-list-type: map\n                    limits:\n                      description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'\n                      type: object\n                      additionalProperties:\n                        pattern: ^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$\n                        anyOf:\n                          - type: integer\n                          - type: string\n                        x-kubernetes-int-or-string: true\n                    requests:\n                      description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'\n                      type: object\n                      additionalProperties:\n                        pattern: ^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$\n                        anyOf:\n                          - type: integer\n                          - type: string\n                        x-kubernetes-int-or-string: true\n                scaling:\n                  description: Scaling is the scaling configuration for the inference.\n                  type: object\n                  properties:\n                    max_replicas:\n                      description: MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas. It defaults to 1.\n                      type: integer\n                      format: int32\n                    min_replicas:\n                      description: MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. 
It defaults to 0.\n                      type: integer\n                      format: int32\n                    startup_duration:\n                      description: StartupDuration is the duration of startup time.\n                      type: integer\n                      format: int32\n                    target_load:\n                      description: TargetLoad is the target load. In capacity mode, it is the expected number of the inflight requests per replica.\n                      type: integer\n                      format: int32\n                    type:\n                      description: Type is the scaling type. It can be either \"capacity\" or \"rps\". Default is \"capacity\".\n                      type: string\n                    zero_duration:\n                      description: ZeroDuration is the duration of zero load before scaling down to zero. Default is 5 minutes.\n                      type: integer\n                      format: int32\n                secrets:\n                  description: Secrets list of secrets to be made available to inference\n                  type: array\n                  items:\n                    type: string\n      served: true\n      storage: true\n      subresources: {}\nstatus:\n  acceptedNames:\n    kind: \"\"\n    plural: \"\"\n  conditions: []\n  storedVersions: []\n"
  },
  {
    "path": "modelzetes/artifacts/samples/v2alpha1.yaml",
    "content": "apiVersion: tensorchord.ai/v2alpha1\nkind: Inference\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  name: demo\n  framework: mosec\n  image: modelzai/llm-bloomz-560m:23.06.13\n  scaling:\n    min_replicas: 0\n    max_replicas: 1\n    target_load: 100\n    type: capacity\n    zero_duration: 60\n    startup_duration: 600\n  resources:\n    requests:\n      cpu: \"3\"\n      memory: 12Gi\n"
  },
  {
    "path": "modelzetes/buf.yaml",
    "content": "apiVersion: v1\nitems:\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-02T07:37:53Z\"\n    generation: 1\n    labels:\n      inference: 895f81b5-7e60-49b0-a2c1-145fc884cac7\n    name: 895f81b5-7e60-49b0-a2c1-145fc884cac7\n    namespace: modelz-a39caa1c-0b03-4054-b75f-1f1cf8424b01\n    resourceVersion: \"76627989\"\n    uid: 8f0b842a-be19-4dae-9fa0-986e830b0670\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-imagebind:23.05.2\n      ai.tensorchord.domain: https://imagebind-ogeboq7ciyo3sq0t.modelz.tech\n      ai.tensorchord.source: docker\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-imagebind:23.05.2\n    labels:\n      ai.tensorchord.framework: mosec\n      ai.tensorchord.name: imagebind\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 1m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 895f81b5-7e60-49b0-a2c1-145fc884cac7\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-05-06T09:28:13Z\"\n    generation: 1\n    labels:\n      inference: c5e72dcd-bee6-4b82-b08b-47cc9f4a2c1c\n    name: c5e72dcd-bee6-4b82-b08b-47cc9f4a2c1c\n    namespace: modelz-a39caa1c-0b03-4054-b75f-1f1cf8424b01\n    resourceVersion: \"50967692\"\n    uid: 1f201ac0-ed45-4cf3-b084-47752fabfb5c\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: 
docker.io/starkind/moss:v1.11\n      ai.tensorchord.domain: https://moss-jmclvinro4plugtp.modelz.tech\n      ai.tensorchord.source: docker\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: docker.io/starkind/moss:v1.11\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: moss\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: c5e72dcd-bee6-4b82-b08b-47cc9f4a2c1c\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-05-15T03:41:02Z\"\n    generation: 3\n    labels:\n      inference: 0b1e753e-9703-41c8-9311-b6bb943bcbda\n    name: 0b1e753e-9703-41c8-9311-b6bb943bcbda\n    namespace: modelz-a9d660cf-2537-4e48-bcaf-adf8470a83c0\n    resourceVersion: \"60441705\"\n    uid: ee8818d1-1e46-4d0b-bab3-3b1893ba4703\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\n      ai.tensorchord.domain: https://sd-uif34dg3x17kb21j.modelz.tech\n      ai.tensorchord.inference.spec: 
'{\"name\":\"0b1e753e-9703-41c8-9311-b6bb943bcbda\",\"image\":\"us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\",\"envVars\":{\"GRADIO_SERVER_NAME\":\"0.0.0.0\",\"GRADIO_SERVER_PORT\":\"7860\",\"HF_ENDPOINT\":\"http://hfserver.default:8080\"},\"constraints\":[\"ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\"],\"labels\":{\"ai.tensorchord.framework\":\"gradio\",\"ai.tensorchord.name\":\"sd\",\"ai.tensorchord.port\":\"7860\",\"ai.tensorchord.region\":\"us-central1\",\"ai.tensorchord.scale.max\":\"1\",\"ai.tensorchord.scale.min\":\"0\",\"ai.tensorchord.scale.target\":\"10\",\"ai.tensorchord.scale.type\":\"capacity\",\"ai.tensorchord.scale.zero-duration\":\"5m0s\",\"ai.tensorchord.server-resource\":\"nvidia-ada-l4-8c-32g\",\"ai.tensorchord.startup-duration\":\"10m0s\",\"ai.tensorchord.vendor\":\"gcp\",\"app\":\"0b1e753e-9703-41c8-9311-b6bb943bcbda\",\"controller\":\"0b1e753e-9703-41c8-9311-b6bb943bcbda\",\"inference\":\"0b1e753e-9703-41c8-9311-b6bb943bcbda\"},\"annotations\":{\"ai.tensorchord.docker.image\":\"us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\",\"ai.tensorchord.domain\":\"https://sd-uif34dg3x17kb21j.modelz.tech\",\"ai.tensorchord.inference.spec\":\"{\\\"name\\\":\\\"0b1e753e-9703-41c8-9311-b6bb943bcbda\\\",\\\"image\\\":\\\"us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\\\",\\\"constraints\\\":[\\\"ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\\\"],\\\"labels\\\":{\\\"ai.tensorchord.framework\\\":\\\"gradio\\\",\\\"ai.tensorchord.name\\\":\\\"sd\\\",\\\"ai.tensorchord.port\\\":\\\"7860\\\",\\\"ai.tensorchord.region\\\":\\\"us-central1\\\",\\\"ai.tensorchord.scale.max\\\":\\\"1\\\",\\\"ai.tensorchord.scale.min\\\":\\\"0\\\",\\\"ai.tensorchord.scale.target\\\":\\\"10\\\",\\\"ai.tensorchord.scale.type\\\":\\\"capacity\\\",\\\"ai.tensorchord.scale.zero-duration\\\":\\\"5m0s\\\",\\\"ai.tensorchord.server-resource\\\":\\\"nvidia-ada-l4-8c-
32g\\\",\\\"ai.tensorchord.startup-duration\\\":\\\"10m0s\\\",\\\"ai.tensorchord.vendor\\\":\\\"gcp\\\"},\\\"annotations\\\":{\\\"ai.tensorchord.docker.image\\\":\\\"us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\\\",\\\"ai.tensorchord.domain\\\":\\\"https://sd-uif34dg3x17kb21j.modelz.tech\\\",\\\"ai.tensorchord.source\\\":\\\"docker\\\"},\\\"resources\\\":{\\\"limits\\\":{\\\"nvidia.com/gpu\\\":\\\"1\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"6500m\\\",\\\"memory\\\":\\\"24Gi\\\",\\\"nvidia.com/gpu\\\":\\\"1\\\"}}}\",\"ai.tensorchord.source\":\"docker\",\"prometheus.io.scrape\":\"false\"},\"resources\":{\"limits\":{\"nvidia.com/gpu\":\"1\"},\"requests\":{\"cpu\":\"6500m\",\"memory\":\"24Gi\",\"nvidia.com/gpu\":\"1\"}}}'\n      ai.tensorchord.source: docker\n      prometheus.io.scrape: \"false\"\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    envVars:\n      GRADIO_SERVER_NAME: 0.0.0.0\n      GRADIO_SERVER_PORT: \"7860\"\n      HF_ENDPOINT: http://hfserver.default:8080\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: sd\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n      app: 0b1e753e-9703-41c8-9311-b6bb943bcbda\n      controller: 0b1e753e-9703-41c8-9311-b6bb943bcbda\n      inference: 0b1e753e-9703-41c8-9311-b6bb943bcbda\n    name: 0b1e753e-9703-41c8-9311-b6bb943bcbda\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n      
  nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-05-18T11:50:26Z\"\n    generation: 1\n    labels:\n      inference: 2fb46857-d0ea-4237-8165-4defbe037bbd\n    name: 2fb46857-d0ea-4237-8165-4defbe037bbd\n    namespace: modelz-a9d660cf-2537-4e48-bcaf-adf8470a83c0\n    resourceVersion: \"62499285\"\n    uid: 7edbbfe2-809f-4196-9ac1-fc47f731e817\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\n      ai.tensorchord.domain: https://ws-c2om23pvkpaeirf9.modelz.tech\n      ai.tensorchord.source: docker\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    envVars:\n      HF_HUB_OFFLINE: \"true\"\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\n    labels:\n      ai.tensorchord.framework: mosec\n      ai.tensorchord.name: ws\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 2fb46857-d0ea-4237-8165-4defbe037bbd\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-02T12:55:10Z\"\n    generation: 1\n    labels:\n      inference: 41d0d82e-31e4-4704-9e0d-49eec10a455b\n    name: 41d0d82e-31e4-4704-9e0d-49eec10a455b\n    namespace: modelz-a9d660cf-2537-4e48-bcaf-adf8470a83c0\n    resourceVersion: \"76837944\"\n    uid: b562acde-3849-4ecb-ac8e-30ba5252c7a5\n  spec:\n    annotations:\n      
ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-stable-diffusion:23.04.1\n      ai.tensorchord.domain: https://mosec-sd-3n7j7i3oh28sp2jw.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 0d603dd5-6d74-4e94-bc70-e5f223dd9d81\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    envVars:\n      HF_HUB_OFFLINE: \"true\"\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-stable-diffusion:23.04.1\n    labels:\n      ai.tensorchord.framework: mosec\n      ai.tensorchord.name: mosec-sd\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 41d0d82e-31e4-4704-9e0d-49eec10a455b\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-02T12:53:20Z\"\n    generation: 1\n    labels:\n      inference: 431e5913-f5f3-45dd-a1b3-139917a07cf9\n    name: 431e5913-f5f3-45dd-a1b3-139917a07cf9\n    namespace: modelz-a9d660cf-2537-4e48-bcaf-adf8470a83c0\n    resourceVersion: \"76836748\"\n    uid: 682dec57-3eb6-4925-af3b-c56a81073009\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n      ai.tensorchord.domain: https://ddddd2-wqkeotxfansceqp2.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 074852a9-b324-4c02-95b1-da06ec98a964\n    constraints:\n    - 
ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: ddddd2\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 431e5913-f5f3-45dd-a1b3-139917a07cf9\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-02T07:38:54Z\"\n    generation: 1\n    labels:\n      inference: 660a2a0a-d9b1-4b86-823b-4dc86f89cd02\n    name: 660a2a0a-d9b1-4b86-823b-4dc86f89cd02\n    namespace: modelz-a9d660cf-2537-4e48-bcaf-adf8470a83c0\n    resourceVersion: \"76628665\"\n    uid: f89d820c-7c7e-43d3-a73b-cafe57627006\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n      ai.tensorchord.domain: https://ddd-oky85wto4kxp0uyz.modelz.tech\n      ai.tensorchord.source: docker\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: ddd\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      
ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 660a2a0a-d9b1-4b86-823b-4dc86f89cd02\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-02T07:29:34Z\"\n    generation: 1\n    labels:\n      inference: 744ef0a4-51ea-438c-b013-f5c3b1c1548f\n    name: 744ef0a4-51ea-438c-b013-f5c3b1c1548f\n    namespace: modelz-a9d660cf-2537-4e48-bcaf-adf8470a83c0\n    resourceVersion: \"76622421\"\n    uid: a7ae0743-e166-4591-aec0-33a23101506d\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n      ai.tensorchord.domain: https://dollyv2-9fhuv221e3py7j8t.modelz.tech\n      ai.tensorchord.source: docker\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: dollyv2\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 744ef0a4-51ea-438c-b013-f5c3b1c1548f\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        
cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-02T07:30:30Z\"\n    generation: 1\n    labels:\n      inference: aeaf50f9-1b7e-4a37-87a1-6947a65fc07c\n    name: aeaf50f9-1b7e-4a37-87a1-6947a65fc07c\n    namespace: modelz-a9d660cf-2537-4e48-bcaf-adf8470a83c0\n    resourceVersion: \"76623043\"\n    uid: 8ce0ace2-a710-4ba0-96ad-be2f100ad8fb\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n      ai.tensorchord.domain: https://dollyv2-l3idk5o2yhzpgqmc.modelz.tech\n      ai.tensorchord.source: docker\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: dollyv2\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: aeaf50f9-1b7e-4a37-87a1-6947a65fc07c\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-27T04:20:35Z\"\n    generation: 1\n    labels:\n      inference: 8ed9973e-8bf0-45d3-8f9a-24a228e1a1f9\n    name: 8ed9973e-8bf0-45d3-8f9a-24a228e1a1f9\n    namespace: modelz-ba5ce029-180a-445d-89a8-bb111b29c0fe\n    resourceVersion: 
\"98647435\"\n    uid: 92b09491-086f-4be0-baf1-c4a6d82cb2c5\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.domain: https://test-y5qie3qkgqba99fw.modelz.tech\n      ai.tensorchord.huggingface.space: https://huggingface.co/spaces/abidlabs/en2fr\n      ai.tensorchord.source: huggingface\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    image: registry.hf.space/abidlabs-en2fr:latest\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: test\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"1\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 8ed9973e-8bf0-45d3-8f9a-24a228e1a1f9\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-15T06:59:52Z\"\n    generation: 1\n    labels:\n      inference: b33863ba-9c0f-4edb-8f95-00a522a0c335\n    name: b33863ba-9c0f-4edb-8f95-00a522a0c335\n    namespace: modelz-ba5ce029-180a-445d-89a8-bb111b29c0fe\n    resourceVersion: \"88434776\"\n    uid: 48cf0cdc-defa-4986-ad55-13a4769252e3\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\n      ai.tensorchord.domain: https://test-gradio-yf9ws7k1yimrgrcd.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 9c0137e7-5732-49f9-b6bd-e920f1c6e4d4\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    image: 
us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: test-gradio\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: b33863ba-9c0f-4edb-8f95-00a522a0c335\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-05T03:27:22Z\"\n    generation: 1\n    labels:\n      inference: 0741b1e5-f556-439d-adc7-9cba932d2d73\n    name: 0741b1e5-f556-439d-adc7-9cba932d2d73\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"79293297\"\n    uid: e2183d12-afa4-443c-ab59-01af0f581aba\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\n      ai.tensorchord.domain: https://whisper-9yx10apmsgdtb0di.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: c2974e48-7fc3-4690-8910-a2e98abc82d1\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    envVars:\n      HF_HUB_OFFLINE: \"true\"\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\n    labels:\n      ai.tensorchord.framework: mosec\n      ai.tensorchord.name: whisper\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      
ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 0741b1e5-f556-439d-adc7-9cba932d2d73\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-05T03:25:48Z\"\n    generation: 1\n    labels:\n      inference: 3c2328a1-4c61-41b8-b670-6df5813d8c51\n    name: 3c2328a1-4c61-41b8-b670-6df5813d8c51\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"79292168\"\n    uid: f0da8e43-2235-4910-98d2-76621fdd2e4a\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n      ai.tensorchord.domain: https://dolly-p52q4f5qhpnayw00.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 10a47537-d1cc-4aa5-a9bd-173060cf812e\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-ramananth1-dolly-v2:23.05.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: dolly\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 3c2328a1-4c61-41b8-b670-6df5813d8c51\n    resources:\n      limits:\n        nvidia.com/gpu: 
\"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-29T03:27:54Z\"\n    generation: 1\n    labels:\n      inference: 480f37dc-57b5-4677-a2e7-9e34621dc4e3\n    name: 480f37dc-57b5-4677-a2e7-9e34621dc4e3\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"100326009\"\n    uid: fc090985-3971-41f3-8777-6785ea328fa5\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-tts-vits:23.06.7\n      ai.tensorchord.domain: https://vits-bench-f5sn326og2gb20lv.modelz.tech\n      ai.tensorchord.source: docker\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-tts-vits:23.06.7\n    labels:\n      ai.tensorchord.framework: mosec\n      ai.tensorchord.name: vits-bench\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"3\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"5\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 1m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 480f37dc-57b5-4677-a2e7-9e34621dc4e3\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-05T03:26:49Z\"\n    generation: 1\n    labels:\n      inference: 5f10d4e3-e9b0-46e1-9265-6d2ecb98930e\n    name: 5f10d4e3-e9b0-46e1-9265-6d2ecb98930e\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"79292878\"\n    uid: d77bb280-b397-4e41-a2f5-9bf180f61596\n  spec:\n    
annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stabilityai-stablelm-tuned-alpha-chat:23.05.1\n      ai.tensorchord.domain: https://stablelm-7p1b95fcxhyeiriy.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 64462bf4-2373-4273-ae76-433707410bcd\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stabilityai-stablelm-tuned-alpha-chat:23.05.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: stablelm\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 5f10d4e3-e9b0-46e1-9265-6d2ecb98930e\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-05T03:27:12Z\"\n    generation: 1\n    labels:\n      inference: 81343170-1688-491c-a139-87037f3b8dcd\n    name: 81343170-1688-491c-a139-87037f3b8dcd\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"79293174\"\n    uid: 8d63f8de-7bf6-40a8-8997-fa69fce037d0\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-visual-chatgpt:23.04.1\n      ai.tensorchord.domain: https://visual-chatgpt-0e0iez8r17xzxksf.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 
78100725-18ca-41e9-b7a8-7ac573f6280b\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    envVars:\n      OPENAI_API_KEY: a\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-visual-chatgpt:23.04.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: visual-chatgpt\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 81343170-1688-491c-a139-87037f3b8dcd\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-09T10:50:48Z\"\n    generation: 1\n    labels:\n      inference: 8e365318-53ed-449e-ac15-f7ab8750d79f\n    name: 8e365318-53ed-449e-ac15-f7ab8750d79f\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"83183128\"\n    uid: 5fca43a9-a9e4-4bd0-8b9b-5ffa5b730ab8\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/llm-bloomz-560m:23.06.12\n      ai.tensorchord.domain: https://a-k7l6t2w8osvf4trq.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 92721d83-2dba-460f-9bde-b7eee4ea4950\n    constraints:\n    - ai.tensorchord.server-resource=cpu-4c-16g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/llm-bloomz-560m:23.06.12\n    labels:\n      ai.tensorchord.framework: other\n      ai.tensorchord.name: a\n      ai.tensorchord.region: us-central1\n      
ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: cpu-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: 8e365318-53ed-449e-ac15-f7ab8750d79f\n    resources:\n      requests:\n        cpu: \"3\"\n        memory: 8Gi\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-19T09:15:48Z\"\n    generation: 1\n    labels:\n      inference: c6637a2e-0812-4600-b9d5-d1fbc01a68f8\n    name: c6637a2e-0812-4600-b9d5-d1fbc01a68f8\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"92057903\"\n    uid: b59cdece-e083-40bc-8a39-25d30ad9a753\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      ai.tensorchord.domain: https://hf-9oln0cnymw5eyzzf.modelz.tech\n      ai.tensorchord.huggingface.space: https://huggingface.co/spaces/HuggingFaceH4/falcon-chat\n      ai.tensorchord.source: huggingface\n    constraints:\n    - ai.tensorchord.server-resource=cpu-4c-16g\n    image: registry.hf.space/huggingfaceh4-falcon-chat:latest\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: hf\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: cpu-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: c6637a2e-0812-4600-b9d5-d1fbc01a68f8\n    resources:\n      requests:\n        cpu: \"3\"\n        memory: 8Gi\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: 
\"2023-06-05T03:26:36Z\"\n    generation: 1\n    labels:\n      inference: c7f18c10-4c99-471c-b9d3-dd906e02cc7f\n    name: c7f18c10-4c99-471c-b9d3-dd906e02cc7f\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"79292725\"\n    uid: 36d9a032-2cda-4abb-ad8e-aad182e6850f\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\n      ai.tensorchord.domain: https://sd-web-co4ubmbo9k6c751q.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 755b5ba6-ce50-4ef9-bbe0-e00c3cecb380\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-stable-diffusion:23.04.4\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: sd-web\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: c7f18c10-4c99-471c-b9d3-dd906e02cc7f\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-05T03:26:16Z\"\n    generation: 1\n    labels:\n      inference: d0c06992-94b5-4a99-90b4-a9900335acfe\n    name: d0c06992-94b5-4a99-90b4-a9900335acfe\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"79292483\"\n    uid: 42b1485b-9ed6-49ed-81b7-fb9742931300\n  spec:\n    annotations:\n      ai.tensorchord.command: python app.py\n      
ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-microsoft-hugginggpt:23.05.1\n      ai.tensorchord.domain: https://huggingpt-5vi5th3l7llhgq4h.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 8dbec890-ff7b-467e-ada2-e37f055f5c4f\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-ada-l4-8c-32g\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/gradio-microsoft-hugginggpt:23.05.1\n    labels:\n      ai.tensorchord.framework: gradio\n      ai.tensorchord.name: huggingpt\n      ai.tensorchord.port: \"7860\"\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-ada-l4-8c-32g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: d0c06992-94b5-4a99-90b4-a9900335acfe\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: 6500m\n        memory: 24Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-06-05T03:26:26Z\"\n    generation: 1\n    labels:\n      inference: f3d6bedf-447d-4f9b-b27f-1f03f43a1f97\n    name: f3d6bedf-447d-4f9b-b27f-1f03f43a1f97\n    namespace: modelz-cd4a928c-7c66-4934-beb2-98dd82d672ce\n    resourceVersion: \"79292608\"\n    uid: 68ee40dd-6366-4229-941a-16a5c019956d\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-stable-diffusion:23.04.1\n      ai.tensorchord.domain: https://sd-rl41fip7qafvrjzl.modelz.tech\n      ai.tensorchord.source: docker\n      ai.tensorchord.template-id: 6fe4cfb5-4e8b-4d34-ba03-de534343361e\n    constraints:\n    - 
ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    envVars:\n      HF_HUB_OFFLINE: \"true\"\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-stable-diffusion:23.04.1\n    labels:\n      ai.tensorchord.framework: mosec\n      ai.tensorchord.name: sd\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"1\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n    name: f3d6bedf-447d-4f9b-b27f-1f03f43a1f97\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\n- apiVersion: tensorchord.ai/v1\n  kind: Inference\n  metadata:\n    creationTimestamp: \"2023-05-26T08:34:40Z\"\n    generation: 2\n    labels:\n      inference: 94e62a6d-adde-4ff2-9053-7b15b6f18727\n    name: 94e62a6d-adde-4ff2-9053-7b15b6f18727\n    namespace: modelz-d3524a71-c17c-4c92-8faf-8603f02f4713\n    resourceVersion: \"99387184\"\n    uid: 5b11d701-b474-4cad-8fff-bf107c5b3992\n  spec:\n    annotations:\n      ai.tensorchord.docker.image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\n      ai.tensorchord.domain: https://demo-e9ijwrk2il0kvzl7.modelz.tech\n      ai.tensorchord.inference.spec: 
'{\"name\":\"94e62a6d-adde-4ff2-9053-7b15b6f18727\",\"image\":\"us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\",\"envVars\":{\"HF_HUB_OFFLINE\":\"true\"},\"constraints\":[\"ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\"],\"labels\":{\"ai.tensorchord.framework\":\"mosec\",\"ai.tensorchord.name\":\"demo\",\"ai.tensorchord.region\":\"us-central1\",\"ai.tensorchord.scale.max\":\"1\",\"ai.tensorchord.scale.min\":\"0\",\"ai.tensorchord.scale.target\":\"10\",\"ai.tensorchord.scale.type\":\"capacity\",\"ai.tensorchord.scale.zero-duration\":\"5m0s\",\"ai.tensorchord.server-resource\":\"nvidia-tesla-t4-4c-16g\",\"ai.tensorchord.startup-duration\":\"10m0s\",\"ai.tensorchord.vendor\":\"gcp\"},\"annotations\":{\"ai.tensorchord.docker.image\":\"us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\",\"ai.tensorchord.domain\":\"https://demo-e9ijwrk2il0kvzl7.modelz.tech\",\"ai.tensorchord.source\":\"docker\"},\"resources\":{\"limits\":{\"nvidia.com/gpu\":\"1\"},\"requests\":{\"cpu\":\"3\",\"memory\":\"12Gi\",\"nvidia.com/gpu\":\"1\"}}}'\n      ai.tensorchord.source: docker\n      prometheus.io.scrape: \"false\"\n    constraints:\n    - ai.tensorchord.server-resource=nvidia-tesla-t4-4c-16g\n    envVars:\n      HF_ENDPOINT: http://hfserver.default:8080\n      HF_HUB_OFFLINE: \"true\"\n      MOSEC_PORT: \"8080\"\n    image: us-central1-docker.pkg.dev/nth-guide-378813/modelzai/mosec-whisper:23.04.1\n    labels:\n      ai.tensorchord.framework: mosec\n      ai.tensorchord.name: demo\n      ai.tensorchord.region: us-central1\n      ai.tensorchord.scale.max: \"0\"\n      ai.tensorchord.scale.min: \"0\"\n      ai.tensorchord.scale.target: \"10\"\n      ai.tensorchord.scale.type: capacity\n      ai.tensorchord.scale.zero-duration: 5m0s\n      ai.tensorchord.server-resource: nvidia-tesla-t4-4c-16g\n      ai.tensorchord.startup-duration: 10m0s\n      ai.tensorchord.vendor: gcp\n      app: 94e62a6d-adde-4ff2-9053-7b15b6f18727\n      
controller: 94e62a6d-adde-4ff2-9053-7b15b6f18727\n      inference: 94e62a6d-adde-4ff2-9053-7b15b6f18727\n    name: 94e62a6d-adde-4ff2-9053-7b15b6f18727\n    resources:\n      limits:\n        nvidia.com/gpu: \"1\"\n      requests:\n        cpu: \"3\"\n        memory: 12Gi\n        nvidia.com/gpu: \"1\"\nkind: List\nmetadata:\n  resourceVersion: \"\"\n  selfLink: \"\"\n"
  },
  {
    "path": "modelzetes/cmd/modelzetes/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tcli \"github.com/urfave/cli/v2\"\n\t\"k8s.io/klog\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/app\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/version\"\n)\n\nfunc run(args []string) error {\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tfmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision)\n\t}\n\tklog.InitFlags(nil)\n\n\ta := app.New()\n\treturn a.Run(args)\n}\n\nfunc handleErr(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tklog.Error(err)\n\tos.Exit(1)\n}\n\nfunc main() {\n\terr := run(os.Args)\n\thandleErr(err)\n}\n"
  },
  {
    "path": "modelzetes/hack/boilerplate.go.txt",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n"
  },
  {
    "path": "modelzetes/hack/print-codegen-version.sh",
    "content": "#!/bin/bash\n\n# This scripts exists primarily so that it can be used in the Makefile.\n# It is needed because the `($shell ...)` command was having issues with the pipe.\n# Extracting it to a script was the simplest solution.\n\ngrep 'k8s.io/code-generator' go.mod | awk '{print $2}'\n"
  },
  {
    "path": "modelzetes/hack/update-codegen.sh",
    "content": "#!/usr/bin/env bash\n\n# copied from: https://github.com/weaveworks/flagger/tree/master/hack\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nSCRIPT_ROOT=$(git rev-parse --show-toplevel)/modelzetes\n\necho \">> SCRIPT_ROOT ${SCRIPT_ROOT}\"\n\nGET_PKG_LOCATION() {\n  pkg_name=\"${1:-}\"\n\n  pkg_location=\"$(go list -m -f '{{.Dir}}' \"${pkg_name}\" 2>/dev/null)\"\n  if [ \"${pkg_location}\" = \"\" ]; then\n    echo \"${pkg_name} is missing. Running 'go mod download'.\"\n\n    go mod download\n    pkg_location=$(go list -m -f '{{.Dir}}' \"${pkg_name}\")\n  fi\n  echo \"${pkg_location}\"\n}\n\n# Grab code-generator version from go.sum\nCODEGEN_PKG=\"$(GET_PKG_LOCATION \"k8s.io/code-generator\")\"\necho \">> Using ${CODEGEN_PKG}\"\n\n# Grab openapi-gen version from go.mod\nOPENAPI_PKG=\"$(GET_PKG_LOCATION 'k8s.io/kube-openapi')\"\necho \">> Using ${OPENAPI_PKG}\"\n\necho \">> Using ${CODEGEN_PKG}\"\n\n# code-generator does work with go.mod but makes assumptions about\n# the project living in `$GOPATH/src`. To work around this and support\n# any location; create a temporary directory, use this as an output\n# base, and copy everything back once generated.\nTEMP_DIR=$(mktemp -d)\ncleanup() {\n    echo \">> Removing ${TEMP_DIR}\"\n    rm -rf ${TEMP_DIR}\n}\ntrap \"cleanup\" EXIT SIGINT\n\necho \">> Temporary output directory ${TEMP_DIR}\"\n\n# Ensure we can execute.\nchmod +x ${CODEGEN_PKG}/generate-groups.sh\n\n${CODEGEN_PKG}/generate-groups.sh all \\\n    github.com/tensorchord/openmodelz/modelzetes/pkg/client github.com/tensorchord/openmodelz/modelzetes/pkg/apis \\\n    modelzetes:v2alpha1 \\\n    --output-base \"${TEMP_DIR}\" \\\n    --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt\n\n# Copy everything back.\ncp -r \"${TEMP_DIR}/github.com/tensorchord/openmodelz/modelzetes/.\" \"${SCRIPT_ROOT}/\"\n"
  },
  {
    "path": "modelzetes/hack/update-crds.sh",
    "content": "#!/bin/bash\n\nexport controllergen=\"$GOPATH/bin/controller-gen\"\nexport PKG=sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0\n\nif [ ! -e \"$controllergen\" ]; then\n  echo \"Getting $PKG\"\n  go install $PKG\nfi\n\n\"$controllergen\" \\\n  crd \\\n  schemapatch:manifests=./artifacts/crds \\\n  paths=./pkg/apis/modelzetes/v2alpha1 \\\n  output:dir=./artifacts/crds\n"
  },
  {
    "path": "modelzetes/hack/verify-codegen.sh",
    "content": "#!/usr/bin/env bash\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nSCRIPT_ROOT=$(git rev-parse --show-toplevel)/modelzetes\n\nDIFFROOT=\"${SCRIPT_ROOT}/pkg\"\nTMP_DIFFROOT=\"${SCRIPT_ROOT}/_tmp/pkg\"\n_tmp=\"${SCRIPT_ROOT}/_tmp\"\n\ncleanup() {\n  rm -rf \"${_tmp}\"\n}\ntrap \"cleanup\" EXIT SIGINT\n\ncleanup\n\nmkdir -p \"${TMP_DIFFROOT}\"\ncp -a \"${DIFFROOT}\"/* \"${TMP_DIFFROOT}\"\n\n\"${SCRIPT_ROOT}/hack/update-codegen.sh\"\necho \"diffing ${DIFFROOT} against freshly generated codegen\"\nret=0\ndiff -Naupr \"${DIFFROOT}\" \"${TMP_DIFFROOT}\" || ret=$?\ncp -a \"${TMP_DIFFROOT}\"/* \"${DIFFROOT}\"\nif [[ $ret -eq 0 ]]\nthen\n  echo \"${DIFFROOT} up to date.\"\nelse\n  echo \"${DIFFROOT} is out of date. Please run hack/update-codegen.sh\"\n  exit 1\nfi\n"
  },
  {
    "path": "modelzetes/pkg/apis/modelzetes/register.go",
    "content": "package modelzetes\n\nconst (\n\tGroupName = \"tensorchord.ai\"\n)\n"
  },
  {
    "path": "modelzetes/pkg/apis/modelzetes/v2alpha1/doc.go",
    "content": "// +k8s:deepcopy-gen=package,register\n\n// Package v2alpha1 is the modelzetes API.\n// +groupName=tensorchord.ai\npackage v2alpha1\n"
  },
  {
    "path": "modelzetes/pkg/apis/modelzetes/v2alpha1/register.go",
    "content": "package v2alpha1\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\n\tcontroller \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes\"\n)\n\n// SchemeGroupVersion is group version used to register these objects\nvar SchemeGroupVersion = schema.GroupVersion{Group: controller.GroupName, Version: \"v2alpha1\"}\n\n// Resource takes an unqualified resource and returns a Group qualified GroupResource\nfunc Resource(resource string) schema.GroupResource {\n\treturn SchemeGroupVersion.WithResource(resource).GroupResource()\n}\n\nvar (\n\t// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.\n\tSchemeBuilder      runtime.SchemeBuilder\n\tlocalSchemeBuilder = &SchemeBuilder\n\tAddToScheme        = localSchemeBuilder.AddToScheme\n\tKind               = \"Inference\"\n)\n\nfunc init() {\n\t// We only register manually written functions here. The registration of the\n\t// generated functions takes place in the generated files. The separation\n\t// makes the code compile even when the generated files are missing.\n\tlocalSchemeBuilder.Register(addKnownTypes)\n}\n\n// Adds the list of known types to api.Scheme.\nfunc addKnownTypes(scheme *runtime.Scheme) error {\n\tscheme.AddKnownTypes(SchemeGroupVersion,\n\t\t&Inference{},\n\t\t&InferenceList{},\n\t)\n\tmetav1.AddToGroupVersion(scheme, SchemeGroupVersion)\n\treturn nil\n}\n"
  },
  {
    "path": "modelzetes/pkg/apis/modelzetes/v2alpha1/types.go",
    "content": "package v2alpha1\n\nimport (\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\n// +genclient\n// +genclient:noStatus\n// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n// +kubebuilder:printcolumn:name=\"Image\",type=string,JSONPath=`.spec.image`\n\n// Inference describes an Inference\ntype Inference struct {\n\tmetav1.TypeMeta   `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\tSpec InferenceSpec `json:\"spec\"`\n}\n\n// InferenceSpec defines the desired state of Inference\ntype InferenceSpec struct {\n\tName string `json:\"name\"`\n\n\tImage string `json:\"image\"`\n\n\t// Scaling is the scaling configuration for the inference.\n\tScaling *ScalingConfig `json:\"scaling,omitempty\"`\n\n\t// Framework is the inference framework.\n\tFramework Framework `json:\"framework,omitempty\"`\n\n\t// Port is the port exposed by the inference.\n\tPort *int32 `json:\"port,omitempty\"`\n\n\t// HTTPProbePath is the path of the http probe.\n\tHTTPProbePath *string `json:\"http_probe_path,omitempty\"`\n\n\t// Command to run when starting the\n\tCommand *string `json:\"command,omitempty\"`\n\n\t// EnvVars can be provided to set environment variables for the inference runtime.\n\tEnvVars map[string]string `json:\"envVars,omitempty\"`\n\n\t// Constraints are specific to the operator.\n\tConstraints []string `json:\"constraints,omitempty\"`\n\n\t// Secrets list of secrets to be made available to inference\n\tSecrets []string `json:\"secrets,omitempty\"`\n\n\t// Labels are metadata for inferences which may be used by the\n\t// faas-provider or the gateway\n\tLabels map[string]string `json:\"labels,omitempty\"`\n\n\t// Annotations are metadata for inferences which may be used by the\n\t// faas-provider or the gateway\n\tAnnotations map[string]string `json:\"annotations,omitempty\"`\n\n\t// Limits for inference\n\tResources *v1.ResourceRequirements `json:\"resources,omitempty\"`\n}\n\n// 
Framework is the inference framework. It is only used to set the default port\n// and command. For example, if the framework is \"gradio\", the default port is\n// 7860 and the default command is \"python app.py\". You could override these\n// defaults by setting the port and command fields and framework to `other`.\ntype Framework string\n\nconst (\n\tFrameworkGradio    Framework = \"gradio\"\n\tFrameworkStreamlit Framework = \"streamlit\"\n\tFrameworkMosec     Framework = \"mosec\"\n\tFrameworkOther     Framework = \"other\"\n)\n\ntype ScalingConfig struct {\n\t// MinReplicas is the lower limit for the number of replicas to which the\n\t// autoscaler can scale down. It defaults to 0.\n\tMinReplicas *int32 `json:\"min_replicas,omitempty\"`\n\t// MaxReplicas is the upper limit for the number of replicas to which the\n\t// autoscaler can scale up. It cannot be less that minReplicas. It defaults\n\t// to 1.\n\tMaxReplicas *int32 `json:\"max_replicas,omitempty\"`\n\t// TargetLoad is the target load. In capacity mode, it is the expected number of the inflight requests per replica.\n\tTargetLoad *int32 `json:\"target_load,omitempty\"`\n\t// Type is the scaling type. It can be either \"capacity\" or \"rps\". Default is \"capacity\".\n\tType *ScalingType `json:\"type,omitempty\"`\n\t// ZeroDuration is the duration of zero load before scaling down to zero. Default is 5 minutes.\n\tZeroDuration *int32 `json:\"zero_duration,omitempty\"`\n\t// StartupDuration is the duration of startup time.\n\tStartupDuration *int32 `json:\"startup_duration,omitempty\"`\n}\n\ntype ScalingType string\n\nconst (\n\tScalingTypeCapacity ScalingType = \"capacity\"\n\tScalingTypeRPS      ScalingType = \"rps\"\n)\n\n// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n\n// InferenceList is a list of inference resources\ntype InferenceList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []Inference `json:\"items\"`\n}\n"
  },
  {
    "path": "modelzetes/pkg/apis/modelzetes/v2alpha1/zz_generated.deepcopy.go",
    "content": "//go:build !ignore_autogenerated\n// +build !ignore_autogenerated\n\n/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by deepcopy-gen. DO NOT EDIT.\n\npackage v2alpha1\n\nimport (\n\tv1 \"k8s.io/api/core/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n)\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *Inference) DeepCopyInto(out *Inference) {\n\t*out = *in\n\tout.TypeMeta = in.TypeMeta\n\tin.ObjectMeta.DeepCopyInto(&out.ObjectMeta)\n\tin.Spec.DeepCopyInto(&out.Spec)\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inference.\nfunc (in *Inference) DeepCopy() *Inference {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Inference)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.\nfunc (in *Inference) DeepCopyObject() runtime.Object {\n\tif c := in.DeepCopy(); c != nil {\n\t\treturn c\n\t}\n\treturn nil\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil.\nfunc (in *InferenceList) DeepCopyInto(out *InferenceList) {\n\t*out = *in\n\tout.TypeMeta = in.TypeMeta\n\tin.ListMeta.DeepCopyInto(&out.ListMeta)\n\tif in.Items != nil {\n\t\tin, out := &in.Items, &out.Items\n\t\t*out = make([]Inference, len(*in))\n\t\tfor i := range *in {\n\t\t\t(*in)[i].DeepCopyInto(&(*out)[i])\n\t\t}\n\t}\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceList.\nfunc (in *InferenceList) DeepCopy() *InferenceList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(InferenceList)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.\nfunc (in *InferenceList) DeepCopyObject() runtime.Object {\n\tif c := in.DeepCopy(); c != nil {\n\t\treturn c\n\t}\n\treturn nil\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *InferenceSpec) DeepCopyInto(out *InferenceSpec) {\n\t*out = *in\n\tif in.Scaling != nil {\n\t\tin, out := &in.Scaling, &out.Scaling\n\t\t*out = new(ScalingConfig)\n\t\t(*in).DeepCopyInto(*out)\n\t}\n\tif in.Port != nil {\n\t\tin, out := &in.Port, &out.Port\n\t\t*out = new(int32)\n\t\t**out = **in\n\t}\n\tif in.HTTPProbePath != nil {\n\t\tin, out := &in.HTTPProbePath, &out.HTTPProbePath\n\t\t*out = new(string)\n\t\t**out = **in\n\t}\n\tif in.Command != nil {\n\t\tin, out := &in.Command, &out.Command\n\t\t*out = new(string)\n\t\t**out = **in\n\t}\n\tif in.EnvVars != nil {\n\t\tin, out := &in.EnvVars, &out.EnvVars\n\t\t*out = make(map[string]string, len(*in))\n\t\tfor key, val := range *in {\n\t\t\t(*out)[key] = val\n\t\t}\n\t}\n\tif in.Constraints != nil {\n\t\tin, out := &in.Constraints, &out.Constraints\n\t\t*out = make([]string, len(*in))\n\t\tcopy(*out, *in)\n\t}\n\tif in.Secrets != nil {\n\t\tin, out := &in.Secrets, &out.Secrets\n\t\t*out = make([]string, 
len(*in))\n\t\tcopy(*out, *in)\n\t}\n\tif in.Labels != nil {\n\t\tin, out := &in.Labels, &out.Labels\n\t\t*out = make(map[string]string, len(*in))\n\t\tfor key, val := range *in {\n\t\t\t(*out)[key] = val\n\t\t}\n\t}\n\tif in.Annotations != nil {\n\t\tin, out := &in.Annotations, &out.Annotations\n\t\t*out = make(map[string]string, len(*in))\n\t\tfor key, val := range *in {\n\t\t\t(*out)[key] = val\n\t\t}\n\t}\n\tif in.Resources != nil {\n\t\tin, out := &in.Resources, &out.Resources\n\t\t*out = new(v1.ResourceRequirements)\n\t\t(*in).DeepCopyInto(*out)\n\t}\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceSpec.\nfunc (in *InferenceSpec) DeepCopy() *InferenceSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(InferenceSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *ScalingConfig) DeepCopyInto(out *ScalingConfig) {\n\t*out = *in\n\tif in.MinReplicas != nil {\n\t\tin, out := &in.MinReplicas, &out.MinReplicas\n\t\t*out = new(int32)\n\t\t**out = **in\n\t}\n\tif in.MaxReplicas != nil {\n\t\tin, out := &in.MaxReplicas, &out.MaxReplicas\n\t\t*out = new(int32)\n\t\t**out = **in\n\t}\n\tif in.TargetLoad != nil {\n\t\tin, out := &in.TargetLoad, &out.TargetLoad\n\t\t*out = new(int32)\n\t\t**out = **in\n\t}\n\tif in.Type != nil {\n\t\tin, out := &in.Type, &out.Type\n\t\t*out = new(ScalingType)\n\t\t**out = **in\n\t}\n\tif in.ZeroDuration != nil {\n\t\tin, out := &in.ZeroDuration, &out.ZeroDuration\n\t\t*out = new(int32)\n\t\t**out = **in\n\t}\n\tif in.StartupDuration != nil {\n\t\tin, out := &in.StartupDuration, &out.StartupDuration\n\t\t*out = new(int32)\n\t\t**out = **in\n\t}\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfig.\nfunc (in *ScalingConfig) DeepCopy() *ScalingConfig {\n\tif in == nil {\n\t\treturn 
nil\n\t}\n\tout := new(ScalingConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n"
  },
  {
    "path": "modelzetes/pkg/app/config.go",
    "content": "package app\n\nimport (\n\tcli \"github.com/urfave/cli/v2\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/config\"\n)\n\nfunc configFromCLI(c *cli.Context) config.Config {\n\tcfg := config.Config{}\n\n\t// kubernetes\n\tcfg.KubeConfig.Kubeconfig = c.String(flagKubeConfig)\n\tcfg.KubeConfig.MasterURL = c.String(flagMasterURL)\n\tcfg.KubeConfig.QPS = c.Int(flagQPS)\n\tcfg.KubeConfig.Burst = c.Int(flagBurst)\n\tcfg.KubeConfig.ResyncPeriod = c.Duration(flagResyncPeriod)\n\n\t// controller\n\tcfg.Controller.ThreadCount = c.Int(flagControllerThreads)\n\n\t// metrics\n\tcfg.Metrics.ServerPort = c.Int(flagMetricsServerPort)\n\n\t// huggingface\n\tcfg.HuggingfaceProxy.Endpoint = c.String(flagHuggingfaceEndpoint)\n\n\t// probes\n\tcfg.Probes.Readiness.InitialDelaySeconds = c.Int(flagProbeReadinessInitialDelaySeconds)\n\tcfg.Probes.Readiness.PeriodSeconds = c.Int(flagProbeReadinessPeriodSeconds)\n\tcfg.Probes.Readiness.TimeoutSeconds = c.Int(flagProbeReadinessTimeoutSeconds)\n\n\tcfg.Probes.Liveness.InitialDelaySeconds = c.Int(flagProbeLivenessInitialDelaySeconds)\n\tcfg.Probes.Liveness.PeriodSeconds = c.Int(flagProbeLivenessPeriodSeconds)\n\tcfg.Probes.Liveness.TimeoutSeconds = c.Int(flagProbeLivenessTimeoutSeconds)\n\n\tcfg.Probes.Startup.InitialDelaySeconds = c.Int(flagProbeStartupInitialDelaySeconds)\n\tcfg.Probes.Startup.PeriodSeconds = c.Int(flagProbeStartupPeriodSeconds)\n\tcfg.Probes.Startup.TimeoutSeconds = c.Int(flagProbeStartupTimeoutSeconds)\n\n\t// inference\n\tcfg.Inference.ImagePullPolicy = c.String(flagInferenceImagePullPolicy)\n\tcfg.Inference.SetUpRuntimeClassNvidia = c.Bool(flagInferenceSetUpRuntimeClassNvidia)\n\treturn cfg\n}\n"
  },
  {
    "path": "modelzetes/pkg/app/root.go",
    "content": "// This Source Code Form is subject to the terms of the Mozilla Public\n// License, v. 2.0. If a copy of the MPL was not distributed with this\n// file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\npackage app\n\nimport (\n\t\"flag\"\n\t\"time\"\n\n\t\"github.com/cockroachdb/errors\"\n\tcli \"github.com/urfave/cli/v2\"\n\t\"k8s.io/klog\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/controller\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/signals\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/version\"\n)\n\nconst (\n\tflagDebug = \"debug\"\n\n\t// metrics\n\tflagMetricsServerPort = \"metrics-server-port\"\n\n\t// kubernetes\n\tflagMasterURL    = \"master-url\"\n\tflagKubeConfig   = \"kube-config\"\n\tflagQPS          = \"kube-qps\"\n\tflagBurst        = \"kube-burst\"\n\tflagResyncPeriod = \"kube-resync-period\"\n\n\t// controller\n\tflagControllerThreads = \"controller-thread-count\"\n\n\t// huggingface\n\tflagHuggingfaceEndpoint = \"huggingface-endpoint\"\n\n\t// probes\n\tflagProbeReadinessInitialDelaySeconds = \"probe-readiness-initial-delay-seconds\"\n\tflagProbeReadinessPeriodSeconds       = \"probe-readiness-period-seconds\"\n\tflagProbeReadinessTimeoutSeconds      = \"probe-readiness-timeout-seconds\"\n\n\tflagProbeLivenessInitialDelaySeconds = \"probe-liveness-initial-delay-seconds\"\n\tflagProbeLivenessPeriodSeconds       = \"probe-liveness-period-seconds\"\n\tflagProbeLivenessTimeoutSeconds      = \"probe-liveness-timeout-seconds\"\n\n\tflagProbeStartupInitialDelaySeconds = \"probe-startup-initial-delay-seconds\"\n\tflagProbeStartupPeriodSeconds       = \"probe-startup-period-seconds\"\n\tflagProbeStartupTimeoutSeconds      = \"probe-startup-timeout-seconds\"\n\n\t// inference\n\tflagInferenceImagePullPolicy         = \"inference-image-pull-policy\"\n\tflagInferenceSetUpRuntimeClassNvidia = \"inference-set-up-runtime-class-nvidia\"\n)\n\ntype App struct {\n\t*cli.App\n}\n\nfunc New() App 
{\n\tinternalApp := cli.NewApp()\n\tinternalApp.EnableBashCompletion = true\n\tinternalApp.Name = \"modelzetes\"\n\tinternalApp.Usage = \"kubernetes operator for modelz\"\n\tinternalApp.HideHelpCommand = true\n\tinternalApp.HideVersion = false\n\tinternalApp.Version = version.GetVersion().String()\n\tinternalApp.Flags = []cli.Flag{\n\t\t&cli.BoolFlag{\n\t\t\tName:    flagDebug,\n\t\t\tUsage:   \"enable debug output in logs\",\n\t\t\tEnvVars: []string{\"DEBUG\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagMetricsServerPort,\n\t\t\tValue:   8081,\n\t\t\tUsage:   \"port to listen on\",\n\t\t\tEnvVars: []string{\"MODELZETES_SERVER_PORT\"},\n\t\t\tAliases: []string{\"p\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagMasterURL,\n\t\t\tUsage:   \"URL to master for kubernetes cluster\",\n\t\t\tEnvVars: []string{\"MODELZETES_MASTER_URL\"},\n\t\t\tAliases: []string{\"mu\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagKubeConfig,\n\t\t\tUsage:   \"Path to kubeconfig file. If not provided, will use in-cluster config\",\n\t\t\tEnvVars: []string{\"MODELZETES_KUBE_CONFIG\"},\n\t\t\tAliases: []string{\"kc\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagQPS,\n\t\t\tUsage:   \"QPS for kubernetes client\",\n\t\t\tValue:   100,\n\t\t\tEnvVars: []string{\"MODELZETES_KUBE_QPS\"},\n\t\t\tAliases: []string{\"kq\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagBurst,\n\t\t\tValue:   250,\n\t\t\tUsage:   \"Burst for kubernetes client\",\n\t\t\tEnvVars: []string{\"MODELZETES_KUBE_BURST\"},\n\t\t\tAliases: []string{\"kb\"},\n\t\t},\n\t\t&cli.DurationFlag{\n\t\t\tName:    flagResyncPeriod,\n\t\t\tValue:   time.Minute * 5,\n\t\t\tUsage:   \"Resync period for kubernetes client\",\n\t\t\tEnvVars: []string{\"MODELZETES_KUBE_RESYNC_PERIOD\"},\n\t\t\tAliases: []string{\"kr\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagControllerThreads,\n\t\t\tValue:   1,\n\t\t\tUsage:   \"Number of threads to use for controller\",\n\t\t\tEnvVars: 
[]string{\"MODELZETES_CONTROLLER_THREAD_COUNT\"},\n\t\t\tAliases: []string{\"ct\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: flagHuggingfaceEndpoint,\n\t\t\tUsage: \"Endpoint for huggingface modelz API. If not provided, will use \" +\n\t\t\t\t\"https://huggingface.co by default\",\n\t\t\tEnvVars: []string{\"MODELZETES_HUGGINGFACE_ENDPOINT\"},\n\t\t\tAliases: []string{\"he\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeReadinessInitialDelaySeconds,\n\t\t\tValue:   2,\n\t\t\tUsage:   \"Initial delay for readiness probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_READINESS_INITIAL_DELAY_SECONDS\"},\n\t\t\tAliases: []string{\"prids\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeReadinessPeriodSeconds,\n\t\t\tValue:   1,\n\t\t\tUsage:   \"Period for readiness probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_READINESS_PERIOD_SECONDS\"},\n\t\t\tAliases: []string{\"prps\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeReadinessTimeoutSeconds,\n\t\t\tValue:   1,\n\t\t\tUsage:   \"Timeout for readiness probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_READINESS_TIMEOUT_SECONDS\"},\n\t\t\tAliases: []string{\"prts\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeLivenessInitialDelaySeconds,\n\t\t\tValue:   2,\n\t\t\tUsage:   \"Initial delay for liveness probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_LIVENESS_INITIAL_DELAY_SECONDS\"},\n\t\t\tAliases: []string{\"plids\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeLivenessPeriodSeconds,\n\t\t\tValue:   1,\n\t\t\tUsage:   \"Period for liveness probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_LIVENESS_PERIOD_SECONDS\"},\n\t\t\tAliases: []string{\"plps\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeLivenessTimeoutSeconds,\n\t\t\tValue:   1,\n\t\t\tUsage:   \"Timeout for liveness probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_LIVENESS_TIMEOUT_SECONDS\"},\n\t\t\tAliases: []string{\"plts\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    
flagProbeStartupInitialDelaySeconds,\n\t\t\tValue:   0,\n\t\t\tUsage:   \"Initial delay for startup probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_STARTUP_INITIAL_DELAY_SECONDS\"},\n\t\t\tAliases: []string{\"psids\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeStartupPeriodSeconds,\n\t\t\tValue:   2,\n\t\t\tUsage:   \"Period for startup probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_STARTUP_PERIOD_SECONDS\"},\n\t\t\tAliases: []string{\"psps\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:    flagProbeStartupTimeoutSeconds,\n\t\t\tValue:   1,\n\t\t\tUsage:   \"Timeout for startup probe\",\n\t\t\tEnvVars: []string{\"MODELZETES_PROBE_STARTUP_TIMEOUT_SECONDS\"},\n\t\t\tAliases: []string{\"psts\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:    flagInferenceImagePullPolicy,\n\t\t\tUsage:   \"Image pull policy for inference service.\",\n\t\t\tValue:   \"IfNotPresent\",\n\t\t\tEnvVars: []string{\"MODELZETES_INFERENCE_IMAGE_PULL_POLICY\"},\n\t\t\tAliases: []string{\"iipp\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:    flagInferenceSetUpRuntimeClassNvidia,\n\t\t\tUsage:   \"If true, will set up the Nvidia RuntimeClassName to the inference deployment.\",\n\t\t\tEnvVars: []string{\"MODELZETES_INFERENCE_SET_UP_RUNTIME_CLASS_NVIDIA\"},\n\t\t},\n\t}\n\tinternalApp.Action = runServer\n\n\t// Deal with debug flag.\n\tvar debugEnabled bool\n\n\tinternalApp.Before = func(context *cli.Context) error {\n\t\tdebugEnabled = context.Bool(flagDebug)\n\n\t\tfs := flag.NewFlagSet(\"\", flag.PanicOnError)\n\t\tklog.InitFlags(fs)\n\n\t\tif debugEnabled {\n\t\t\tfs.Set(\"v\", \"10\")\n\t\t} else {\n\t\t\tfs.Set(\"v\", \"0\")\n\t\t}\n\n\t\treturn nil\n\t}\n\treturn App{\n\t\tApp: internalApp,\n\t}\n}\n\nfunc runServer(clicontext *cli.Context) error {\n\tc := configFromCLI(clicontext)\n\n\tcfgString, _ := c.GetString()\n\tklog.V(0).Info(\"config: \", cfgString)\n\n\tif err := c.Validate(); err != nil {\n\t\tif clicontext.Bool(flagDebug) {\n\t\t\treturn errors.Wrap(err, \"invalid 
config: \"+cfgString)\n\t\t} else {\n\t\t\treturn errors.Wrap(err, \"invalid config\")\n\t\t}\n\t}\n\n\t// set up signals so we handle the first shutdown signal gracefully\n\tstopCh := signals.SetupSignalHandler()\n\n\ts, err := controller.New(c, stopCh)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create server\")\n\t}\n\n\treturn s.Run(c.Controller.ThreadCount, stopCh)\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/clientset.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage versioned\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\n\ttensorchordv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1\"\n\tdiscovery \"k8s.io/client-go/discovery\"\n\trest \"k8s.io/client-go/rest\"\n\tflowcontrol \"k8s.io/client-go/util/flowcontrol\"\n)\n\ntype Interface interface {\n\tDiscovery() discovery.DiscoveryInterface\n\tTensorchordV2alpha1() tensorchordv2alpha1.TensorchordV2alpha1Interface\n}\n\n// Clientset contains the clients for groups.\ntype Clientset struct {\n\t*discovery.DiscoveryClient\n\ttensorchordV2alpha1 *tensorchordv2alpha1.TensorchordV2alpha1Client\n}\n\n// TensorchordV2alpha1 retrieves the TensorchordV2alpha1Client\nfunc (c *Clientset) TensorchordV2alpha1() tensorchordv2alpha1.TensorchordV2alpha1Interface {\n\treturn c.tensorchordV2alpha1\n}\n\n// Discovery retrieves the DiscoveryClient\nfunc (c *Clientset) Discovery() discovery.DiscoveryInterface {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.DiscoveryClient\n}\n\n// NewForConfig creates a new Clientset for the given config.\n// If config's RateLimiter is not set and QPS and Burst are acceptable,\n// NewForConfig will generate a rate-limiter in configShallowCopy.\n// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),\n// where httpClient was generated with rest.HTTPClientFor(c).\nfunc NewForConfig(c *rest.Config) (*Clientset, error) {\n\tconfigShallowCopy := *c\n\n\tif configShallowCopy.UserAgent == \"\" {\n\t\tconfigShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()\n\t}\n\n\t// share the transport between all clients\n\thttpClient, err := rest.HTTPClientFor(&configShallowCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewForConfigAndClient(&configShallowCopy, 
httpClient)\n}\n\n// NewForConfigAndClient creates a new Clientset for the given config and http client.\n// Note the http client provided takes precedence over the configured transport values.\n// If config's RateLimiter is not set and QPS and Burst are acceptable,\n// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.\nfunc NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {\n\tconfigShallowCopy := *c\n\tif configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {\n\t\tif configShallowCopy.Burst <= 0 {\n\t\t\treturn nil, fmt.Errorf(\"burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0\")\n\t\t}\n\t\tconfigShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)\n\t}\n\n\tvar cs Clientset\n\tvar err error\n\tcs.tensorchordV2alpha1, err = tensorchordv2alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cs, nil\n}\n\n// NewForConfigOrDie creates a new Clientset for the given config and\n// panics if there is an error in the config.\nfunc NewForConfigOrDie(c *rest.Config) *Clientset {\n\tcs, err := NewForConfig(c)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cs\n}\n\n// New creates a new Clientset for the given RESTClient.\nfunc New(c rest.Interface) *Clientset {\n\tvar cs Clientset\n\tcs.tensorchordV2alpha1 = tensorchordv2alpha1.New(c)\n\n\tcs.DiscoveryClient = discovery.NewDiscoveryClient(c)\n\treturn &cs\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/doc.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package has the automatically generated clientset.\npackage versioned\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/fake/clientset_generated.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\tclientset \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\ttensorchordv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1\"\n\tfaketensorchordv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/fake\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/client-go/discovery\"\n\tfakediscovery \"k8s.io/client-go/discovery/fake\"\n\t\"k8s.io/client-go/testing\"\n)\n\n// NewSimpleClientset returns a clientset that will respond with the provided objects.\n// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,\n// without applying any validations and/or defaults. It shouldn't be considered a replacement\n// for a real clientset and is mostly useful in simple unit tests.\nfunc NewSimpleClientset(objects ...runtime.Object) *Clientset {\n\to := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())\n\tfor _, obj := range objects {\n\t\tif err := o.Add(obj); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tcs := &Clientset{tracker: o}\n\tcs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}\n\tcs.AddReactor(\"*\", \"*\", testing.ObjectReaction(o))\n\tcs.AddWatchReactor(\"*\", func(action testing.Action) (handled bool, ret watch.Interface, err error) {\n\t\tgvr := action.GetResource()\n\t\tns := action.GetNamespace()\n\t\twatch, err := o.Watch(gvr, ns)\n\t\tif err != nil {\n\t\t\treturn false, nil, err\n\t\t}\n\t\treturn true, watch, nil\n\t})\n\n\treturn cs\n}\n\n// Clientset implements clientset.Interface. Meant to be embedded into a\n// struct to get a default implementation. 
This makes faking out just the method\n// you want to test easier.\ntype Clientset struct {\n\ttesting.Fake\n\tdiscovery *fakediscovery.FakeDiscovery\n\ttracker   testing.ObjectTracker\n}\n\nfunc (c *Clientset) Discovery() discovery.DiscoveryInterface {\n\treturn c.discovery\n}\n\nfunc (c *Clientset) Tracker() testing.ObjectTracker {\n\treturn c.tracker\n}\n\nvar (\n\t_ clientset.Interface = &Clientset{}\n\t_ testing.FakeClient  = &Clientset{}\n)\n\n// TensorchordV2alpha1 retrieves the TensorchordV2alpha1Client\nfunc (c *Clientset) TensorchordV2alpha1() tensorchordv2alpha1.TensorchordV2alpha1Interface {\n\treturn &faketensorchordv2alpha1.FakeTensorchordV2alpha1{Fake: &c.Fake}\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/fake/doc.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package has the automatically generated fake clientset.\npackage fake\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/fake/register.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\ttensorchordv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n)\n\nvar scheme = runtime.NewScheme()\nvar codecs = serializer.NewCodecFactory(scheme)\n\nvar localSchemeBuilder = runtime.SchemeBuilder{\n\ttensorchordv2alpha1.AddToScheme,\n}\n\n// AddToScheme adds all types of this clientset into the given scheme. This allows composition\n// of clientsets, like in:\n//\n//\timport (\n//\t  \"k8s.io/client-go/kubernetes\"\n//\t  clientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n//\t  aggregatorclientsetscheme \"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme\"\n//\t)\n//\n//\tkclientset, _ := kubernetes.NewForConfig(c)\n//\t_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)\n//\n// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types\n// correctly.\nvar AddToScheme = localSchemeBuilder.AddToScheme\n\nfunc init() {\n\tv1.AddToGroupVersion(scheme, schema.GroupVersion{Version: \"v1\"})\n\tutilruntime.Must(AddToScheme(scheme))\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/scheme/doc.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package contains the scheme of the automatically generated clientset.\npackage scheme\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/scheme/register.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage scheme\n\nimport (\n\ttensorchordv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n)\n\nvar Scheme = runtime.NewScheme()\nvar Codecs = serializer.NewCodecFactory(Scheme)\nvar ParameterCodec = runtime.NewParameterCodec(Scheme)\nvar localSchemeBuilder = runtime.SchemeBuilder{\n\ttensorchordv2alpha1.AddToScheme,\n}\n\n// AddToScheme adds all types of this clientset into the given scheme. This allows composition\n// of clientsets, like in:\n//\n//\timport (\n//\t  \"k8s.io/client-go/kubernetes\"\n//\t  clientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n//\t  aggregatorclientsetscheme \"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme\"\n//\t)\n//\n//\tkclientset, _ := kubernetes.NewForConfig(c)\n//\t_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)\n//\n// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types\n// correctly.\nvar AddToScheme = localSchemeBuilder.AddToScheme\n\nfunc init() {\n\tv1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: \"v1\"})\n\tutilruntime.Must(AddToScheme(Scheme))\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/doc.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// This package has the automatically generated typed clients.\npackage v2alpha1\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/fake/doc.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\n// Package fake has the automatically generated clients.\npackage fake\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/fake/fake_inference.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\t\"context\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tlabels \"k8s.io/apimachinery/pkg/labels\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\ttypes \"k8s.io/apimachinery/pkg/types\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\ttesting \"k8s.io/client-go/testing\"\n)\n\n// FakeInferences implements InferenceInterface\ntype FakeInferences struct {\n\tFake *FakeTensorchordV2alpha1\n\tns   string\n}\n\nvar inferencesResource = schema.GroupVersionResource{Group: \"tensorchord.ai\", Version: \"v2alpha1\", Resource: \"inferences\"}\n\nvar inferencesKind = schema.GroupVersionKind{Group: \"tensorchord.ai\", Version: \"v2alpha1\", Kind: \"Inference\"}\n\n// Get takes name of the inference, and returns the corresponding inference object, and an error if there is any.\nfunc (c *FakeInferences) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.Inference, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(inferencesResource, c.ns, name), &v2alpha1.Inference{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v2alpha1.Inference), err\n}\n\n// List takes label and field selectors, and returns the list of Inferences that match those selectors.\nfunc (c *FakeInferences) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.InferenceList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(inferencesResource, inferencesKind, c.ns, opts), &v2alpha1.InferenceList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := 
&v2alpha1.InferenceList{ListMeta: obj.(*v2alpha1.InferenceList).ListMeta}\n\tfor _, item := range obj.(*v2alpha1.InferenceList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}\n\n// Watch returns a watch.Interface that watches the requested inferences.\nfunc (c *FakeInferences) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(inferencesResource, c.ns, opts))\n\n}\n\n// Create takes the representation of a inference and creates it.  Returns the server's representation of the inference, and an error, if there is any.\nfunc (c *FakeInferences) Create(ctx context.Context, inference *v2alpha1.Inference, opts v1.CreateOptions) (result *v2alpha1.Inference, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(inferencesResource, c.ns, inference), &v2alpha1.Inference{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v2alpha1.Inference), err\n}\n\n// Update takes the representation of a inference and updates it. Returns the server's representation of the inference, and an error, if there is any.\nfunc (c *FakeInferences) Update(ctx context.Context, inference *v2alpha1.Inference, opts v1.UpdateOptions) (result *v2alpha1.Inference, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(inferencesResource, c.ns, inference), &v2alpha1.Inference{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v2alpha1.Inference), err\n}\n\n// Delete takes name of the inference and deletes it. 
Returns an error if one occurs.\nfunc (c *FakeInferences) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteActionWithOptions(inferencesResource, c.ns, name, opts), &v2alpha1.Inference{})\n\n\treturn err\n}\n\n// DeleteCollection deletes a collection of objects.\nfunc (c *FakeInferences) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {\n\taction := testing.NewDeleteCollectionAction(inferencesResource, c.ns, listOpts)\n\n\t_, err := c.Fake.Invokes(action, &v2alpha1.InferenceList{})\n\treturn err\n}\n\n// Patch applies the patch and returns the patched inference.\nfunc (c *FakeInferences) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.Inference, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(inferencesResource, c.ns, name, pt, data, subresources...), &v2alpha1.Inference{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v2alpha1.Inference), err\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/fake/fake_modelzetes_client.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage fake\n\nimport (\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1\"\n\trest \"k8s.io/client-go/rest\"\n\ttesting \"k8s.io/client-go/testing\"\n)\n\ntype FakeTensorchordV2alpha1 struct {\n\t*testing.Fake\n}\n\nfunc (c *FakeTensorchordV2alpha1) Inferences(namespace string) v2alpha1.InferenceInterface {\n\treturn &FakeInferences{c, namespace}\n}\n\n// RESTClient returns a RESTClient that is used to communicate\n// with API server by this client implementation.\nfunc (c *FakeTensorchordV2alpha1) RESTClient() rest.Interface {\n\tvar ret *rest.RESTClient\n\treturn ret\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/generated_expansion.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage v2alpha1\n\ntype InferenceExpansion interface{}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/inference.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage v2alpha1\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tscheme \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/scheme\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\ttypes \"k8s.io/apimachinery/pkg/types\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\trest \"k8s.io/client-go/rest\"\n)\n\n// InferencesGetter has a method to return a InferenceInterface.\n// A group's client should implement this interface.\ntype InferencesGetter interface {\n\tInferences(namespace string) InferenceInterface\n}\n\n// InferenceInterface has methods to work with Inference resources.\ntype InferenceInterface interface {\n\tCreate(ctx context.Context, inference *v2alpha1.Inference, opts v1.CreateOptions) (*v2alpha1.Inference, error)\n\tUpdate(ctx context.Context, inference *v2alpha1.Inference, opts v1.UpdateOptions) (*v2alpha1.Inference, error)\n\tDelete(ctx context.Context, name string, opts v1.DeleteOptions) error\n\tDeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error\n\tGet(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.Inference, error)\n\tList(ctx context.Context, opts v1.ListOptions) (*v2alpha1.InferenceList, error)\n\tWatch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)\n\tPatch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.Inference, err error)\n\tInferenceExpansion\n}\n\n// inferences implements InferenceInterface\ntype inferences struct {\n\tclient rest.Interface\n\tns     string\n}\n\n// newInferences returns a Inferences\nfunc newInferences(c *TensorchordV2alpha1Client, namespace 
string) *inferences {\n\treturn &inferences{\n\t\tclient: c.RESTClient(),\n\t\tns:     namespace,\n\t}\n}\n\n// Get takes name of the inference, and returns the corresponding inference object, and an error if there is any.\nfunc (c *inferences) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.Inference, err error) {\n\tresult = &v2alpha1.Inference{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// List takes label and field selectors, and returns the list of Inferences that match those selectors.\nfunc (c *inferences) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.InferenceList, err error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\tresult = &v2alpha1.InferenceList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// Watch returns a watch.Interface that watches the requested inferences.\nfunc (c *inferences) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}\n\n// Create takes the representation of a inference and creates it.  
Returns the server's representation of the inference, and an error, if there is any.\nfunc (c *inferences) Create(ctx context.Context, inference *v2alpha1.Inference, opts v1.CreateOptions) (result *v2alpha1.Inference, err error) {\n\tresult = &v2alpha1.Inference{}\n\terr = c.client.Post().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(inference).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// Update takes the representation of a inference and updates it. Returns the server's representation of the inference, and an error, if there is any.\nfunc (c *inferences) Update(ctx context.Context, inference *v2alpha1.Inference, opts v1.UpdateOptions) (result *v2alpha1.Inference, err error) {\n\tresult = &v2alpha1.Inference{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tName(inference.Name).\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(inference).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n\n// Delete takes name of the inference and deletes it. 
Returns an error if one occurs.\nfunc (c *inferences) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tName(name).\n\t\tBody(&opts).\n\t\tDo(ctx).\n\t\tError()\n}\n\n// DeleteCollection deletes a collection of objects.\nfunc (c *inferences) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {\n\tvar timeout time.Duration\n\tif listOpts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second\n\t}\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tVersionedParams(&listOpts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tBody(&opts).\n\t\tDo(ctx).\n\t\tError()\n}\n\n// Patch applies the patch and returns the patched inference.\nfunc (c *inferences) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.Inference, err error) {\n\tresult = &v2alpha1.Inference{}\n\terr = c.client.Patch(pt).\n\t\tNamespace(c.ns).\n\t\tResource(\"inferences\").\n\t\tName(name).\n\t\tSubResource(subresources...).\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(data).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1/modelzetes_client.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by client-gen. DO NOT EDIT.\n\npackage v2alpha1\n\nimport (\n\t\"net/http\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/scheme\"\n\trest \"k8s.io/client-go/rest\"\n)\n\ntype TensorchordV2alpha1Interface interface {\n\tRESTClient() rest.Interface\n\tInferencesGetter\n}\n\n// TensorchordV2alpha1Client is used to interact with features provided by the tensorchord.ai group.\ntype TensorchordV2alpha1Client struct {\n\trestClient rest.Interface\n}\n\nfunc (c *TensorchordV2alpha1Client) Inferences(namespace string) InferenceInterface {\n\treturn newInferences(c, namespace)\n}\n\n// NewForConfig creates a new TensorchordV2alpha1Client for the given config.\n// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),\n// where httpClient was generated with rest.HTTPClientFor(c).\nfunc NewForConfig(c *rest.Config) (*TensorchordV2alpha1Client, error) {\n\tconfig := *c\n\tif err := setConfigDefaults(&config); err != nil {\n\t\treturn nil, err\n\t}\n\thttpClient, err := rest.HTTPClientFor(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewForConfigAndClient(&config, httpClient)\n}\n\n// NewForConfigAndClient creates a new TensorchordV2alpha1Client for the given config and http client.\n// Note the http client provided takes precedence over the configured transport values.\nfunc NewForConfigAndClient(c *rest.Config, h *http.Client) (*TensorchordV2alpha1Client, error) {\n\tconfig := *c\n\tif err := setConfigDefaults(&config); err != nil {\n\t\treturn nil, err\n\t}\n\tclient, err := rest.RESTClientForConfigAndClient(&config, h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TensorchordV2alpha1Client{client}, nil\n}\n\n// NewForConfigOrDie 
creates a new TensorchordV2alpha1Client for the given config and\n// panics if there is an error in the config.\nfunc NewForConfigOrDie(c *rest.Config) *TensorchordV2alpha1Client {\n\tclient, err := NewForConfig(c)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn client\n}\n\n// New creates a new TensorchordV2alpha1Client for the given RESTClient.\nfunc New(c rest.Interface) *TensorchordV2alpha1Client {\n\treturn &TensorchordV2alpha1Client{c}\n}\n\nfunc setConfigDefaults(config *rest.Config) error {\n\tgv := v2alpha1.SchemeGroupVersion\n\tconfig.GroupVersion = &gv\n\tconfig.APIPath = \"/apis\"\n\tconfig.NegotiatedSerializer = scheme.Codecs.WithoutConversion()\n\n\tif config.UserAgent == \"\" {\n\t\tconfig.UserAgent = rest.DefaultKubernetesUserAgent()\n\t}\n\n\treturn nil\n}\n\n// RESTClient returns a RESTClient that is used to communicate\n// with API server by this client implementation.\nfunc (c *TensorchordV2alpha1Client) RESTClient() rest.Interface {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.restClient\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/informers/externalversions/factory.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage externalversions\n\nimport (\n\treflect \"reflect\"\n\tsync \"sync\"\n\ttime \"time\"\n\n\tversioned \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions/internalinterfaces\"\n\tmodelzetes \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions/modelzetes\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// SharedInformerOption defines the functional option type for SharedInformerFactory.\ntype SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory\n\ntype sharedInformerFactory struct {\n\tclient           versioned.Interface\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n\tlock             sync.Mutex\n\tdefaultResync    time.Duration\n\tcustomResync     map[reflect.Type]time.Duration\n\n\tinformers map[reflect.Type]cache.SharedIndexInformer\n\t// startedInformers is used for tracking which informers have been started.\n\t// This allows Start() to be called multiple times safely.\n\tstartedInformers map[reflect.Type]bool\n\t// wg tracks how many goroutines were started.\n\twg sync.WaitGroup\n\t// shuttingDown is true when Shutdown has been called. 
It may still be running\n\t// because it needs to wait for goroutines.\n\tshuttingDown bool\n}\n\n// WithCustomResyncConfig sets a custom resync period for the specified informer types.\nfunc WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {\n\treturn func(factory *sharedInformerFactory) *sharedInformerFactory {\n\t\tfor k, v := range resyncConfig {\n\t\t\tfactory.customResync[reflect.TypeOf(k)] = v\n\t\t}\n\t\treturn factory\n\t}\n}\n\n// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.\nfunc WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {\n\treturn func(factory *sharedInformerFactory) *sharedInformerFactory {\n\t\tfactory.tweakListOptions = tweakListOptions\n\t\treturn factory\n\t}\n}\n\n// WithNamespace limits the SharedInformerFactory to the specified namespace.\nfunc WithNamespace(namespace string) SharedInformerOption {\n\treturn func(factory *sharedInformerFactory) *sharedInformerFactory {\n\t\tfactory.namespace = namespace\n\t\treturn factory\n\t}\n}\n\n// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.\nfunc NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {\n\treturn NewSharedInformerFactoryWithOptions(client, defaultResync)\n}\n\n// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.\n// Listers obtained via this SharedInformerFactory will be subject to the same filters\n// as specified here.\n// Deprecated: Please use NewSharedInformerFactoryWithOptions instead\nfunc NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {\n\treturn NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), 
WithTweakListOptions(tweakListOptions))\n}\n\n// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.\nfunc NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {\n\tfactory := &sharedInformerFactory{\n\t\tclient:           client,\n\t\tnamespace:        v1.NamespaceAll,\n\t\tdefaultResync:    defaultResync,\n\t\tinformers:        make(map[reflect.Type]cache.SharedIndexInformer),\n\t\tstartedInformers: make(map[reflect.Type]bool),\n\t\tcustomResync:     make(map[reflect.Type]time.Duration),\n\t}\n\n\t// Apply all options\n\tfor _, opt := range options {\n\t\tfactory = opt(factory)\n\t}\n\n\treturn factory\n}\n\nfunc (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tif f.shuttingDown {\n\t\treturn\n\t}\n\n\tfor informerType, informer := range f.informers {\n\t\tif !f.startedInformers[informerType] {\n\t\t\tf.wg.Add(1)\n\t\t\t// We need a new variable in each loop iteration,\n\t\t\t// otherwise the goroutine would use the loop variable\n\t\t\t// and that keeps changing.\n\t\t\tinformer := informer\n\t\t\tgo func() {\n\t\t\t\tdefer f.wg.Done()\n\t\t\t\tinformer.Run(stopCh)\n\t\t\t}()\n\t\t\tf.startedInformers[informerType] = true\n\t\t}\n\t}\n}\n\nfunc (f *sharedInformerFactory) Shutdown() {\n\tf.lock.Lock()\n\tf.shuttingDown = true\n\tf.lock.Unlock()\n\n\t// Will return immediately if there is nothing to wait for.\n\tf.wg.Wait()\n}\n\nfunc (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {\n\tinformers := func() map[reflect.Type]cache.SharedIndexInformer {\n\t\tf.lock.Lock()\n\t\tdefer f.lock.Unlock()\n\n\t\tinformers := map[reflect.Type]cache.SharedIndexInformer{}\n\t\tfor informerType, informer := range f.informers {\n\t\t\tif f.startedInformers[informerType] {\n\t\t\t\tinformers[informerType] = 
informer\n\t\t\t}\n\t\t}\n\t\treturn informers\n\t}()\n\n\tres := map[reflect.Type]bool{}\n\tfor informType, informer := range informers {\n\t\tres[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)\n\t}\n\treturn res\n}\n\n// InternalInformerFor returns the SharedIndexInformer for obj using an internal\n// client.\nfunc (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tinformerType := reflect.TypeOf(obj)\n\tinformer, exists := f.informers[informerType]\n\tif exists {\n\t\treturn informer\n\t}\n\n\tresyncPeriod, exists := f.customResync[informerType]\n\tif !exists {\n\t\tresyncPeriod = f.defaultResync\n\t}\n\n\tinformer = newFunc(f.client, resyncPeriod)\n\tf.informers[informerType] = informer\n\n\treturn informer\n}\n\n// SharedInformerFactory provides shared informers for resources in all known\n// API group versions.\n//\n// It is typically used like this:\n//\n//\tctx, cancel := context.Background()\n//\tdefer cancel()\n//\tfactory := NewSharedInformerFactory(client, resyncPeriod)\n//\tdefer factory.WaitForStop()    // Returns immediately if nothing was started.\n//\tgenericInformer := factory.ForResource(resource)\n//\ttypedInformer := factory.SomeAPIGroup().V1().SomeType()\n//\tfactory.Start(ctx.Done())          // Start processing these informers.\n//\tsynced := factory.WaitForCacheSync(ctx.Done())\n//\tfor v, ok := range synced {\n//\t    if !ok {\n//\t        fmt.Fprintf(os.Stderr, \"caches failed to sync: %v\", v)\n//\t        return\n//\t    }\n//\t}\n//\n//\t// Creating informers can also be created after Start, but then\n//\t// Start must be called again:\n//\tanotherGenericInformer := factory.ForResource(resource)\n//\tfactory.Start(ctx.Done())\ntype SharedInformerFactory interface {\n\tinternalinterfaces.SharedInformerFactory\n\n\t// Start initializes all requested informers. 
They are handled in goroutines\n\t// which run until the stop channel gets closed.\n\tStart(stopCh <-chan struct{})\n\n\t// Shutdown marks a factory as shutting down. At that point no new\n\t// informers can be started anymore and Start will return without\n\t// doing anything.\n\t//\n\t// In addition, Shutdown blocks until all goroutines have terminated. For that\n\t// to happen, the close channel(s) that they were started with must be closed,\n\t// either before Shutdown gets called or while it is waiting.\n\t//\n\t// Shutdown may be called multiple times, even concurrently. All such calls will\n\t// block until all goroutines have terminated.\n\tShutdown()\n\n\t// WaitForCacheSync blocks until all started informers' caches were synced\n\t// or the stop channel gets closed.\n\tWaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool\n\n\t// ForResource gives generic access to a shared informer of the matching type.\n\tForResource(resource schema.GroupVersionResource) (GenericInformer, error)\n\n\t// InternalInformerFor returns the SharedIndexInformer for obj using an internal\n\t// client.\n\tInformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer\n\n\tTensorchord() modelzetes.Interface\n}\n\nfunc (f *sharedInformerFactory) Tensorchord() modelzetes.Interface {\n\treturn modelzetes.New(f, f.namespace, f.tweakListOptions)\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/informers/externalversions/generic.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage externalversions\n\nimport (\n\t\"fmt\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// GenericInformer is type of SharedIndexInformer which will locate and delegate to other\n// sharedInformers based on type\ntype GenericInformer interface {\n\tInformer() cache.SharedIndexInformer\n\tLister() cache.GenericLister\n}\n\ntype genericInformer struct {\n\tinformer cache.SharedIndexInformer\n\tresource schema.GroupResource\n}\n\n// Informer returns the SharedIndexInformer.\nfunc (f *genericInformer) Informer() cache.SharedIndexInformer {\n\treturn f.informer\n}\n\n// Lister returns the GenericLister.\nfunc (f *genericInformer) Lister() cache.GenericLister {\n\treturn cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)\n}\n\n// ForResource gives generic access to a shared informer of the matching type\n// TODO extend this to unknown resources with a client pool\nfunc (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {\n\tswitch resource {\n\t// Group=tensorchord.ai, Version=v2alpha1\n\tcase v2alpha1.SchemeGroupVersion.WithResource(\"inferences\"):\n\t\treturn &genericInformer{resource: resource.GroupResource(), informer: f.Tensorchord().V2alpha1().Inferences().Informer()}, nil\n\n\t}\n\n\treturn nil, fmt.Errorf(\"no informer found for %v\", resource)\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage internalinterfaces\n\nimport (\n\ttime \"time\"\n\n\tversioned \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.\ntype NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer\n\n// SharedInformerFactory a small interface to allow for adding an informer without an import cycle\ntype SharedInformerFactory interface {\n\tStart(stopCh <-chan struct{})\n\tInformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer\n}\n\n// TweakListOptionsFunc is a function that transforms a v1.ListOptions.\ntype TweakListOptionsFunc func(*v1.ListOptions)\n"
  },
  {
    "path": "modelzetes/pkg/client/informers/externalversions/modelzetes/interface.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage modelzetes\n\nimport (\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions/internalinterfaces\"\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions/modelzetes/v2alpha1\"\n)\n\n// Interface provides access to each of this group's versions.\ntype Interface interface {\n\t// V2alpha1 provides access to shared informers for resources in V2alpha1.\n\tV2alpha1() v2alpha1.Interface\n}\n\ntype group struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n}\n\n// New returns a new Interface.\nfunc New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {\n\treturn &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}\n}\n\n// V2alpha1 returns a new v2alpha1.Interface.\nfunc (g *group) V2alpha1() v2alpha1.Interface {\n\treturn v2alpha1.New(g.factory, g.namespace, g.tweakListOptions)\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/informers/externalversions/modelzetes/v2alpha1/inference.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage v2alpha1\n\nimport (\n\t\"context\"\n\ttime \"time\"\n\n\tmodelzetesv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tversioned \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions/internalinterfaces\"\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/listers/modelzetes/v2alpha1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// InferenceInformer provides access to a shared informer and lister for\n// Inferences.\ntype InferenceInformer interface {\n\tInformer() cache.SharedIndexInformer\n\tLister() v2alpha1.InferenceLister\n}\n\ntype inferenceInformer struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n\tnamespace        string\n}\n\n// NewInferenceInformer constructs a new informer for Inference type.\n// Always prefer using an informer factory to get a shared informer instead of getting an independent\n// one. This reduces memory footprint and number of connections to the server.\nfunc NewInferenceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {\n\treturn NewFilteredInferenceInformer(client, namespace, resyncPeriod, indexers, nil)\n}\n\n// NewFilteredInferenceInformer constructs a new informer for Inference type.\n// Always prefer using an informer factory to get a shared informer instead of getting an independent\n// one. 
This reduces memory footprint and number of connections to the server.\nfunc NewFilteredInferenceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {\n\treturn cache.NewSharedIndexInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options v1.ListOptions) (runtime.Object, error) {\n\t\t\t\tif tweakListOptions != nil {\n\t\t\t\t\ttweakListOptions(&options)\n\t\t\t\t}\n\t\t\t\treturn client.TensorchordV2alpha1().Inferences(namespace).List(context.TODO(), options)\n\t\t\t},\n\t\t\tWatchFunc: func(options v1.ListOptions) (watch.Interface, error) {\n\t\t\t\tif tweakListOptions != nil {\n\t\t\t\t\ttweakListOptions(&options)\n\t\t\t\t}\n\t\t\t\treturn client.TensorchordV2alpha1().Inferences(namespace).Watch(context.TODO(), options)\n\t\t\t},\n\t\t},\n\t\t&modelzetesv2alpha1.Inference{},\n\t\tresyncPeriod,\n\t\tindexers,\n\t)\n}\n\nfunc (f *inferenceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n\treturn NewFilteredInferenceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)\n}\n\nfunc (f *inferenceInformer) Informer() cache.SharedIndexInformer {\n\treturn f.factory.InformerFor(&modelzetesv2alpha1.Inference{}, f.defaultInformer)\n}\n\nfunc (f *inferenceInformer) Lister() v2alpha1.InferenceLister {\n\treturn v2alpha1.NewInferenceLister(f.Informer().GetIndexer())\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/informers/externalversions/modelzetes/v2alpha1/interface.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by informer-gen. DO NOT EDIT.\n\npackage v2alpha1\n\nimport (\n\tinternalinterfaces \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions/internalinterfaces\"\n)\n\n// Interface provides access to all the informers in this group version.\ntype Interface interface {\n\t// Inferences returns a InferenceInformer.\n\tInferences() InferenceInformer\n}\n\ntype version struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n}\n\n// New returns a new Interface.\nfunc New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {\n\treturn &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}\n}\n\n// Inferences returns a InferenceInformer.\nfunc (v *version) Inferences() InferenceInformer {\n\treturn &inferenceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}\n"
  },
  {
    "path": "modelzetes/pkg/client/listers/modelzetes/v2alpha1/expansion_generated.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by lister-gen. DO NOT EDIT.\n\npackage v2alpha1\n\n// InferenceListerExpansion allows custom methods to be added to\n// InferenceLister.\ntype InferenceListerExpansion interface{}\n\n// InferenceNamespaceListerExpansion allows custom methods to be added to\n// InferenceNamespaceLister.\ntype InferenceNamespaceListerExpansion interface{}\n"
  },
  {
    "path": "modelzetes/pkg/client/listers/modelzetes/v2alpha1/inference.go",
    "content": "/*\nCopyright 2019-2023 TensorChord Inc.\n\nLicensed under the MIT license. See LICENSE file in the project root for full license information.\n*/\n\n// Code generated by lister-gen. DO NOT EDIT.\n\npackage v2alpha1\n\nimport (\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/client-go/tools/cache\"\n)\n\n// InferenceLister helps list Inferences.\n// All objects returned here must be treated as read-only.\ntype InferenceLister interface {\n\t// List lists all Inferences in the indexer.\n\t// Objects returned here must be treated as read-only.\n\tList(selector labels.Selector) (ret []*v2alpha1.Inference, err error)\n\t// Inferences returns an object that can list and get Inferences.\n\tInferences(namespace string) InferenceNamespaceLister\n\tInferenceListerExpansion\n}\n\n// inferenceLister implements the InferenceLister interface.\ntype inferenceLister struct {\n\tindexer cache.Indexer\n}\n\n// NewInferenceLister returns a new InferenceLister.\nfunc NewInferenceLister(indexer cache.Indexer) InferenceLister {\n\treturn &inferenceLister{indexer: indexer}\n}\n\n// List lists all Inferences in the indexer.\nfunc (s *inferenceLister) List(selector labels.Selector) (ret []*v2alpha1.Inference, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v2alpha1.Inference))\n\t})\n\treturn ret, err\n}\n\n// Inferences returns an object that can list and get Inferences.\nfunc (s *inferenceLister) Inferences(namespace string) InferenceNamespaceLister {\n\treturn inferenceNamespaceLister{indexer: s.indexer, namespace: namespace}\n}\n\n// InferenceNamespaceLister helps list and get Inferences.\n// All objects returned here must be treated as read-only.\ntype InferenceNamespaceLister interface {\n\t// List lists all Inferences in the indexer for a given namespace.\n\t// Objects returned 
here must be treated as read-only.\n\tList(selector labels.Selector) (ret []*v2alpha1.Inference, err error)\n\t// Get retrieves the Inference from the indexer for a given namespace and name.\n\t// Objects returned here must be treated as read-only.\n\tGet(name string) (*v2alpha1.Inference, error)\n\tInferenceNamespaceListerExpansion\n}\n\n// inferenceNamespaceLister implements the InferenceNamespaceLister\n// interface.\ntype inferenceNamespaceLister struct {\n\tindexer   cache.Indexer\n\tnamespace string\n}\n\n// List lists all Inferences in the indexer for a given namespace.\nfunc (s inferenceNamespaceLister) List(selector labels.Selector) (ret []*v2alpha1.Inference, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v2alpha1.Inference))\n\t})\n\treturn ret, err\n}\n\n// Get retrieves the Inference from the indexer for a given namespace and name.\nfunc (s inferenceNamespaceLister) Get(name string) (*v2alpha1.Inference, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v2alpha1.Resource(\"inference\"), name)\n\t}\n\treturn obj.(*v2alpha1.Inference), nil\n}\n"
  },
  {
    "path": "modelzetes/pkg/config/config.go",
    "content": "package config\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"time\"\n)\n\ntype Config struct {\n\tMetrics          MetricsConfig          `json:\"metrics,omitempty\"`\n\tKubeConfig       KubeConfig             `json:\"kube_config,omitempty\"`\n\tController       ControllerConfig       `json:\"controller,omitempty\"`\n\tHuggingfaceProxy HuggingfaceProxyConfig `json:\"huggingface_proxy,omitempty\"`\n\tProbes           ProbesConfig           `json:\"probes,omitempty\"`\n\tInference        InferenceConfig        `json:\"inference,omitempty\"`\n}\n\ntype InferenceConfig struct {\n\tImagePullPolicy         string `json:\"image_pull_policy,omitempty\"`\n\tSetUpRuntimeClassNvidia bool   `json:\"set_up_runtime_class_nvidia,omitempty\"`\n}\n\ntype ProbesConfig struct {\n\tStartup   ProbeConfig `json:\"startup,omitempty\"`\n\tReadiness ProbeConfig `json:\"readiness,omitempty\"`\n\tLiveness  ProbeConfig `json:\"liveness,omitempty\"`\n}\n\ntype ProbeConfig struct {\n\tInitialDelaySeconds int `json:\"initial_delay_seconds,omitempty\"`\n\tPeriodSeconds       int `json:\"period_seconds,omitempty\"`\n\tTimeoutSeconds      int `json:\"timeout_seconds,omitempty\"`\n}\n\ntype HuggingfaceProxyConfig struct {\n\tEndpoint string `json:\"endpoint,omitempty\"`\n}\n\ntype ControllerConfig struct {\n\tThreadCount int `json:\"thread_count,omitempty\"`\n}\n\ntype MetricsConfig struct {\n\tServerPort int `json:\"server_port,omitempty\"`\n}\ntype KubeConfig struct {\n\tKubeconfig   string        `json:\"kubeconfig,omitempty\"`\n\tMasterURL    string        `json:\"master_url,omitempty\"`\n\tQPS          int           `json:\"qps,omitempty\"`\n\tBurst        int           `json:\"burst,omitempty\"`\n\tResyncPeriod time.Duration `json:\"resync_period,omitempty\"`\n}\n\nfunc New() Config {\n\treturn Config{}\n}\n\nfunc (c Config) GetString() (string, error) {\n\tbytes, err := json.Marshal(c)\n\treturn string(bytes), err\n}\n\nfunc (c Config) Validate() error {\n\tif 
c.KubeConfig.QPS == 0 ||\n\t\tc.KubeConfig.Burst == 0 ||\n\t\tc.KubeConfig.ResyncPeriod == 0 {\n\t\treturn errors.New(\"invalid kubeconfig\")\n\t}\n\n\tif c.Metrics.ServerPort <= 0 {\n\t\treturn errors.New(\"invalid metrics config\")\n\t}\n\n\tif c.Controller.ThreadCount == 0 {\n\t\treturn errors.New(\"invalid controller config\")\n\t}\n\n\tif c.Inference.ImagePullPolicy == \"\" {\n\t\treturn errors.New(\"invalid inference config\")\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "modelzetes/pkg/consts/consts.go",
    "content": "package consts\n\nconst (\n\tResourceNvidiaGPU = \"nvidia.com/gpu\"\n\n\tLabelInferenceName      = \"inference\"\n\tLabelInferenceNamespace = \"inference-namespace\"\n\tLabelBuildName          = \"ai.tensorchord.build\"\n\tLabelName               = \"ai.tensorchord.name\"\n\tLabelNamespace          = \"modelz.tensorchord.ai/namespace\"\n\tLabelServerResource     = \"ai.tensorchord.server-resource\"\n\n\tAnnotationBuilding        = \"ai.tensorchord.building\"\n\tAnnotationDockerImage     = \"ai.tensorchord.docker.image\"\n\tAnnotationControlPlaneKey = \"ai.tensorchord.control-plane\"\n\n\tModelzAnnotationValue = \"modelz\"\n\n\tTolerationGPU              = \"ai.tensorchord.gpu\"\n\tTolerationNvidiaGPUPresent = \"nvidia.com/gpu\"\n\n\t//OrchestrationIdentifier identifier string for provider orchestration\n\tOrchestrationIdentifier = \"kubernetes\"\n\t//ProviderName name of the provider\n\tProviderName = \"modelzetes\"\n\n\tDefaultServicePrefix = \"mdz-\"\n\n\tDefaultHTTPProbePath = \"/\"\n\n\t// MaxReplicas is the maximum number of replicas that can be set for a inference.\n\tMaxReplicas = 5\n)\n"
  },
  {
    "path": "modelzetes/pkg/controller/annotations_test.go",
    "content": "package controller\n\nimport (\n\t\"testing\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n)\n\nfunc Test_makeAnnotations_NoKeys(t *testing.T) {\n\tannotationVal := `{\"name\":\"\",\"image\":\"\"}`\n\n\tspec := v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{},\n\t}\n\n\tannotations := makeAnnotations(&spec)\n\n\tif _, ok := annotations[\"prometheus.io.scrape\"]; !ok {\n\t\tt.Errorf(\"wanted annotation \" + \"prometheus.io.scrape\" + \" to be added\")\n\t\tt.Fail()\n\t}\n\tif val, _ := annotations[\"prometheus.io.scrape\"]; val != \"false\" {\n\t\tt.Errorf(\"wanted annotation \" + \"prometheus.io.scrape\" + ` to equal \"false\"`)\n\t\tt.Fail()\n\t}\n\n\tif _, ok := annotations[annotationInferenceSpec]; !ok {\n\t\tt.Errorf(\"wanted annotation \" + annotationInferenceSpec)\n\t\tt.Fail()\n\t}\n\n\tif val, _ := annotations[annotationInferenceSpec]; val != annotationVal {\n\t\tt.Errorf(\"Annotation \" + annotationInferenceSpec + \"\\nwant: '\" + annotationVal + \"'\\ngot: '\" + val + \"'\")\n\t\tt.Fail()\n\t}\n}\n\nfunc Test_makeAnnotations_WithKeyAndValue(t *testing.T) {\n\tannotationVal := `{\"name\":\"\",\"image\":\"\",\"annotations\":{\"key\":\"value\",\"key2\":\"value2\"}}`\n\n\tspec := v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"key\":  \"value\",\n\t\t\t\t\"key2\": \"value2\",\n\t\t\t},\n\t\t},\n\t}\n\n\tannotations := makeAnnotations(&spec)\n\n\tif _, ok := annotations[\"prometheus.io.scrape\"]; !ok {\n\t\tt.Errorf(\"wanted annotation \" + \"prometheus.io.scrape\" + \" to be added\")\n\t\tt.Fail()\n\t}\n\tif val := annotations[\"prometheus.io.scrape\"]; val != \"false\" {\n\t\tt.Errorf(\"wanted annotation \" + \"prometheus.io.scrape\" + ` to equal \"false\"`)\n\t\tt.Fail()\n\t}\n\n\tif _, ok := annotations[annotationInferenceSpec]; !ok {\n\t\tt.Errorf(\"wanted annotation \" + annotationInferenceSpec)\n\t\tt.Fail()\n\t}\n\n\tif val := 
annotations[annotationInferenceSpec]; val != annotationVal {\n\t\tt.Errorf(\"Annotation \" + annotationInferenceSpec + \"\\nwant: '\" + annotationVal + \"'\\ngot: '\" + val + \"'\")\n\t\tt.Fail()\n\t}\n}\n\nfunc Test_makeAnnotationsDoesNotModifyOriginalSpec(t *testing.T) {\n\tspecAnnotations := map[string]string{\n\t\t\"test.foo\": \"bar\",\n\t}\n\tfunction := &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:        \"testfunc\",\n\t\t\tAnnotations: specAnnotations,\n\t\t},\n\t}\n\n\texpectedAnnotations := map[string]string{\n\t\t\"prometheus.io.scrape\":  \"false\",\n\t\t\"test.foo\":              \"bar\",\n\t\tannotationInferenceSpec: `{\"name\":\"testfunc\",\"image\":\"\",\"annotations\":{\"test.foo\":\"bar\"}}`,\n\t}\n\n\tmakeAnnotations(function)\n\tannotations := makeAnnotations(function)\n\n\tif len(specAnnotations) != 1 {\n\t\tt.Errorf(\"length of original spec annotations has changed, expected 1, got %d\", len(specAnnotations))\n\t}\n\n\tif specAnnotations[\"test.foo\"] != \"bar\" {\n\t\tt.Errorf(\"original spec annotation has changed\")\n\t}\n\n\tfor name, expectedValue := range expectedAnnotations {\n\t\tactualValue := annotations[name]\n\t\tif actualValue != expectedValue {\n\t\t\tt.Fatalf(\"incorrect annotation for '%s': \\nwant %q,\\ngot %q\", name, expectedValue, actualValue)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/controller.go",
    "content": "package controller\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n\tkubeinformers \"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/scheme\"\n\ttypedcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n\tappslisters \"k8s.io/client-go/listers/apps/v1\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/client-go/tools/record\"\n\t\"k8s.io/client-go/util/workqueue\"\n\tglog \"k8s.io/klog\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tclientset \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tfaasscheme \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/scheme\"\n\tinformers \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions\"\n\tlisters \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/listers/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\nconst (\n\tcontrollerAgentName = \"modelz-operator\"\n\tfunctionPort        = 8080\n\t// SuccessSynced is used as part of the Event 'reason' when a Function is synced\n\tSuccessSynced = \"Synced\"\n\t// ErrResourceExists is used as part of the Event 'reason' when a Function fails\n\t// to sync due to a Deployment of the same name already existing.\n\tErrResourceExists = \"ErrResourceExists\"\n\n\t// MessageResourceExists is the message used for Events when a resource\n\t// fails to sync due to a Deployment already existing\n\tMessageResourceExists = \"Resource %q already exists and is not managed by OpenFaaS\"\n\t// MessageResourceSynced is the message used for an Event fired when a Function\n\t// is synced 
successfully\n\tMessageResourceSynced = \"Function synced successfully\"\n)\n\n// Controller is the controller implementation for Function resources\ntype Controller struct {\n\tBaseDomain string\n\t// kubeclientset is a standard kubernetes clientset\n\tkubeclientset kubernetes.Interface\n\t// faasclientset is a clientset for our own API group\n\tfaasclientset clientset.Interface\n\n\tdeploymentsLister appslisters.DeploymentLister\n\tdeploymentsSynced cache.InformerSynced\n\tinferenceLister   listers.InferenceLister\n\tinferencesSynced  cache.InformerSynced\n\n\t// workqueue is a rate limited work queue. This is used to queue work to be\n\t// processed instead of performing it as soon as a change happens. This\n\t// means we can ensure we only process a fixed amount of resources at a\n\t// time, and makes it easy to ensure we are never processing the same item\n\t// simultaneously in two different workers.\n\tworkqueue workqueue.RateLimitingInterface\n\t// recorder is an event recorder for recording Event resources to the\n\t// Kubernetes API.\n\trecorder record.EventRecorder\n\n\t// OpenFaaS function factory\n\tfactory FunctionFactory\n}\n\n// NewController returns a new OpenFaaS controller\nfunc NewController(\n\tkubeclientset kubernetes.Interface,\n\tinferenceclientset clientset.Interface,\n\tkubeInformerFactory kubeinformers.SharedInformerFactory,\n\tinferenceInformerFactory informers.SharedInformerFactory,\n\tfactory FunctionFactory) *Controller {\n\n\t// obtain references to shared index informers for the Deployment and Function types\n\tdeploymentInformer := kubeInformerFactory.Apps().V1().Deployments()\n\tinferenceInformer := inferenceInformerFactory.Tensorchord().V2alpha1().Inferences()\n\n\t// Create event broadcaster\n\t// Add o6s types to the default Kubernetes Scheme so Events can be\n\t// logged for faas-controller types.\n\tfaasscheme.AddToScheme(scheme.Scheme)\n\tglog.V(4).Info(\"Creating event broadcaster\")\n\teventBroadcaster := 
record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.V(4).Infof)\n\teventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events(\"\")})\n\trecorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})\n\n\tcontroller := &Controller{\n\t\tkubeclientset:     kubeclientset,\n\t\tfaasclientset:     inferenceclientset,\n\t\tdeploymentsLister: deploymentInformer.Lister(),\n\t\tdeploymentsSynced: deploymentInformer.Informer().HasSynced,\n\t\tinferenceLister:   inferenceInformer.Lister(),\n\t\tinferencesSynced:  inferenceInformer.Informer().HasSynced,\n\t\tworkqueue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"Functions\"),\n\t\trecorder:          recorder,\n\t\tfactory:           factory,\n\t}\n\n\tglog.Info(\"Setting up event handlers\")\n\n\t//  Add Function (OpenFaaS CRD-entry) Informer\n\t//\n\t// Set up an event handler for when Function resources change\n\tinferenceInformer.Informer().\n\t\tAddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: controller.enqueueFunction,\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tcontroller.enqueueFunction(new)\n\t\t\t},\n\t\t})\n\n\t// Set up an event handler for when functions related resources like pods, deployments, replica sets\n\t// can't be materialized. 
This logs abnormal events like ImagePullBackOff, back-off restarting failed container,\n\t// failed to start container, oci runtime errors, etc\n\t// Enable this with -v=3\n\tkubeInformerFactory.Core().V1().Events().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tkey, err := cache.MetaNamespaceKeyFunc(obj)\n\t\t\tif err == nil {\n\t\t\t\tevent := obj.(*corev1.Event)\n\t\t\t\tsince := time.Since(event.LastTimestamp.Time)\n\t\t\t\t// log abnormal events occurred in the last minute\n\t\t\t\tif since.Seconds() < 61 && strings.Contains(event.Type, \"Warning\") {\n\t\t\t\t\tglog.V(3).Infof(\"Abnormal event detected on %s %s: %s\", event.LastTimestamp, key, event.Message)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n\n\treturn controller\n}\n\n// Run will set up the event handlers for types we are interested in, as well\n// as syncing informer caches and starting workers. It will block until stopCh\n// is closed, at which point it will shutdown the workqueue and wait for\n// workers to finish processing their current work items.\nfunc (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {\n\tdefer runtime.HandleCrash()\n\tdefer c.workqueue.ShutDown()\n\n\t// Start the informer factories to begin populating the informer caches\n\t// Wait for the caches to be synced before starting workers\n\tglog.Info(\"Waiting for informer caches to sync\")\n\tif ok := cache.WaitForCacheSync(stopCh,\n\t\tc.deploymentsSynced, c.inferencesSynced); !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tglog.Info(\"Starting workers\")\n\t// Launch two workers to process Function resources\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\tglog.Info(\"Started workers\")\n\t<-stopCh\n\tglog.Info(\"Shutting down workers\")\n\n\treturn nil\n}\n\n// runWorker is a long-running function that will continually call the\n// processNextWorkItem function in order to read 
and process a message on the workqueue.\nfunc (c *Controller) runWorker() {\n\tfor c.processNextWorkItem() {\n\t}\n}\n\n// processNextWorkItem will read a single work item off the workqueue and\n// attempt to process it, by calling the syncHandler.\nfunc (c *Controller) processNextWorkItem() bool {\n\tobj, shutdown := c.workqueue.Get()\n\n\tif shutdown {\n\t\treturn false\n\t}\n\n\terr := func(obj interface{}) error {\n\t\tdefer c.workqueue.Done(obj)\n\t\tvar key string\n\t\tvar ok bool\n\t\tif key, ok = obj.(string); !ok {\n\t\t\tc.workqueue.Forget(obj)\n\t\t\truntime.HandleError(fmt.Errorf(\"expected string in workqueue but got %#v\", obj))\n\t\t\treturn nil\n\t\t}\n\t\tif err := c.syncHandler(key); err != nil {\n\t\t\treturn fmt.Errorf(\"error syncing '%s': %s\", key, err.Error())\n\t\t}\n\t\tc.workqueue.Forget(obj)\n\t\treturn nil\n\t}(obj)\n\n\tif err != nil {\n\t\truntime.HandleError(err)\n\t\treturn true\n\t}\n\n\treturn true\n}\n\n// syncHandler compares the actual state with the desired, and attempts to\n// converge the two.\nfunc (c *Controller) syncHandler(key string) error {\n\t// Convert the namespace/name string into a distinct namespace and name\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\truntime.HandleError(fmt.Errorf(\"invalid resource key: %s\", key))\n\t\treturn nil\n\t}\n\n\t// Get the Function resource with this namespace/name\n\tfunction, err := c.inferenceLister.Inferences(namespace).Get(name)\n\tif err != nil {\n\t\t// The Function resource may no longer exist, in which case we stop processing.\n\t\tif errors.IsNotFound(err) {\n\t\t\truntime.HandleError(fmt.Errorf(\"function '%s' in work queue no longer exists\", key))\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\tdeploymentName := function.Spec.Name\n\tif deploymentName == \"\" {\n\t\t// We choose to absorb the error here as the worker would requeue the\n\t\t// resource otherwise. 
Instead, the next time the resource is updated\n\t\t// the resource will be queued again.\n\t\truntime.HandleError(fmt.Errorf(\"%s: deployment name must be specified\", key))\n\t\treturn nil\n\t}\n\n\tif function.Spec.Annotations != nil {\n\t\tif _, ok := function.Spec.Annotations[consts.AnnotationBuilding]; ok {\n\t\t\tglog.Infof(\"Function '%s' is still building\", function.Spec.Name)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Get the deployment with the name specified in Function.spec\n\tdeployment, err := c.deploymentsLister.\n\t\tDeployments(function.Namespace).Get(deploymentName)\n\t// If the resource doesn't exist, we'll create it\n\tif errors.IsNotFound(err) {\n\t\terr = nil\n\t\texistingSecrets, err := c.getSecrets(function.Namespace, function.Spec.Secrets)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tglog.Infof(\"Creating deployment for '%s'\", function.Spec.Name)\n\t\tdeployment, err = c.kubeclientset.AppsV1().Deployments(function.Namespace).Create(\n\t\t\tcontext.TODO(),\n\t\t\tnewDeployment(function, deployment, existingSecrets, c.factory),\n\t\t\tmetav1.CreateOptions{},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsvcGetOptions := metav1.GetOptions{}\n\tsvcName := consts.DefaultServicePrefix + deploymentName\n\t_, getSvcErr := c.kubeclientset.CoreV1().Services(function.Namespace).Get(context.TODO(), deploymentName, svcGetOptions)\n\tif errors.IsNotFound(getSvcErr) {\n\t\tglog.Infof(\"Creating ClusterIP service for '%s'\", function.Spec.Name)\n\t\tif _, err := c.kubeclientset.CoreV1().Services(function.Namespace).Create(context.TODO(), newService(function), metav1.CreateOptions{}); err != nil {\n\t\t\t// If an error occurs during Service Create, we'll requeue the item\n\t\t\tif errors.IsAlreadyExists(err) {\n\t\t\t\terr = nil\n\t\t\t\tglog.V(2).Infof(\"ClusterIP service '%s' already exists. 
Skipping creation.\", function.Spec.Name)\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// If an error occurs during Get/Create, we'll requeue the item so we can\n\t// attempt processing again later. This could have been caused by a\n\t// temporary network failure, or any other transient reason.\n\tif err != nil {\n\t\treturn fmt.Errorf(\"transient error: %v\", err)\n\t}\n\n\t// If the Deployment is not controlled by this Function resource, we should log\n\t// a warning to the event recorder and ret\n\tif !metav1.IsControlledBy(deployment, function) {\n\t\tmsg := fmt.Sprintf(MessageResourceExists, deployment.Name)\n\t\tc.recorder.Event(function, corev1.EventTypeWarning, ErrResourceExists, msg)\n\t\treturn fmt.Errorf(msg)\n\t}\n\n\t// Update the Deployment resource if the Function definition differs\n\tif deploymentNeedsUpdate(function, deployment) {\n\t\tglog.Infof(\"Updating deployment for '%s'\", function.Spec.Name)\n\n\t\texistingSecrets, err := c.getSecrets(function.Namespace, function.Spec.Secrets)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdeployment, err = c.kubeclientset.AppsV1().Deployments(function.Namespace).Update(\n\t\t\tcontext.TODO(),\n\t\t\tnewDeployment(function, deployment, existingSecrets, c.factory),\n\t\t\tmetav1.UpdateOptions{},\n\t\t)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Updating deployment for '%s' failed: %v\", function.Spec.Name, err)\n\t\t}\n\n\t\texistingService, err := c.kubeclientset.CoreV1().Services(function.Namespace).Get(context.TODO(), svcName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\texistingService.Annotations = makeAnnotations(function)\n\t\t_, err = c.kubeclientset.CoreV1().Services(function.Namespace).Update(context.TODO(), existingService, metav1.UpdateOptions{})\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Updating service for '%s' failed: %v\", function.Spec.Name, err)\n\t\t}\n\t}\n\n\t// If an error occurs during Update, we'll requeue the item so we can\n\t// 
attempt processing again later. THis could have been caused by a\n\t// temporary network failure, or any other transient reason.\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.recorder.Event(function, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced)\n\treturn nil\n}\n\n// enqueueFunction takes a Function resource and converts it into a namespace/name\n// string which is then put onto the work queue. This method should *not* be\n// passed resources of any type other than Function.\nfunc (c *Controller) enqueueFunction(obj interface{}) {\n\tvar key string\n\tvar err error\n\tif key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {\n\t\truntime.HandleError(err)\n\t\treturn\n\t}\n\tc.workqueue.AddRateLimited(key)\n}\n\n// handleObject will take any resource implementing metav1.Object and attempt\n// to find the Function resource that 'owns' it. It does this by looking at the\n// objects metadata.ownerReferences field for an appropriate OwnerReference.\n// It then enqueues that Function resource to be processed. 
If the object does not\n// have an appropriate OwnerReference, it will simply be skipped.\nfunc (c *Controller) handleObject(obj interface{}) {\n\tvar object metav1.Object\n\tvar ok bool\n\tif object, ok = obj.(metav1.Object); !ok {\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\truntime.HandleError(fmt.Errorf(\"error decoding object, invalid type\"))\n\t\t\treturn\n\t\t}\n\t\tobject, ok = tombstone.Obj.(metav1.Object)\n\t\tif !ok {\n\t\t\truntime.HandleError(fmt.Errorf(\"error decoding object tombstone, invalid type\"))\n\t\t\treturn\n\t\t}\n\t\tglog.V(4).Infof(\"Recovered deleted object '%s' from tombstone\", object.GetName())\n\t}\n\tglog.V(4).Infof(\"Processing object: %s\", object.GetName())\n\tif ownerRef := metav1.GetControllerOf(object); ownerRef != nil {\n\t\t// If this object is not owned by a function, we should not do anything more\n\t\t// with it.\n\t\tif ownerRef.Kind != v2alpha1.Kind {\n\t\t\treturn\n\t\t}\n\n\t\tfunction, err := c.inferenceLister.Inferences(\n\t\t\tobject.GetNamespace()).Get(ownerRef.Name)\n\t\tif err != nil {\n\t\t\tglog.Infof(\"Function '%s' deleted. 
Ignoring orphaned object '%s'\", ownerRef.Name, object.GetSelfLink())\n\t\t\treturn\n\t\t}\n\n\t\tc.enqueueFunction(function)\n\t\treturn\n\t}\n}\n\n// getSecrets queries Kubernetes for a list of secrets by name in the given k8s namespace.\nfunc (c *Controller) getSecrets(namespace string,\n\tsecretNames []string) (map[string]*corev1.Secret, error) {\n\tsecrets := map[string]*corev1.Secret{}\n\n\tfor _, secretName := range secretNames {\n\t\tsecret, err := c.kubeclientset.CoreV1().\n\t\t\tSecrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn secrets, err\n\t\t}\n\t\tsecrets[secretName] = secret\n\t}\n\n\treturn secrets, nil\n}\n\n// getReplicas returns the desired number of replicas for a function taking into account\n// the min replicas label, HPA, the autoscaler and scaled to zero deployments\nfunc getReplicas(inference *v2alpha1.Inference, deployment *appsv1.Deployment) *int32 {\n\tvar minReplicas, maxReplicas *int32\n\tif inference.Spec.Scaling != nil {\n\t\tminReplicas = inference.Spec.Scaling.MinReplicas\n\t\tmaxReplicas = inference.Spec.Scaling.MaxReplicas\n\t}\n\n\t// extract current deployment replicas if specified\n\tvar deploymentReplicas *int32\n\tif deployment != nil {\n\t\tdeploymentReplicas = deployment.Spec.Replicas\n\t}\n\n\t// do not set replicas if min replicas is not set\n\t// and current deployment has no replicas count\n\tif minReplicas == nil && deploymentReplicas == nil {\n\t\treturn nil\n\t}\n\n\t// set replicas to min if deployment has no replicas and min replicas exists\n\tif minReplicas != nil && deploymentReplicas == nil {\n\t\treturn minReplicas\n\t}\n\n\t// do not override replicas when min is not specified\n\tif minReplicas == nil && deploymentReplicas != nil {\n\t\treturn deploymentReplicas\n\t}\n\n\tif minReplicas != nil && deploymentReplicas != nil {\n\t\tif maxReplicas == nil {\n\t\t\t// do not override HPA or OF autoscaler replicas if the value is greater than min\n\t\t\tif 
*deploymentReplicas >= *minReplicas {\n\t\t\t\treturn deploymentReplicas\n\t\t\t}\n\t\t} else {\n\t\t\t// do not override HPA or OF autoscaler replicas if the value is between min and max\n\t\t\tif *deploymentReplicas >= *minReplicas &&\n\t\t\t\t*deploymentReplicas <= *maxReplicas {\n\t\t\t\treturn deploymentReplicas\n\t\t\t} else if *deploymentReplicas > *maxReplicas {\n\t\t\t\treturn maxReplicas\n\t\t\t}\n\t\t}\n\t}\n\n\treturn minReplicas\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/deployment.go",
    "content": "package controller\n\nimport (\n\t\"encoding/json\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\tglog \"k8s.io/klog\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/k8s\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n)\n\nconst (\n\tannotationInferenceSpec = \"ai.tensorchord.inference.spec\"\n\tdefaultPort             = 8080\n)\n\nvar runtimeClassNvidia = \"nvidia\"\n\n// newDeployment creates a new Deployment for a Function resource. It also sets\n// the appropriate OwnerReferences on the resource so handleObject can discover\n// the Function resource that 'owns' it.\nfunc newDeployment(\n\tinference *v2alpha1.Inference,\n\texistingDeployment *appsv1.Deployment,\n\texistingSecrets map[string]*corev1.Secret,\n\tfactory FunctionFactory) *appsv1.Deployment {\n\n\t// Set replicas to 0 if the expected number of replicas is 0\n\treplicas := getReplicas(inference, existingDeployment)\n\n\tenvVars := makeEnvVars(inference)\n\tlabels := makeLabels(inference)\n\tnodeSelector := makeNodeSelector(inference.Spec.Constraints)\n\n\tport := makePort(inference)\n\tprobes, err := factory.MakeProbes(inference, port)\n\tif err != nil {\n\t\tglog.Warningf(\"Function %s probes parsing failed: %v\",\n\t\t\tinference.Spec.Name, err)\n\t}\n\tlabelMap := k8s.MakeLabelSelector(inference.Spec.Name)\n\t// Add a new env var HF_ENDPOINT if enabled.\n\thfEnvs := factory.MakeHuggingfacePullThroughCacheEnvVar()\n\tif hfEnvs != nil {\n\t\tenvVars = addEnvVarIfNotExists(envVars, hfEnvs.Name, hfEnvs.Value)\n\t}\n\n\tannotations := makeAnnotations(inference)\n\n\tcommand := 
makeCommand(inference)\n\n\tallowPrivilegeEscalation := false\n\n\tdeploymentSpec := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:        inference.Spec.Name,\n\t\t\tAnnotations: annotations,\n\t\t\tNamespace:   inference.Namespace,\n\t\t\tLabels:      labels,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(inference, schema.GroupVersionKind{\n\t\t\t\t\tGroup:   v2alpha1.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: v2alpha1.SchemeGroupVersion.Version,\n\t\t\t\t\tKind:    v2alpha1.Kind,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: replicas,\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\t\t\tMaxUnavailable: &intstr.IntOrString{\n\t\t\t\t\t\tType:   intstr.String,\n\t\t\t\t\t\tStrVal: \"10%\",\n\t\t\t\t\t},\n\t\t\t\t\tMaxSurge: &intstr.IntOrString{\n\t\t\t\t\t\tType:   intstr.String,\n\t\t\t\t\t\tStrVal: \"10%\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labelMap,\n\t\t\t},\n\t\t\tRevisionHistoryLimit: Ptr(int32(5)),\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels:      labels,\n\t\t\t\t\tAnnotations: annotations,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tNodeSelector: nodeSelector,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  inference.Spec.Name,\n\t\t\t\t\t\t\tImage: inference.Spec.Image,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{ContainerPort: int32(port), Protocol: corev1.ProtocolTCP},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCommand:         command,\n\t\t\t\t\t\t\tImagePullPolicy: corev1.PullPolicy(factory.Factory.Config.ImagePullPolicy),\n\t\t\t\t\t\t\tEnv:             envVars,\n\t\t\t\t\t\t\tSecurityContext: &corev1.SecurityContext{\n\t\t\t\t\t\t\t\tAllowPrivilegeEscalation: 
&allowPrivilegeEscalation,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t// TODO(xieydd): Add a function to set shm size\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"dshm\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/dev/shm\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"dshm\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{\n\t\t\t\t\t\t\t\t\tMedium: corev1.StorageMediumMemory,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif probes != nil {\n\t\tif probes.Liveness != nil {\n\t\t\tdeploymentSpec.Spec.Template.Spec.Containers[0].LivenessProbe = probes.Liveness\n\t\t}\n\t\tif probes.Readiness != nil {\n\t\t\tdeploymentSpec.Spec.Template.Spec.Containers[0].ReadinessProbe = probes.Readiness\n\t\t}\n\t\tif probes.Startup != nil {\n\t\t\tdeploymentSpec.Spec.Template.Spec.Containers[0].StartupProbe = probes.Startup\n\t\t\tif inference.Spec.Scaling != nil &&\n\t\t\t\tinference.Spec.Scaling.StartupDuration != nil {\n\t\t\t\t// Set the failure threshold to the number of seconds in the duration.\n\t\t\t\tdeploymentSpec.Spec.Template.Spec.Containers[0].\n\t\t\t\t\tStartupProbe.FailureThreshold = int32(\n\t\t\t\t\t*inference.Spec.Scaling.StartupDuration / probes.Startup.PeriodSeconds)\n\t\t\t}\n\t\t}\n\t}\n\n\tif inference.Spec.Resources != nil {\n\t\tdeploymentSpec.Spec.Template.Spec.Containers[0].Resources = *inference.Spec.Resources\n\t\tif q, ok := inference.Spec.Resources.Limits[consts.ResourceNvidiaGPU]; ok {\n\t\t\tif q.Value() > 0 {\n\t\t\t\t// If GPU is requested, add the GPU toleration.\n\t\t\t\tdeploymentSpec.Spec.Template.Spec.Tolerations = makeTolerationGPU()\n\t\t\t\tif factory.Factory.Config.RuntimeClassNvidia {\n\t\t\t\t\tdeploymentSpec.Spec.Template.Spec.RuntimeClassName = 
&runtimeClassNvidia\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// If GPU is not requested, set CUDA_VISIBLE_DEVICES to empty string.\n\t\t\t\tdeploymentSpec.Spec.Template.Spec.Containers[0].Env = append(\n\t\t\t\t\tdeploymentSpec.Spec.Template.Spec.Containers[0].Env,\n\t\t\t\t\tcorev1.EnvVar{\n\t\t\t\t\t\tName:  \"CUDA_VISIBLE_DEVICES\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\tfactory.ConfigureReadOnlyRootFilesystem(inference, deploymentSpec)\n\tfactory.ConfigureContainerUserID(deploymentSpec)\n\n\treturn deploymentSpec\n}\n\nfunc makeTolerationGPU() []corev1.Toleration {\n\tres := []corev1.Toleration{\n\t\t{\n\t\t\tKey:      consts.TolerationGPU,\n\t\t\tOperator: corev1.TolerationOpEqual,\n\t\t\tValue:    \"true\",\n\t\t},\n\t\t{\n\t\t\tKey:      consts.TolerationNvidiaGPUPresent,\n\t\t\tOperator: corev1.TolerationOpEqual,\n\t\t\tValue:    \"present\",\n\t\t},\n\t}\n\treturn res\n}\n\nfunc makeCommand(inference *v2alpha1.Inference) []string {\n\tif inference.Spec.Command != nil {\n\t\tres := strings.Split(*inference.Spec.Command, \" \")\n\t\treturn res\n\t}\n\treturn nil\n}\n\nfunc makeEnvVars(inference *v2alpha1.Inference) []corev1.EnvVar {\n\tenvVars := []corev1.EnvVar{}\n\n\tif inference.Spec.EnvVars != nil {\n\t\tfor k, v := range inference.Spec.EnvVars {\n\t\t\tenvVars = append(envVars, corev1.EnvVar{\n\t\t\t\tName:  k,\n\t\t\t\tValue: v,\n\t\t\t})\n\t\t}\n\t}\n\n\t// Set environment variables for different frameworks.\n\tswitch inference.Spec.Framework {\n\tcase v2alpha1.FrameworkGradio:\n\t\tenvVars = addEnvVarIfNotExists(envVars,\n\t\t\t\"GRADIO_SERVER_NAME\", \"0.0.0.0\")\n\t\tenvVars = addEnvVarIfNotExists(envVars,\n\t\t\t\"GRADIO_SERVER_PORT\", \"7860\")\n\tcase v2alpha1.FrameworkMosec:\n\t\tenvVars = addEnvVarIfNotExists(envVars,\n\t\t\t\"MOSEC_PORT\", strconv.Itoa(defaultPort))\n\tcase v2alpha1.FrameworkStreamlit:\n\t\tenvVars = addEnvVarIfNotExists(envVars, \"STREAMLIT_SERVER_ENABLE_CORS\", \"false\")\n\t\tenvVars = 
addEnvVarIfNotExists(envVars, \"STREAMLIT_SERVER_ADDRESS\", \"0.0.0.0\")\n\t\tenvVars = addEnvVarIfNotExists(envVars, \"STREAMLIT_SERVER_ENABLE_XSRF_PROTECTION\", \"false\")\n\t}\n\n\treturn envVars\n}\n\nfunc addEnvVarIfNotExists(envVars []corev1.EnvVar, name, value string) []corev1.EnvVar {\n\tfor _, envVar := range envVars {\n\t\tif envVar.Name == name {\n\t\t\treturn envVars\n\t\t}\n\t}\n\n\treturn append(envVars, corev1.EnvVar{\n\t\tName:  name,\n\t\tValue: value,\n\t})\n}\n\nfunc makeLabels(inference *v2alpha1.Inference) map[string]string {\n\tlabels := map[string]string{\n\t\tconsts.LabelInferenceName: inference.Spec.Name,\n\t\t\"app\":                     inference.Spec.Name,\n\t\t\"controller\":              inference.Name,\n\t}\n\tif inference.Spec.Labels != nil {\n\t\tfor k, v := range inference.Spec.Labels {\n\t\t\tlabels[k] = v\n\t\t}\n\t}\n\n\treturn labels\n}\n\nfunc makePort(inference *v2alpha1.Inference) int {\n\tif inference.Spec.Port != nil {\n\t\treturn int(*inference.Spec.Port)\n\t}\n\n\treturn defaultPort\n}\n\nfunc makeAnnotations(inference *v2alpha1.Inference) map[string]string {\n\tannotations := make(map[string]string)\n\n\t// disable scraping since the watchdog doesn't expose a metrics endpoint\n\tannotations[\"prometheus.io.scrape\"] = \"false\"\n\n\t// copy inference annotations\n\tif inference.Spec.Annotations != nil {\n\t\tfor k, v := range inference.Spec.Annotations {\n\t\t\tannotations[k] = v\n\t\t}\n\t}\n\n\t// save inference spec in deployment annotations\n\t// used to detect changes in inference spec\n\tspecJSON, err := json.Marshal(inference.Spec)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to marshal inference spec: %s\", err.Error())\n\t\treturn annotations\n\t}\n\n\tannotations[annotationInferenceSpec] = string(specJSON)\n\treturn annotations\n}\n\nfunc makeNodeSelector(constraints []string) map[string]string {\n\tselector := make(map[string]string)\n\n\tif len(constraints) > 0 {\n\t\tfor _, constraint := range constraints 
{\n\t\t\tparts := strings.Split(constraint, \"=\")\n\n\t\t\tif len(parts) == 2 {\n\t\t\t\tselector[parts[0]] = parts[1]\n\t\t\t}\n\t\t}\n\t}\n\n\treturn selector\n}\n\n// deploymentNeedsUpdate determines if the inference spec is different from the deployment spec\nfunc deploymentNeedsUpdate(\n\tinference *v2alpha1.Inference, deployment *appsv1.Deployment) bool {\n\tprevFnSpecJson := deployment.ObjectMeta.Annotations[annotationInferenceSpec]\n\tif prevFnSpecJson == \"\" {\n\t\t// is a new deployment or is an old deployment that is missing the annotation\n\t\treturn true\n\t}\n\n\tprevFnSpec := &v2alpha1.InferenceSpec{}\n\terr := json.Unmarshal([]byte(prevFnSpecJson), prevFnSpec)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to parse previous inference spec: %s\", err.Error())\n\t\treturn true\n\t}\n\tprevFn := v2alpha1.Inference{\n\t\tSpec: *prevFnSpec,\n\t}\n\n\tif diff := cmp.Diff(prevFn.Spec, inference.Spec); diff != \"\" {\n\t\tglog.V(2).Infof(\"Change detected for %s diff\\n%s\", inference.Name, diff)\n\t\treturn true\n\t} else {\n\t\tglog.V(3).Infof(\"No changes detected for %s\", inference.Name)\n\t}\n\n\treturn false\n}\n\nfunc int32p(i int32) *int32 {\n\treturn &i\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/deployment_test.go",
    "content": "package controller\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\tcorev1 \"k8s.io/api/core/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/k8s\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n)\n\nvar defaultK8sConfig = k8s.DeploymentConfig{\n\tHTTPProbe:      true,\n\tSetNonRootUser: true,\n\tLivenessProbe: &k8s.ProbeConfig{\n\t\tPeriodSeconds:       1,\n\t\tTimeoutSeconds:      3,\n\t\tInitialDelaySeconds: 0,\n\t},\n\tReadinessProbe: &k8s.ProbeConfig{\n\t\tPeriodSeconds:       1,\n\t\tTimeoutSeconds:      3,\n\t\tInitialDelaySeconds: 0,\n\t},\n\tStartupProbe: &k8s.ProbeConfig{\n\t\tPeriodSeconds:       1,\n\t\tTimeoutSeconds:      3,\n\t\tInitialDelaySeconds: 0,\n\t},\n}\n\nfunc assertEnv(t *testing.T, expect map[string]string, real []v1.EnvVar) {\n\tfor _, env := range real {\n\t\tvalue, exist := expect[env.Name]\n\t\tif exist == false || value != env.Value {\n\t\t\tt.Errorf(\"Environment variables contains unexpected %s:%s\", env.Name, env.Value)\n\t\t\tt.Fail()\n\t\t}\n\t\tdelete(expect, env.Name)\n\t}\n\tif len(expect) != 0 {\n\t\tt.Errorf(\"Environment variables should contain %v\", expect)\n\t\tt.Fail()\n\t}\n}\n\nfunc Test_newDeployment(t *testing.T) {\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(), defaultK8sConfig)\n\n\tsecrets := 
map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\tif deployment.Spec.Template.Spec.Containers[0].ReadinessProbe.HTTPGet.Path != \"/\" {\n\t\tt.Errorf(\"Readiness probe should have HTTPGet handler set to %s\", \"/\")\n\t\tt.Fail()\n\t}\n\n\tif deployment.Spec.Template.Spec.Containers[0].StartupProbe.InitialDelaySeconds != 0 {\n\t\tt.Errorf(\"Startup probe should have initial delay seconds set to %s\", \"0\")\n\t\tt.Fail()\n\t}\n\n\tif deployment.Spec.Template.Spec.Containers[0].LivenessProbe.InitialDelaySeconds != 0 {\n\t\tt.Errorf(\"Liveness probe should have initial delay seconds set to %s\", \"0\")\n\t\tt.Fail()\n\t}\n\n\tif *(deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser) != k8s.SecurityContextUserID {\n\t\tt.Errorf(\"RunAsUser should be %v\", k8s.SecurityContextUserID)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestNewDeploymentWithStartupDurationLabel(t *testing.T) {\n\tinf := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"prometheus.io.scrape\": \"true\",\n\t\t\t},\n\t\t\tScaling: &v2alpha1.ScalingConfig{\n\t\t\t\tStartupDuration: Ptr(int32(600)),\n\t\t\t},\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(),\n\t\tk8s.DeploymentConfig{\n\t\t\tHTTPProbe:      true,\n\t\t\tSetNonRootUser: true,\n\t\t\tLivenessProbe: &k8s.ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t\tReadinessProbe: &k8s.ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t\tStartupProbe: &k8s.ProbeConfig{\n\t\t\t\tPeriodSeconds:       10,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 
0,\n\t\t\t},\n\t\t})\n\n\tsecrets := map[string]*corev1.Secret{}\n\n\texpectedPeriodSeconds := int32(10)\n\texpectedFailureThreshold := int32(60)\n\tdeployment := newDeployment(inf, nil, secrets, factory)\n\tif len(deployment.Spec.Template.Spec.Containers) == 0 {\n\t\tt.Errorf(\"Deployment should have at least one container\")\n\t\tt.Fail()\n\t}\n\tif deployment.Spec.Template.Spec.Containers[0].StartupProbe == nil {\n\t\tt.Errorf(\"Deployment should have a startup probe\")\n\t\tt.Fail()\n\t}\n\tif deployment.Spec.Template.Spec.Containers[0].StartupProbe.PeriodSeconds != expectedPeriodSeconds {\n\t\tt.Errorf(\"Startup probe should have timeout seconds set to %d\", expectedPeriodSeconds)\n\t\tt.Fail()\n\t}\n\tif deployment.Spec.Template.Spec.Containers[0].StartupProbe.FailureThreshold != expectedFailureThreshold {\n\t\tt.Errorf(\"Startup probe should have failure threshold set to %d\", expectedFailureThreshold)\n\t\tt.Fail()\n\t}\n}\n\nfunc Test_newDeployment_PrometheusScrape_NotOverridden(t *testing.T) {\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:  \"kubesec\",\n\t\t\tImage: \"docker.io/kubesec/kubesec\",\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"prometheus.io.scrape\": \"true\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(),\n\t\tk8s.DeploymentConfig{\n\t\t\tHTTPProbe:      false,\n\t\t\tSetNonRootUser: true,\n\t\t\tLivenessProbe: &k8s.ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t\tReadinessProbe: &k8s.ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t\tStartupProbe: &k8s.ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t})\n\n\tsecrets := 
map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\twant := \"true\"\n\n\tif deployment.Spec.Template.Annotations[\"prometheus.io.scrape\"] != want {\n\t\tt.Errorf(\"Annotation prometheus.io.scrape should be %s, was: %s\", want, deployment.Spec.Template.Annotations[\"prometheus.io.scrape\"])\n\t}\n}\n\nfunc Test_newDeployment_WithZeroResource(t *testing.T) {\n\tquantity, _ := resource.ParseQuantity(\"0\")\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t\tResources: &v1.ResourceRequirements{\n\t\t\t\tLimits: v1.ResourceList{consts.ResourceNvidiaGPU: quantity},\n\t\t\t},\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(), defaultK8sConfig)\n\n\tsecrets := map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\tif deployment.Spec.Template.Spec.Containers[0].Env[0].Name != \"CUDA_VISIBLE_DEVICES\" {\n\t\tt.Errorf(\"CUDA_VISIBLE_DEVICES should be set to environment variables\")\n\t\tt.Fail()\n\t}\n\n\tif deployment.Spec.Template.Spec.Containers[0].Env[0].Value != \"\" {\n\t\tt.Errorf(\"Empty value should be set to CUDA_VISIBLE_DEVICES\")\n\t\tt.Fail()\n\t}\n}\n\nfunc Test_newDeployment_WithNonZeroResource(t *testing.T) {\n\tquantity, _ := resource.ParseQuantity(\"1\")\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t\tResources: &v1.ResourceRequirements{\n\t\t\t\tLimits: v1.ResourceList{consts.ResourceNvidiaGPU: 
quantity},\n\t\t\t},\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(), defaultK8sConfig)\n\n\tsecrets := map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\tif deployment.Spec.Template.Spec.Tolerations[0].Key != consts.TolerationGPU {\n\t\tt.Errorf(\"Tolerations should contain %s\", consts.TolerationGPU)\n\t\tt.Fail()\n\t}\n\n\tif deployment.Spec.Template.Spec.Tolerations[1].Key != consts.TolerationNvidiaGPUPresent {\n\t\tt.Errorf(\"Tolerations should contain %s\", consts.TolerationNvidiaGPUPresent)\n\t\tt.Fail()\n\t}\n}\n\nfunc Test_newDeployment_WithCommandsAndEnvVars(t *testing.T) {\n\texpectEnv := map[string]string{\"MOCK\": \"TEST\"}\n\texpectCommand := \"python main.py\"\n\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t\tCommand:       Ptr(expectCommand),\n\t\t\tEnvVars:       expectEnv,\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(), defaultK8sConfig)\n\n\tsecrets := map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\tassertEnv(t, expectEnv, deployment.Spec.Template.Spec.Containers[0].Env)\n\n\tif strings.Join(deployment.Spec.Template.Spec.Containers[0].Command, \" \") != expectCommand {\n\t\tt.Errorf(\"Command should contain value %s\", expectCommand)\n\t\tt.Fail()\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/deployment_update_test.go",
    "content": "package controller\n\nimport (\n\t\"testing\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n)\n\nfunc Test_Deployment_Need_Update(t *testing.T) {\n\tscenarios := []struct {\n\t\tname      string\n\t\tinference *v2alpha1.Inference\n\t\tdeploy    *appsv1.Deployment\n\t\texpected  bool\n\t}{\n\t\t{\n\t\t\t\"empty deployment need update\",\n\t\t\t&v2alpha1.Inference{},\n\t\t\t&appsv1.Deployment{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tannotationInferenceSpec: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"bad deployment need update\",\n\t\t\t&v2alpha1.Inference{},\n\t\t\t&appsv1.Deployment{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{annotationInferenceSpec: \"bad\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"equal deployment doesn't need update\",\n\t\t\t&v2alpha1.Inference{},\n\t\t\t&appsv1.Deployment{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tannotationInferenceSpec: \"{\\\"metadata\\\":{\\\"creationTimestamp\\\":null},\\\"spec\\\":{\\\"name\\\":\\\"\\\",\\\"image\\\":\\\"\\\"}}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"unequal deployment need update\",\n\t\t\t&v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tScaling: &v2alpha1.ScalingConfig{\n\t\t\t\t\t\tMinReplicas: Ptr(int32(2)),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t&appsv1.Deployment{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tannotationInferenceSpec: 
\"{\\\"metadata\\\":{\\\"creationTimestamp\\\":null},\\\"spec\\\":{\\\"name\\\":\\\"\\\",\\\"image\\\":\\\"\\\"}}\",\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\ttrue,\n\t\t},\n\t}\n\n\tfor _, s := range scenarios {\n\t\tt.Run(s.name, func(t *testing.T) {\n\t\t\tneedUpdate := deploymentNeedsUpdate(s.inference, s.deploy)\n\t\t\tif needUpdate != s.expected {\n\t\t\t\tt.Errorf(\"incorrect judgement of need update: expected %v, got %v\", s.expected, needUpdate)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/factory.go",
    "content": "package controller\n\nimport (\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/k8s\"\n)\n\n// FunctionFactory wraps modelzetes factory\ntype FunctionFactory struct {\n\tFactory k8s.FunctionFactory\n}\n\nfunc NewFunctionFactory(clientset kubernetes.Interface, config k8s.DeploymentConfig) FunctionFactory {\n\treturn FunctionFactory{\n\t\tk8s.FunctionFactory{\n\t\t\tClient: clientset,\n\t\t\tConfig: config,\n\t\t},\n\t}\n}\n\nfunc functionToResourceRequirements(in *v2alpha1.Inference) types.ResourceRequirements {\n\tresources := types.ResourceRequirements{}\n\n\tif in.Spec.Resources == nil {\n\t\treturn resources\n\t}\n\n\tgpuLimit := in.Spec.Resources.Limits[consts.ResourceNvidiaGPU]\n\tgpuLimitPtr := &gpuLimit\n\n\tgpuRequest := in.Spec.Resources.Requests[consts.ResourceNvidiaGPU]\n\tgpuRequestsPtr := &gpuRequest\n\n\tresources = types.ResourceRequirements{\n\t\tLimits: types.ResourceList{\n\t\t\ttypes.ResourceCPU: types.Quantity(\n\t\t\t\tin.Spec.Resources.Limits.Cpu().String()),\n\t\t\ttypes.ResourceMemory: types.Quantity(\n\t\t\t\tin.Spec.Resources.Limits.Memory().String()),\n\t\t\ttypes.ResourceGPU: types.Quantity(gpuLimitPtr.String()),\n\t\t},\n\t\tRequests: types.ResourceList{\n\t\t\ttypes.ResourceCPU: types.Quantity(\n\t\t\t\tin.Spec.Resources.Requests.Cpu().String()),\n\t\t\ttypes.ResourceMemory: types.Quantity(\n\t\t\t\tin.Spec.Resources.Requests.Memory().String()),\n\t\t\ttypes.ResourceGPU: types.Quantity(gpuRequestsPtr.String()),\n\t\t},\n\t}\n\n\treturn resources\n}\n\nfunc (f *FunctionFactory) MakeHuggingfacePullThroughCacheEnvVar() *corev1.EnvVar {\n\tif f.Factory.Config.HuggingfacePullThroughCache {\n\t\treturn 
&corev1.EnvVar{\n\t\t\tName:  \"HF_ENDPOINT\",\n\t\t\tValue: f.Factory.Config.HuggingfacePullThroughCacheEndpoint,\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (f *FunctionFactory) MakeProbes(function *v2alpha1.Inference, port int) (\n\t*k8s.FunctionProbes, error) {\n\t// For old version inference without HTTPProbePath\n\thttpProbePath := consts.DefaultHTTPProbePath\n\tif (function.Spec.HTTPProbePath != nil) && (*function.Spec.HTTPProbePath != \"\") {\n\t\thttpProbePath = *function.Spec.HTTPProbePath\n\t}\n\n\treturn f.Factory.MakeProbes(port, httpProbePath)\n}\n\nfunc (f *FunctionFactory) ConfigureReadOnlyRootFilesystem(function *v2alpha1.Inference, deployment *appsv1.Deployment) {\n\tf.Factory.ConfigureReadOnlyRootFilesystem(deployment)\n}\n\nfunc (f *FunctionFactory) ConfigureContainerUserID(deployment *appsv1.Deployment) {\n\tf.Factory.ConfigureContainerUserID(deployment)\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/framework_test.go",
    "content": "package controller\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc Test_newDeployment_FrameworkGradio(t *testing.T) {\n\texpectEnv := map[string]string{\"GRADIO_SERVER_NAME\": \"0.0.0.0\", \"GRADIO_SERVER_PORT\": \"7860\"}\n\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t\tFramework:     v2alpha1.FrameworkGradio,\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(), defaultK8sConfig)\n\n\tsecrets := map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\tassertEnv(t, expectEnv, deployment.Spec.Template.Spec.Containers[0].Env)\n}\n\nfunc Test_newDeployment_FrameworkMosec(t *testing.T) {\n\texpectEnv := map[string]string{\"MOSEC_PORT\": strconv.Itoa(defaultPort)}\n\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t\tFramework:     v2alpha1.FrameworkMosec,\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(), defaultK8sConfig)\n\n\tsecrets := map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\tassertEnv(t, expectEnv, deployment.Spec.Template.Spec.Containers[0].Env)\n}\n\nfunc Test_newDeployment_FrameworkStreamlit(t 
*testing.T) {\n\texpectEnv := map[string]string{\n\t\t\"STREAMLIT_SERVER_ENABLE_CORS\":            \"false\",\n\t\t\"STREAMLIT_SERVER_ADDRESS\":                \"0.0.0.0\",\n\t\t\"STREAMLIT_SERVER_ENABLE_XSRF_PROTECTION\": \"false\"}\n\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubesec\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t\tFramework:     v2alpha1.FrameworkStreamlit,\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(), defaultK8sConfig)\n\n\tsecrets := map[string]*corev1.Secret{}\n\n\tdeployment := newDeployment(inference, nil, secrets, factory)\n\n\tassertEnv(t, expectEnv, deployment.Spec.Template.Spec.Containers[0].Env)\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/fromconfig.go",
    "content": "package controller\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\tkubeinformers \"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\n\tclientset \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned\"\n\tinformers \"github.com/tensorchord/openmodelz/modelzetes/pkg/client/informers/externalversions\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/config\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/k8s\"\n)\n\nfunc New(c config.Config, stopCh <-chan struct{}) (*Controller, error) {\n\tclientCmdConfig, err := clientcmd.BuildConfigFromFlags(\n\t\tc.KubeConfig.MasterURL, c.KubeConfig.Kubeconfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building kubeconfig: %s\", err.Error())\n\t}\n\n\tclientCmdConfig.QPS = float32(c.KubeConfig.QPS)\n\tclientCmdConfig.Burst = c.KubeConfig.Burst\n\n\tkubeClient, err := kubernetes.NewForConfig(clientCmdConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building Kubernetes clientset: %s\", err.Error())\n\t}\n\n\tinferenceClient, err := clientset.NewForConfig(clientCmdConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building Inference clientset: %s\", err.Error())\n\t}\n\n\tdeployConfig := k8s.DeploymentConfig{\n\t\tHTTPProbe:      true,\n\t\tSetNonRootUser: false,\n\t\tReadinessProbe: &k8s.ProbeConfig{\n\t\t\tInitialDelaySeconds: int32(c.Probes.Readiness.InitialDelaySeconds),\n\t\t\tTimeoutSeconds:      int32(c.Probes.Readiness.TimeoutSeconds),\n\t\t\tPeriodSeconds:       int32(c.Probes.Readiness.PeriodSeconds),\n\t\t},\n\t\tLivenessProbe: &k8s.ProbeConfig{\n\t\t\tInitialDelaySeconds: int32(c.Probes.Liveness.InitialDelaySeconds),\n\t\t\tTimeoutSeconds:      int32(c.Probes.Liveness.TimeoutSeconds),\n\t\t\tPeriodSeconds:       int32(c.Probes.Liveness.PeriodSeconds),\n\t\t},\n\t\tStartupProbe: 
&k8s.ProbeConfig{\n\t\t\tInitialDelaySeconds: int32(c.Probes.Startup.InitialDelaySeconds),\n\t\t\tTimeoutSeconds:      int32(c.Probes.Startup.TimeoutSeconds),\n\t\t\tPeriodSeconds:       int32(c.Probes.Startup.PeriodSeconds),\n\t\t},\n\t\tImagePullPolicy:    c.Inference.ImagePullPolicy,\n\t\tRuntimeClassNvidia: c.Inference.SetUpRuntimeClassNvidia,\n\t\tProfilesNamespace:  \"default\",\n\t}\n\n\tif c.HuggingfaceProxy.Endpoint == \"\" {\n\t\tdeployConfig.HuggingfacePullThroughCache = false\n\t} else {\n\t\tdeployConfig.HuggingfacePullThroughCache = true\n\t\tdeployConfig.HuggingfacePullThroughCacheEndpoint = c.HuggingfaceProxy.Endpoint\n\t}\n\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, c.KubeConfig.ResyncPeriod)\n\n\tinferenceInformerFactory := informers.NewSharedInformerFactoryWithOptions(inferenceClient, c.KubeConfig.ResyncPeriod)\n\n\tinferences := inferenceInformerFactory.Tensorchord().V2alpha1().Inferences()\n\tgo inferences.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:inferences\", consts.ProviderName),\n\t\tstopCh, inferences.Informer().HasSynced); !ok {\n\t\treturn nil, errors.New(\"failed to wait for inference caches to sync\")\n\t}\n\n\tdeployments := kubeInformerFactory.Apps().V1().Deployments()\n\tgo deployments.Informer().Run(stopCh)\n\tif ok := cache.WaitForNamedCacheSync(\n\t\tfmt.Sprintf(\"%s:deployments\", consts.ProviderName),\n\t\tstopCh, deployments.Informer().HasSynced); !ok {\n\t\treturn nil, errors.New(\"failed to wait for deployment caches to sync\")\n\t}\n\n\tcontrollerFactory := NewFunctionFactory(kubeClient, deployConfig)\n\n\tctr := NewController(\n\t\tkubeClient, inferenceClient, kubeInformerFactory,\n\t\tinferenceInformerFactory, controllerFactory)\n\treturn ctr, nil\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/replicas_test.go",
    "content": "package controller\n\nimport (\n\t\"testing\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/k8s\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n)\n\nfunc Test_Replicas(t *testing.T) {\n\tscenarios := []struct {\n\t\tname      string\n\t\tinference *v2alpha1.Inference\n\t\tdeploy    *appsv1.Deployment\n\t\texpected  *int32\n\t}{\n\t\t{\n\t\t\t\"return nil replicas when label is missing and deployment does not exist\",\n\t\t\t&v2alpha1.Inference{},\n\t\t\tnil,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"return nil replicas when label is missing and deployment has no replicas\",\n\t\t\t&v2alpha1.Inference{},\n\t\t\t&appsv1.Deployment{},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"return min replicas when label is present and deployment has nil replicas\",\n\t\t\t&v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tScaling: &v2alpha1.ScalingConfig{\n\t\t\t\t\t\tMinReplicas: Ptr(int32(2)),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t&appsv1.Deployment{Spec: appsv1.DeploymentSpec{Replicas: nil}},\n\t\t\tint32p(2),\n\t\t},\n\t\t{\n\t\t\t\"return min replicas when label is present and deployment has replicas less than min\",\n\t\t\t&v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tScaling: &v2alpha1.ScalingConfig{\n\t\t\t\t\t\tMinReplicas: Ptr(int32(2)),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t&appsv1.Deployment{Spec: appsv1.DeploymentSpec{Replicas: int32p(1)}},\n\t\t\tint32p(2),\n\t\t},\n\t\t{\n\t\t\t\"return existing replicas when label is present and deployment has more replicas than min\",\n\t\t\t&v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tScaling: &v2alpha1.ScalingConfig{\n\t\t\t\t\t\tMinReplicas: Ptr(int32(2)),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t&appsv1.Deployment{Spec: 
appsv1.DeploymentSpec{Replicas: int32p(3)}},\n\t\t\tint32p(3),\n\t\t},\n\t\t{\n\t\t\t\"return existing replicas when label is missing and deployment has replicas set by HPA\",\n\t\t\t&v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tScaling: &v2alpha1.ScalingConfig{},\n\t\t\t\t},\n\t\t\t}, &appsv1.Deployment{Spec: appsv1.DeploymentSpec{Replicas: int32p(3)}},\n\t\t\tint32p(3),\n\t\t},\n\t\t{\n\t\t\t\"return zero replicas when label is present and deployment has zero replicas\",\n\t\t\t&v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tScaling: &v2alpha1.ScalingConfig{\n\t\t\t\t\t\tMinReplicas: Ptr(int32(2)),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, &appsv1.Deployment{Spec: appsv1.DeploymentSpec{Replicas: int32p(0)}},\n\t\t\tint32p(2),\n\t\t},\n\t}\n\n\tfactory := NewFunctionFactory(fake.NewSimpleClientset(),\n\t\tk8s.DeploymentConfig{\n\t\t\tLivenessProbe:  &k8s.ProbeConfig{},\n\t\t\tReadinessProbe: &k8s.ProbeConfig{},\n\t\t\tStartupProbe:   &k8s.ProbeConfig{},\n\t\t})\n\n\tfor _, s := range scenarios {\n\t\tt.Run(s.name, func(t *testing.T) {\n\t\t\tdeploy := newDeployment(s.inference, s.deploy, nil, factory)\n\t\t\tvalue := deploy.Spec.Replicas\n\n\t\t\tif s.expected != nil && value != nil {\n\t\t\t\tif *s.expected != *value {\n\t\t\t\t\tt.Errorf(\"incorrect replica count: expected %v, got %v\", *s.expected, *value)\n\t\t\t\t}\n\t\t\t} else if s.expected != value {\n\t\t\t\tt.Errorf(\"incorrect replica count: expected %v, got %v\", s.expected, value)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/secrets.go",
    "content": "package controller\n\nimport (\n\t\"fmt\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n)\n\nconst (\n\tsecretsMountPath = \"/var/openfaas/secrets\"\n)\n\n// UpdateSecrets will update the Deployment spec to include secrets that have been deployed\n// in the kubernetes cluster.  For each requested secret, we inspect the type and add it to the\n// deployment spec as appropriate: secrets with type `SecretTypeDockercfg` are added as ImagePullSecrets\n// all other secrets are mounted as files in the deployments containers.\nfunc UpdateSecrets(function *v2alpha1.Inference, deployment *appsv1.Deployment, existingSecrets map[string]*corev1.Secret) error {\n\t// Add / reference pre-existing secrets within Kubernetes\n\tsecretVolumeProjections := []corev1.VolumeProjection{}\n\n\tfor _, secretName := range function.Spec.Secrets {\n\t\tdeployedSecret, ok := existingSecrets[secretName]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"required secret '%s' was not found in the cluster\", secretName)\n\t\t}\n\n\t\tswitch deployedSecret.Type {\n\n\t\tcase corev1.SecretTypeDockercfg,\n\t\t\tcorev1.SecretTypeDockerConfigJson:\n\n\t\t\tdeployment.Spec.Template.Spec.ImagePullSecrets = append(\n\t\t\t\tdeployment.Spec.Template.Spec.ImagePullSecrets,\n\t\t\t\tcorev1.LocalObjectReference{\n\t\t\t\t\tName: secretName,\n\t\t\t\t},\n\t\t\t)\n\n\t\tdefault:\n\n\t\t\tprojectedPaths := []corev1.KeyToPath{}\n\t\t\tfor secretKey := range deployedSecret.Data {\n\t\t\t\tprojectedPaths = append(projectedPaths, corev1.KeyToPath{Key: secretKey, Path: secretKey})\n\t\t\t}\n\n\t\t\tprojection := &corev1.SecretProjection{Items: projectedPaths}\n\t\t\tprojection.Name = secretName\n\t\t\tsecretProjection := corev1.VolumeProjection{\n\t\t\t\tSecret: projection,\n\t\t\t}\n\t\t\tsecretVolumeProjections = append(secretVolumeProjections, secretProjection)\n\t\t}\n\t}\n\n\tvolumeName := 
fmt.Sprintf(\"%s-projected-secrets\", function.Spec.Name)\n\tprojectedSecrets := corev1.Volume{\n\t\tName: volumeName,\n\t\tVolumeSource: corev1.VolumeSource{\n\t\t\tProjected: &corev1.ProjectedVolumeSource{\n\t\t\t\tSources: secretVolumeProjections,\n\t\t\t},\n\t\t},\n\t}\n\n\t// remove the existing secrets volume, if we can find it. The update volume will be\n\t// added below\n\texistingVolumes := removeVolume(volumeName, deployment.Spec.Template.Spec.Volumes)\n\tdeployment.Spec.Template.Spec.Volumes = existingVolumes\n\tif len(secretVolumeProjections) > 0 {\n\t\tdeployment.Spec.Template.Spec.Volumes = append(existingVolumes, projectedSecrets)\n\t}\n\n\t// add mount secret as a file\n\tupdatedContainers := []corev1.Container{}\n\tfor _, container := range deployment.Spec.Template.Spec.Containers {\n\t\tmount := corev1.VolumeMount{\n\t\t\tName:      volumeName,\n\t\t\tReadOnly:  true,\n\t\t\tMountPath: secretsMountPath,\n\t\t}\n\t\t// remove the existing secrets volume mount, if we can find it. 
We update it later.\n\t\tcontainer.VolumeMounts = removeVolumeMount(volumeName, container.VolumeMounts)\n\t\tif len(secretVolumeProjections) > 0 {\n\t\t\tcontainer.VolumeMounts = append(container.VolumeMounts, mount)\n\t\t}\n\n\t\tupdatedContainers = append(updatedContainers, container)\n\t}\n\n\tdeployment.Spec.Template.Spec.Containers = updatedContainers\n\n\treturn nil\n}\n\n// removeVolume returns a Volume slice with any volumes matching volumeName removed.\n// Uses the filter without allocation technique\n// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating\nfunc removeVolume(volumeName string, volumes []corev1.Volume) []corev1.Volume {\n\tnewVolumes := volumes[:0]\n\tfor _, v := range volumes {\n\t\tif v.Name != volumeName {\n\t\t\tnewVolumes = append(newVolumes, v)\n\t\t}\n\t}\n\n\treturn newVolumes\n}\n\n// removeVolumeMount returns a VolumeMount slice with any mounts matching volumeName removed\n// Uses the filter without allocation technique\n// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating\nfunc removeVolumeMount(volumeName string, mounts []corev1.VolumeMount) []corev1.VolumeMount {\n\tnewMounts := mounts[:0]\n\tfor _, v := range mounts {\n\t\tif v.Name != volumeName {\n\t\t\tnewMounts = append(newMounts, v)\n\t\t}\n\t}\n\n\treturn newMounts\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/secrets_test.go",
    "content": "package controller\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n)\n\nfunc Test_UpdateSecrets_DoesNotAddVolumeIfRequestSecretsIsNil(t *testing.T) {\n\trequest := &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:    \"testfunc\",\n\t\t\tSecrets: nil,\n\t\t},\n\t}\n\texistingSecrets := map[string]*corev1.Secret{\n\t\t\"pullsecret\": {Type: corev1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: corev1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": []byte(\"contents\")}},\n\t}\n\n\tdeployment := &appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\n\tvalidateEmptySecretVolumesAndMounts(t, deployment)\n\n}\n\nfunc Test_UpdateSecrets_DoesNotAddVolumeIfRequestSecretsIsEmpty(t *testing.T) {\n\trequest := &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:    \"testfunc\",\n\t\t\tSecrets: []string{},\n\t\t},\n\t}\n\texistingSecrets := map[string]*corev1.Secret{\n\t\t\"pullsecret\": {Type: corev1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: corev1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": []byte(\"contents\")}},\n\t}\n\n\tdeployment := &appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil 
{\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\n\tvalidateEmptySecretVolumesAndMounts(t, deployment)\n\n}\n\nfunc Test_UpdateSecrets_RemovesAllCopiesOfExitingSecretsVolumes(t *testing.T) {\n\tvolumeName := \"testfunc-projected-secrets\"\n\trequest := &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:    \"testfunc\",\n\t\t\tSecrets: []string{},\n\t\t},\n\t}\n\texistingSecrets := map[string]*corev1.Secret{\n\t\t\"pullsecret\": {Type: corev1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: corev1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": []byte(\"contents\")}},\n\t}\n\n\tdeployment := &appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"testfunc\",\n\t\t\t\t\t\t\tImage: \"alpine:latest\",\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\n\tvalidateEmptySecretVolumesAndMounts(t, deployment)\n\n}\n\nfunc Test_UpdateSecrets_AddNewSecretVolume(t *testing.T) {\n\trequest := &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:    \"testfunc\",\n\t\t\tSecrets: []string{\"pullsecret\", \"testsecret\"},\n\t\t},\n\t}\n\texistingSecrets := map[string]*corev1.Secret{\n\t\t\"pullsecret\": {Type: corev1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: corev1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": 
[]byte(\"contents\")}},\n\t}\n\n\tdeployment := &appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\n\tvalidateNewSecretVolumesAndMounts(t, deployment)\n\n}\n\nfunc Test_UpdateSecrets_ReplacesPreviousSecretMountWithNewMount(t *testing.T) {\n\trequest := &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:    \"testfunc\",\n\t\t\tSecrets: []string{\"pullsecret\", \"testsecret\"},\n\t\t},\n\t}\n\texistingSecrets := map[string]*corev1.Secret{\n\t\t\"pullsecret\": {Type: corev1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: corev1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": []byte(\"contents\")}},\n\t}\n\n\tdeployment := &appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\t// mimic the deployment already existing and deployed with the same secrets by running\n\t// UpdateSecrets twice, the first run represents the original deployment, the second run represents\n\t// retrieving the deployment from the k8s api and applying the update to it\n\terr = UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\n\tvalidateNewSecretVolumesAndMounts(t, deployment)\n\n}\n\nfunc Test_UpdateSecrets_RemovesSecretsVolumeIfRequestSecretsIsEmptyOrNil(t *testing.T) 
{\n\trequest := &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:    \"testfunc\",\n\t\t\tSecrets: []string{\"pullsecret\", \"testsecret\"},\n\t\t},\n\t}\n\texistingSecrets := map[string]*corev1.Secret{\n\t\t\"pullsecret\": {Type: corev1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: corev1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": []byte(\"contents\")}},\n\t}\n\n\tdeployment := &appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\terr := UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\n\tvalidateNewSecretVolumesAndMounts(t, deployment)\n\n\trequest = &v2alpha1.Inference{\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:    \"testfunc\",\n\t\t\tSecrets: []string{},\n\t\t},\n\t}\n\terr = UpdateSecrets(request, deployment, existingSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error %s\", err.Error())\n\t}\n\n\tvalidateEmptySecretVolumesAndMounts(t, deployment)\n}\n\nfunc validateEmptySecretVolumesAndMounts(t *testing.T, deployment *appsv1.Deployment) {\n\tnumVolumes := len(deployment.Spec.Template.Spec.Volumes)\n\tif numVolumes != 0 {\n\t\tfmt.Printf(\"%+v\", deployment.Spec.Template.Spec.Volumes)\n\t\tt.Errorf(\"Incorrect number of volumes: expected 0, got %d\", numVolumes)\n\t}\n\n\tc := deployment.Spec.Template.Spec.Containers[0]\n\tnumVolumeMounts := len(c.VolumeMounts)\n\tif numVolumeMounts != 0 {\n\t\tt.Errorf(\"Incorrect number of volumes mounts: expected 0, got %d\", numVolumeMounts)\n\t}\n}\n\nfunc validateNewSecretVolumesAndMounts(t *testing.T, deployment *appsv1.Deployment) {\n\tnumVolumes := len(deployment.Spec.Template.Spec.Volumes)\n\tif numVolumes != 1 {\n\t\tt.Errorf(\"Incorrect number of volumes: 
expected 1, got %d\", numVolumes)\n\t}\n\n\tvolume := deployment.Spec.Template.Spec.Volumes[0]\n\tif volume.Name != \"testfunc-projected-secrets\" {\n\t\tt.Errorf(\"Incorrect volume name: expected \\\"testfunc-projected-secrets\\\", got \\\"%s\\\"\", volume.Name)\n\t}\n\n\tif volume.VolumeSource.Projected == nil {\n\t\tt.Error(\"Secrets volume is not a projected volume type\")\n\t}\n\n\tif volume.VolumeSource.Projected.Sources[0].Secret.Items[0].Key != \"filename\" {\n\t\tt.Error(\"Project secret not constructed correctly\")\n\t}\n\n\tc := deployment.Spec.Template.Spec.Containers[0]\n\tnumVolumeMounts := len(c.VolumeMounts)\n\tif numVolumeMounts != 1 {\n\t\tt.Errorf(\"Incorrect number of volumes mounts: expected 1, got %d\", numVolumeMounts)\n\t}\n\n\tmount := c.VolumeMounts[0]\n\tif mount.Name != \"testfunc-projected-secrets\" {\n\t\tt.Errorf(\"Incorrect volume mounts: expected \\\"testfunc-projected-secrets\\\", got \\\"%s\\\"\", mount.Name)\n\t}\n\n\tif mount.MountPath != secretsMountPath {\n\t\tt.Errorf(\"Incorrect volume mount path: expected \\\"%s\\\", got \\\"%s\\\"\", secretsMountPath, mount.MountPath)\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/service.go",
    "content": "package controller\n\nimport (\n\tcorev1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\n// newService creates a new ClusterIP Service for a Function resource. It also sets\n// the appropriate OwnerReferences on the resource so handleObject can discover\n// the Function resource that 'owns' it.\nfunc newService(function *v2alpha1.Inference) *corev1.Service {\n\treturn &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:        consts.DefaultServicePrefix + function.Spec.Name,\n\t\t\tNamespace:   function.Namespace,\n\t\t\tAnnotations: map[string]string{\"prometheus.io.scrape\": \"false\"},\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(function, schema.GroupVersionKind{\n\t\t\t\t\tGroup:   v2alpha1.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: v2alpha1.SchemeGroupVersion.Version,\n\t\t\t\t\tKind:    v2alpha1.Kind,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tType:     corev1.ServiceTypeClusterIP,\n\t\t\tSelector: map[string]string{consts.LabelInferenceName: function.Spec.Name},\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName:     \"http\",\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort:     functionPort,\n\t\t\t\t\tTargetPort: intstr.IntOrString{\n\t\t\t\t\t\tType:   intstr.Int,\n\t\t\t\t\t\tIntVal: int32(makePort(function)),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/controller/service_test.go",
    "content": "package controller\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\tv2alpha1 \"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc Test_newService(t *testing.T) {\n\tinference := &v2alpha1.Inference{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"kubesec\",\n\t\t\tNamespace: \"mock-space\",\n\t\t},\n\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\tName:          \"kubesec\",\n\t\t\tImage:         \"docker.io/kubesec/kubesec\",\n\t\t\tHTTPProbePath: Ptr(\"/\"),\n\t\t\tAnnotations:   map[string]string{},\n\t\t},\n\t}\n\n\tservice := newService(inference)\n\n\tif !strings.Contains(service.ObjectMeta.Name, inference.ObjectMeta.Name) {\n\t\tt.Errorf(\"Service name %s should contains inference name %s\",\n\t\t\tservice.ObjectMeta.Name, inference.ObjectMeta.Name)\n\t\tt.Fail()\n\t}\n\tif service.ObjectMeta.Namespace != inference.ObjectMeta.Namespace {\n\t\tt.Errorf(\"Service namespace %s should be equal to inference namespace %s\",\n\t\t\tservice.ObjectMeta.Namespace, inference.ObjectMeta.Namespace)\n\t\tt.Fail()\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/config.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\n// ProbeConfig holds the deployment liveness and readiness options\ntype ProbeConfig struct {\n\tInitialDelaySeconds int32\n\tTimeoutSeconds      int32\n\tPeriodSeconds       int32\n}\n\n// DeploymentConfig holds the global deployment options\ntype DeploymentConfig struct {\n\tHTTPProbe                           bool\n\tReadinessProbe                      *ProbeConfig\n\tLivenessProbe                       *ProbeConfig\n\tStartupProbe                        *ProbeConfig\n\tHuggingfacePullThroughCache         bool\n\tHuggingfacePullThroughCacheEndpoint string\n\tImagePullPolicy                     string\n\tRuntimeClassNvidia                  bool\n\t// SetNonRootUser will override the function image user to ensure that it is not root. When\n\t// true, the user will set to 12000 for all functions.\n\tSetNonRootUser bool\n\t// ProfilesNamespace defines which namespace is used to look up available Profiles.\n\tProfilesNamespace string\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/errors.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n)\n\n// IsNotFound tests if the error is a kubernetes API error that indicates that the object\n// was not found or does not exist\nfunc IsNotFound(err error) bool {\n\treturn k8serrors.IsNotFound(err) || k8serrors.IsGone(err)\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/factory.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\t\"k8s.io/client-go/kubernetes\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/client/clientset/versioned/typed/modelzetes/v2alpha1\"\n)\n\n// FunctionFactory is handling Kubernetes operations to materialise functions into deployments and services\ntype FunctionFactory struct {\n\tClient kubernetes.Interface\n\tConfig DeploymentConfig\n}\n\nfunc NewFunctionFactory(clientset kubernetes.Interface, config DeploymentConfig, inferenceclientset v2alpha1.InferenceInterface) FunctionFactory {\n\treturn FunctionFactory{\n\t\tClient: clientset,\n\t\tConfig: config,\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/factory_test.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport \"k8s.io/client-go/kubernetes/fake\"\n\nfunc mockFactory() FunctionFactory {\n\treturn NewFunctionFactory(fake.NewSimpleClientset(),\n\t\tDeploymentConfig{\n\t\t\tHTTPProbe: false,\n\t\t\tLivenessProbe: &ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t\tReadinessProbe: &ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t\tStartupProbe: &ProbeConfig{\n\t\t\t\tPeriodSeconds:       1,\n\t\t\t\tTimeoutSeconds:      3,\n\t\t\t\tInitialDelaySeconds: 0,\n\t\t\t},\n\t\t}, nil)\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/instance.go",
    "content": "package k8s\n\nimport (\n\ttypes \"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tv1 \"k8s.io/api/core/v1\"\n)\n\nfunc MakeLabelSelector(name string) map[string]string {\n\treturn map[string]string{\n\t\t\"app\": name,\n\t}\n}\n\nfunc InstanceFromPod(pod v1.Pod) *types.InferenceDeploymentInstance {\n\ti := &types.InferenceDeploymentInstance{\n\t\tSpec: types.InferenceDeploymentInstanceSpec{\n\t\t\tNamespace:      pod.Namespace,\n\t\t\tName:           pod.Name,\n\t\t\tOwnerReference: pod.Labels[consts.LabelInferenceName],\n\t\t},\n\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\tStartTime: pod.Status.StartTime.Time,\n\t\t\tReason:    pod.Status.Reason,\n\t\t\tMessage:   pod.Status.Message,\n\t\t},\n\t}\n\n\tswitch pod.Status.Phase {\n\tcase v1.PodRunning:\n\t\ti.Status.Phase = types.InstancePhaseRunning\n\tcase v1.PodPending:\n\t\ti.Status.Phase = types.InstancePhasePending\n\t\tfor _, c := range pod.Status.Conditions {\n\t\t\tif c.Type == v1.PodScheduled && c.Status == v1.ConditionFalse {\n\t\t\t\ti.Status.Phase = types.InstancePhaseScheduling\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tcase v1.PodFailed:\n\t\ti.Status.Phase = types.InstancePhaseFailed\n\tcase v1.PodSucceeded:\n\t\ti.Status.Phase = types.InstancePhaseSucceeded\n\tcase v1.PodUnknown:\n\t\ti.Status.Phase = types.InstancePhaseUnknown\n\t}\n\n\tif pod.Status.ContainerStatuses[0].Started != nil &&\n\t\t!*pod.Status.ContainerStatuses[0].Started {\n\t\ti.Status.Phase = types.InstancePhaseCreating\n\t\tif pod.Status.ContainerStatuses[0].State.Waiting != nil {\n\t\t\ti.Status.Reason = pod.Status.ContainerStatuses[0].State.Waiting.Reason\n\t\t\ti.Status.Message = pod.Status.ContainerStatuses[0].State.Waiting.Message\n\t\t\ti.Status.Phase = types.InstancePhase(\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Waiting.Reason)\n\t\t} else if pod.Status.ContainerStatuses[0].State.Running != nil {\n\t\t\ti.Status.Phase = 
types.InstancePhaseInitializing\n\t\t}\n\t}\n\treturn i\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/instance_test.go",
    "content": "package k8s\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\ttypes \"github.com/tensorchord/openmodelz/agent/api/types\"\n\t. \"github.com/tensorchord/openmodelz/modelzetes/pkg/pointer\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar (\n\tmock_time, _ = time.Parse(\"2006-01-02\", \"2023-08-31\")\n)\n\nfunc Test_InstanceFromPod(t *testing.T) {\n\tscenarios := []struct {\n\t\tname     string\n\t\tpod      v1.Pod\n\t\texpected types.InferenceDeploymentInstance\n\t}{\n\t\t{\n\t\t\t\"basic pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase running pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodRunning,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhaseRunning,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase pending pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodPending,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhasePending,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase scheduling 
pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodPending,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{},\n\t\t\t\t\t},\n\t\t\t\t\tConditions: []v1.PodCondition{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType:   v1.PodScheduled,\n\t\t\t\t\t\t\tStatus: v1.ConditionFalse,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhaseScheduling,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase failed pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodFailed,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhaseFailed,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase succeed pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodSucceeded,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhaseSucceeded,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase unknown pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodUnknown,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: 
types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhaseUnknown,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase creating pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodUnknown,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tStarted: Ptr(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhaseCreating,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase initializing pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodUnknown,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tStarted: Ptr(false),\n\t\t\t\t\t\t\tState: v1.ContainerState{\n\t\t\t\t\t\t\t\tRunning: Ptr(v1.ContainerStateRunning{\n\t\t\t\t\t\t\t\t\tStartedAt: metav1.NewTime(mock_time),\n\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhaseInitializing,\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"phase waiting pod\",\n\t\t\tv1.Pod{\n\t\t\t\tStatus: v1.PodStatus{\n\t\t\t\t\tPhase:     v1.PodUnknown,\n\t\t\t\t\tStartTime: Ptr(metav1.NewTime(mock_time)),\n\t\t\t\t\tContainerStatuses: []v1.ContainerStatus{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tStarted: Ptr(false),\n\t\t\t\t\t\t\tState: v1.ContainerState{\n\t\t\t\t\t\t\t\tWaiting: Ptr(v1.ContainerStateWaiting{\n\t\t\t\t\t\t\t\t\tReason:  \"mock-reason\",\n\t\t\t\t\t\t\t\t\tMessage: 
\"mock-message\",\n\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\ttypes.InferenceDeploymentInstance{\n\t\t\t\tStatus: types.InferenceDeploymentInstanceStatus{\n\t\t\t\t\tPhase:     types.InstancePhase(\"mock-reason\"),\n\t\t\t\t\tReason:    \"mock-reason\",\n\t\t\t\t\tMessage:   \"mock-message\",\n\t\t\t\t\tStartTime: mock_time,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, s := range scenarios {\n\t\tt.Run(s.name, func(t *testing.T) {\n\t\t\tinstance := InstanceFromPod(s.pod)\n\t\t\tif diff := cmp.Diff(s.expected, *instance); diff != \"\" {\n\t\t\t\tt.Errorf(\"Create instance from pod: expected %v, got %v\", s.expected, instance)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/log.go",
    "content": "// Copyright 2020 OpenFaaS Author(s)\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\n// LogRequestor implements the Requestor interface for k8s\ntype LogRequestor struct {\n\tclient            kubernetes.Interface\n\tfunctionNamespace string\n}\n\n// NewLogRequestor returns a new logs.Requestor that uses kail to select and follow pod logs\nfunc NewLogRequestor(client kubernetes.Interface, functionNamespace string) *LogRequestor {\n\treturn &LogRequestor{\n\t\tclient:            client,\n\t\tfunctionNamespace: functionNamespace,\n\t}\n}\n\n// Query implements the actual Swarm logs request logic for the Requestor interface\n// This implementation ignores the r.Limit value because the OF-Provider already handles server side\n// line limits.\nfunc (l LogRequestor) Query(ctx context.Context, r types.LogRequest) (<-chan types.Message, error) {\n\tns := l.functionNamespace\n\n\tif len(r.Namespace) > 0 && strings.ToLower(r.Namespace) != \"kube-system\" {\n\t\tns = r.Namespace\n\t}\n\n\tvar since *time.Time\n\tif r.Since != \"\" {\n\t\tbuf, err := time.Parse(time.RFC3339, r.Since)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsince = &buf\n\t}\n\n\tlogStream, err := GetLogs(ctx, l.client, r.Name, ns, int64(r.Tail), since, r.Follow)\n\tif err != nil {\n\t\tlog.Printf(\"LogRequestor: get logs failed: %s\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tmsgStream := make(chan types.Message, LogBufferSize)\n\tgo func() {\n\t\tdefer close(msgStream)\n\t\t// here we depend on the fact that logStream will close when the context is cancelled,\n\t\t// this ensures that the go routine will resolve\n\t\tfor msg := range logStream {\n\t\t\tmsgStream <- types.Message{\n\t\t\t\tTimestamp: msg.Timestamp,\n\t\t\t\tText:      
msg.Text,\n\t\t\t\tName:      msg.FunctionName,\n\t\t\t\tInstance:  msg.PodName,\n\t\t\t\tNamespace: msg.Namespace,\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn msgStream, nil\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/logs.go",
    "content": "package k8s\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/pkg/errors\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tk8serrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/informers/internalinterfaces\"\n\t\"k8s.io/client-go/kubernetes\"\n\tv1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n\t\"k8s.io/client-go/tools/cache\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n)\n\nconst (\n\t// podInformerResync is the period between cache syncs in the pod informer\n\tpodInformerResync = 5 * time.Second\n\n\t// defaultLogSince is the fallback log stream history\n\tdefaultLogSince = 5 * time.Minute\n\n\t// LogBufferSize number of log messages that may be buffered\n\tLogBufferSize = 500 * 2\n)\n\n// Log is the object which will be used together with the template to generate\n// the output.\ntype Log struct {\n\t// Text is the log message itself\n\tText string `json:\"text\"`\n\n\t// Namespace of the pod\n\tNamespace string `json:\"namespace\"`\n\n\t// PodName of the instance\n\tPodName string `json:\"podName\"`\n\n\t// FunctionName of the pod\n\tFunctionName string `json:\"FunctionName\"`\n\n\t// Timestamp of the message\n\tTimestamp time.Time `json:\"timestamp\"`\n}\n\n// GetLogs returns a channel of logs for the given function\nfunc GetLogs(ctx context.Context, client kubernetes.Interface, functionName, namespace string, tail int64, since *time.Time, follow bool) (<-chan Log, error) {\n\tadded, err := startFunctionPodInformer(ctx, client, functionName, namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogs := make(chan Log, LogBufferSize)\n\n\tgo func() {\n\t\tvar watching uint\n\t\tdefer close(logs)\n\n\t\tfinished := make(chan error)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-finished:\n\t\t\t\twatching--\n\t\t\t\tif 
watching == 0 && !follow {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase p := <-added:\n\t\t\t\twatching++\n\t\t\t\tgo func() {\n\t\t\t\t\tfinished <- podLogs(ctx, client.CoreV1().Pods(namespace), p, functionName, namespace, tail, since, follow, logs)\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn logs, nil\n}\n\n// podLogs returns a stream of logs lines from the specified pod\nfunc podLogs(ctx context.Context, i v1.PodInterface, pod, container, namespace string, tail int64, since *time.Time, follow bool, dst chan<- Log) error {\n\tlog.Printf(\"Logger: starting log stream for %s\\n\", pod)\n\tdefer log.Printf(\"Logger: stopping log stream for %s\\n\", pod)\n\n\topts := &corev1.PodLogOptions{\n\t\tFollow:     follow,\n\t\tTimestamps: true,\n\t\tContainer:  container,\n\t}\n\n\tif tail > 0 {\n\t\topts.TailLines = &tail\n\t}\n\n\tif opts.TailLines == nil || since != nil {\n\t\topts.SinceSeconds = parseSince(since)\n\t}\n\n\tstream, err := i.GetLogs(pod, opts).Stream(context.TODO())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stream.Close()\n\n\tdone := make(chan error)\n\tgo func() {\n\t\treader := bufio.NewReader(stream)\n\t\tfor {\n\t\t\tline, err := reader.ReadBytes('\\n')\n\t\t\tif err != nil {\n\t\t\t\tdone <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmsg, ts := extractTimestampAndMsg(string(bytes.Trim(line, \"\\x00\")))\n\t\t\tdst <- Log{\n\t\t\t\tTimestamp:    ts,\n\t\t\t\tText:         msg,\n\t\t\t\tPodName:      pod,\n\t\t\t\tFunctionName: container,\n\t\t\t\tNamespace:    namespace,\n\t\t\t}\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase err := <-done:\n\t\tif err != io.EOF {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc extractTimestampAndMsg(logText string) (string, time.Time) {\n\t// first 32 characters is the k8s timestamp\n\tparts := strings.SplitN(logText, \" \", 2)\n\tts, err := time.Parse(time.RFC3339Nano, parts[0])\n\tif err != nil {\n\t\tlog.Printf(\"error: invalid timestamp '%s'\\n\", parts[0])\n\t\treturn 
\"\", time.Time{}\n\t}\n\n\tif len(parts) == 2 {\n\t\treturn parts[1], ts\n\t}\n\n\treturn \"\", ts\n}\n\n// parseSince returns the time.Duration of the requested Since value _or_ 5 minutes\nfunc parseSince(r *time.Time) *int64 {\n\tvar since int64\n\tif r == nil || r.IsZero() {\n\t\tsince = int64(defaultLogSince.Seconds())\n\t\treturn &since\n\t}\n\tsince = int64(time.Since(*r).Seconds())\n\treturn &since\n}\n\n// startFunctionPodInformer will gather the list of existing Pods for the function, it will then watch\n// and watch for newly added or deleted function instances.\nfunc startFunctionPodInformer(ctx context.Context, client kubernetes.Interface, functionName, namespace string) (<-chan string, error) {\n\tfunctionSelector := &metav1.LabelSelector{\n\t\tMatchLabels: map[string]string{consts.LabelInferenceName: functionName},\n\t}\n\tselector, err := metav1.LabelSelectorAsSelector(functionSelector)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"unable to build function selector\")\n\t\tlog.Printf(\"PodInformer: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"PodInformer: starting informer for %s in: %s\\n\", selector.String(), namespace)\n\tfactory := informers.NewFilteredSharedInformerFactory(\n\t\tclient,\n\t\tpodInformerResync,\n\t\tnamespace,\n\t\twithLabels(selector.String()),\n\t)\n\n\tpodInformer := factory.Core().V1().Pods()\n\tpodsResp, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})\n\tif err != nil {\n\t\tlog.Printf(\"PodInformer: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tpods := podsResp.Items\n\tif len(pods) == 0 {\n\t\terr = errors.New(\"no matching instances found\")\n\t\tlog.Printf(\"PodInformer: %s\", err)\n\t\treturn nil, k8serrors.NewNotFound(corev1.Resource(\"pods\"), selector.String())\n\t}\n\n\t// prepare channel with enough space for the current instance set\n\tadded := make(chan string, 
len(pods))\n\tpodInformer.Informer().AddEventHandler(&podLoggerEventHandler{\n\t\tadded: added,\n\t})\n\n\t// will add existing pods to the chan and then listen for any new pods\n\tgo podInformer.Informer().Run(ctx.Done())\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tclose(added)\n\t}()\n\n\treturn added, nil\n}\n\nfunc withLabels(selector string) internalinterfaces.TweakListOptionsFunc {\n\treturn func(opts *metav1.ListOptions) {\n\t\topts.LabelSelector = selector\n\t}\n}\n\ntype podLoggerEventHandler struct {\n\tcache.ResourceEventHandler\n\tadded   chan<- string\n\tdeleted chan<- string\n}\n\nfunc (h *podLoggerEventHandler) OnAdd(obj interface{}, isInitialList bool) {\n\tpod := obj.(*corev1.Pod)\n\tlog.Printf(\"PodInformer: adding instance: %s\", pod.Name)\n\th.added <- pod.Name\n}\n\nfunc (h *podLoggerEventHandler) OnUpdate(oldObj, newObj interface{}) {\n\t// purposefully empty, we don't need to do anything for logs on update\n}\n\nfunc (h *podLoggerEventHandler) OnDelete(obj interface{}) {\n\t// this may not be needed, the log stream Reader _should_ close on its own without\n\t// us needing to watch and close it\n\t// pod := obj.(*corev1.Pod)\n\t// h.deleted <- pod.Name\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/probes.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n)\n\ntype FunctionProbes struct {\n\tLiveness  *corev1.Probe\n\tReadiness *corev1.Probe\n\tStartup   *corev1.Probe\n}\n\n// MakeProbes returns the liveness and readiness probes\n// by default the health check runs `cat /tmp/.lock` every ten seconds\nfunc (f *FunctionFactory) MakeProbes(port int, httpProbePath string) (*FunctionProbes, error) {\n\tvar handler corev1.ProbeHandler\n\n\tif f.Config.HTTPProbe {\n\t\thandler = corev1.ProbeHandler{\n\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\tPath: httpProbePath,\n\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\tType:   intstr.Int,\n\t\t\t\t\tIntVal: int32(port),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\treturn nil, nil\n\t}\n\n\tprobes := FunctionProbes{}\n\tprobes.Readiness = &corev1.Probe{\n\t\tProbeHandler:        handler,\n\t\tInitialDelaySeconds: f.Config.ReadinessProbe.InitialDelaySeconds,\n\t\tTimeoutSeconds:      int32(f.Config.ReadinessProbe.TimeoutSeconds),\n\t\tPeriodSeconds:       int32(f.Config.ReadinessProbe.PeriodSeconds),\n\t\tSuccessThreshold:    1,\n\t\tFailureThreshold:    3,\n\t}\n\n\tprobes.Liveness = &corev1.Probe{\n\t\tProbeHandler:        handler,\n\t\tInitialDelaySeconds: f.Config.LivenessProbe.InitialDelaySeconds,\n\t\tTimeoutSeconds:      int32(f.Config.LivenessProbe.TimeoutSeconds),\n\t\tPeriodSeconds:       int32(f.Config.LivenessProbe.PeriodSeconds),\n\t\tSuccessThreshold:    1,\n\t\tFailureThreshold:    3,\n\t}\n\n\tprobes.Startup = &corev1.Probe{\n\t\tProbeHandler:        handler,\n\t\tInitialDelaySeconds: f.Config.StartupProbe.InitialDelaySeconds,\n\t\tTimeoutSeconds:      int32(f.Config.StartupProbe.TimeoutSeconds),\n\t\tPeriodSeconds:       int32(f.Config.StartupProbe.PeriodSeconds),\n\t\tSuccessThreshold:    1,\n\t\t// Set 
failure threshold to 30 to allow for slow-starting inferences.\n\t\tFailureThreshold: 30,\n\t}\n\n\treturn &probes, nil\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/probes_test.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\t\"testing\"\n)\n\nfunc Test_makeProbes_useHTTPProbe(t *testing.T) {\n\tf := mockFactory()\n\tf.Config.HTTPProbe = true\n\n\tprobes, err := f.MakeProbes(8080, \"/\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif probes.Readiness.HTTPGet == nil {\n\t\tt.Errorf(\"Readiness probe should have had HTTPGet handler\")\n\t\tt.Fail()\n\t}\n\tif probes.Liveness.HTTPGet == nil {\n\t\tt.Errorf(\"Liveness probe should have had HTTPGet handler\")\n\t\tt.Fail()\n\t}\n}\n\nfunc Test_makeProbes_useCustomDurationHTTPProbe(t *testing.T) {\n\tf := mockFactory()\n\tf.Config.HTTPProbe = true\n\tf.Config.LivenessProbe = &ProbeConfig{\n\t\tPeriodSeconds:       1,\n\t\tTimeoutSeconds:      3,\n\t\tInitialDelaySeconds: 0,\n\t}\n\tf.Config.ReadinessProbe = &ProbeConfig{\n\t\tPeriodSeconds:       1,\n\t\tTimeoutSeconds:      3,\n\t\tInitialDelaySeconds: 0,\n\t}\n\tf.Config.StartupProbe = &ProbeConfig{\n\t\tPeriodSeconds:       1,\n\t\tTimeoutSeconds:      3,\n\t\tInitialDelaySeconds: 0,\n\t}\n\n\tcustomDelay := \"0\"\n\n\tprobes, err := f.MakeProbes(8080, \"/\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif probes.Readiness.HTTPGet == nil {\n\t\tt.Errorf(\"Readiness probe should have had HTTPGet handler\")\n\t\tt.Fail()\n\t}\n\tif probes.Readiness.InitialDelaySeconds != 0 {\n\t\tt.Errorf(\"Readiness probe should have initial delay seconds set to %s\", customDelay)\n\t\tt.Fail()\n\t}\n\n\tif probes.Liveness.HTTPGet == nil {\n\t\tt.Errorf(\"Liveness probe should have had HTTPGet handler\")\n\t\tt.Fail()\n\t}\n\tif probes.Liveness.InitialDelaySeconds != 0 {\n\t\tt.Errorf(\"Readiness probe should have had HTTPGet handler set to %s\", customDelay)\n\t\tt.Fail()\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/proxy.go",
    "content": "// Copyright (c) Alex Ellis 2017. All rights reserved.\n// Copyright 2020 OpenFaaS Author(s)\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"net/url\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/consts\"\n\tcorelister \"k8s.io/client-go/listers/core/v1\"\n)\n\n// watchdogPort for the OpenFaaS function watchdog\nconst watchdogPort = 8080\n\nfunc NewFunctionLookup(ns string, lister corelister.EndpointsLister) *FunctionLookup {\n\treturn &FunctionLookup{\n\t\tDefaultNamespace: ns,\n\t\tEndpointLister:   lister,\n\t\tListers:          map[string]corelister.EndpointsNamespaceLister{},\n\t\tlock:             sync.RWMutex{},\n\t}\n}\n\ntype FunctionLookup struct {\n\tDefaultNamespace string\n\tEndpointLister   corelister.EndpointsLister\n\tListers          map[string]corelister.EndpointsNamespaceLister\n\n\tlock sync.RWMutex\n}\n\nfunc (f *FunctionLookup) GetLister(ns string) corelister.EndpointsNamespaceLister {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\treturn f.Listers[ns]\n}\n\nfunc (f *FunctionLookup) SetLister(ns string, lister corelister.EndpointsNamespaceLister) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tf.Listers[ns] = lister\n}\n\nfunc getNamespace(name, defaultNamespace string) string {\n\tnamespace := defaultNamespace\n\tif strings.Contains(name, \".\") {\n\t\tnamespace = name[strings.LastIndexAny(name, \".\")+1:]\n\t}\n\treturn namespace\n}\n\nfunc (l *FunctionLookup) Resolve(name string) (url.URL, error) {\n\tfunctionName := name\n\tnamespace := getNamespace(name, l.DefaultNamespace)\n\tif err := l.verifyNamespace(namespace); err != nil {\n\t\treturn url.URL{}, err\n\t}\n\n\tif strings.Contains(name, \".\") {\n\t\tfunctionName = strings.TrimSuffix(name, \".\"+namespace)\n\t}\n\n\tnsEndpointLister := l.GetLister(namespace)\n\n\tif nsEndpointLister == nil {\n\t\tl.SetLister(namespace, 
l.EndpointLister.Endpoints(namespace))\n\n\t\tnsEndpointLister = l.GetLister(namespace)\n\t}\n\n\tsvcName := consts.DefaultServicePrefix + functionName\n\n\tsvc, err := nsEndpointLister.Get(svcName)\n\tif err != nil {\n\t\treturn url.URL{}, fmt.Errorf(\"error listing \\\"%s.%s\\\": %s\", svcName, namespace, err.Error())\n\t}\n\n\tif len(svc.Subsets) == 0 {\n\t\treturn url.URL{}, fmt.Errorf(\"no subsets available for \\\"%s.%s\\\"\", svcName, namespace)\n\t}\n\n\tall := len(svc.Subsets[0].Addresses)\n\tif len(svc.Subsets[0].Addresses) == 0 {\n\t\treturn url.URL{}, fmt.Errorf(\"no addresses in subset for \\\"%s.%s\\\"\", svcName, namespace)\n\t}\n\n\ttarget := rand.Intn(all)\n\n\tserviceIP := svc.Subsets[0].Addresses[target].IP\n\tservicePort := svc.Subsets[0].Ports[target].Port\n\n\turlStr := fmt.Sprintf(\"http://%s:%d\", serviceIP, servicePort)\n\n\turlRes, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn url.URL{}, err\n\t}\n\n\treturn *urlRes, nil\n}\n\nfunc (l *FunctionLookup) verifyNamespace(name string) error {\n\tif name != \"kube-system\" {\n\t\treturn nil\n\t}\n\t// ToDo use global namespace parse and validation\n\treturn fmt.Errorf(\"namespace not allowed\")\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/proxy_test.go",
    "content": "package k8s\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\n\tcorelister \"k8s.io/client-go/listers/core/v1\"\n\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n)\n\ntype FakeLister struct {\n}\n\nfunc (f FakeLister) List(selector labels.Selector) (ret []*corev1.Endpoints, err error) {\n\treturn nil, nil\n}\n\nfunc (f FakeLister) Endpoints(namespace string) corelister.EndpointsNamespaceLister {\n\n\treturn FakeNSLister{}\n}\n\ntype FakeNSLister struct {\n}\n\nfunc (f FakeNSLister) List(selector labels.Selector) (ret []*corev1.Endpoints, err error) {\n\treturn nil, nil\n}\n\nfunc (f FakeNSLister) Get(name string) (*corev1.Endpoints, error) {\n\n\t// make sure that we only send the function name to the lister\n\tif strings.Contains(name, \".\") {\n\t\treturn nil, fmt.Errorf(\"can not look up function name with a dot!\")\n\t}\n\n\tep := corev1.Endpoints{\n\t\tSubsets: []corev1.EndpointSubset{{\n\t\t\tAddresses: []corev1.EndpointAddress{{IP: \"127.0.0.1\"}},\n\t\t\tPorts:     []corev1.EndpointPort{{Port: 8080}},\n\t\t}},\n\t}\n\n\treturn &ep, nil\n}\n\nfunc Test_FunctionLookup(t *testing.T) {\n\n\tlister := FakeLister{}\n\n\tresolver := NewFunctionLookup(\"testDefault\", lister)\n\n\tcases := []struct {\n\t\tname     string\n\t\tfuncName string\n\t\texpError string\n\t\texpUrl   string\n\t}{\n\t\t{\n\t\t\tname:     \"function without namespace uses default namespace\",\n\t\t\tfuncName: \"testfunc\",\n\t\t\texpUrl:   \"http://127.0.0.1:8080\",\n\t\t},\n\t\t{\n\t\t\tname:     \"function with namespace uses the given namespace\",\n\t\t\tfuncName: \"testfunc.othernamespace\",\n\t\t\texpUrl:   \"http://127.0.0.1:8080\",\n\t\t},\n\t\t{\n\t\t\tname:     \"url parse errors are returned\",\n\t\t\tfuncName: \"testfunc.kube-system\",\n\t\t\texpError: \"namespace not allowed\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\turl, err := resolver.Resolve(tc.funcName)\n\t\t\tif tc.expError == 
\"\" && err != nil {\n\t\t\t\tt.Fatalf(\"expected no error, got %s\", err)\n\t\t\t}\n\n\t\t\tif tc.expError != \"\" && (err == nil || !strings.Contains(err.Error(), tc.expError)) {\n\t\t\t\tt.Fatalf(\"expected %s, got %s\", tc.expError, err)\n\t\t\t}\n\n\t\t\tif url.String() != tc.expUrl {\n\t\t\t\tt.Fatalf(\"expected url %s, got %s\", tc.expUrl, url.String())\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/secrets.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/pkg/errors\"\n\ttypes \"github.com/tensorchord/openmodelz/agent/api/types\"\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tapiv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\ttypedV1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n)\n\nconst (\n\tsecretsMountPath             = \"/var/modelz/secrets\"\n\tsecretLabel                  = \"app.kubernetes.io/managed-by\"\n\tsecretLabelValue             = \"modelz\"\n\tsecretsProjectVolumeNameTmpl = \"projected-secrets\"\n)\n\n// SecretsClient exposes the standardized CRUD behaviors for Kubernetes secrets.  These methods\n// will ensure that the secrets are structured and labelled correctly for use by the modelz system.\ntype SecretsClient interface {\n\t// List returns a list of available function secrets.  Only the names are returned\n\t// to ensure we do not accidentally read or print the sensitive values during\n\t// read operations.\n\tList(namespace string) (names []string, err error)\n\t// Create adds a new secret, with the appropriate labels and structure to be\n\t// used as a function secret.\n\tCreate(secret types.Secret) error\n\t// Replace updates the value of a function secret\n\tReplace(secret types.Secret) error\n\t// Delete removes a function secret\n\tDelete(name string, namespace string) error\n\t// GetSecrets queries Kubernetes for a list of secrets by name in the given k8s namespace.\n\t// This should only be used if you need access to the actual secret structure/value. 
Specifically,\n\t// inside the FunctionFactory.\n\tGetSecrets(namespace string, secretNames []string) (map[string]*apiv1.Secret, error)\n}\n\n// SecretInterfacer exposes the SecretInterface getter for the k8s client.\n// This is implemented by the CoreV1Interface() interface in the Kubernetes client.\n// The SecretsClient only needs this one interface, but needs to be able to set the\n// namespaces when the interface is instantiated, meaning, we need the Getter and not the\n// SecretInterface itself.\ntype SecretInterfacer interface {\n\t// Secrets returns a SecretInterface scoped to the specified namespace\n\tSecrets(namespace string) typedV1.SecretInterface\n}\n\ntype secretClient struct {\n\tkube SecretInterfacer\n}\n\n// NewSecretsClient constructs a new SecretsClient using the provided Kubernetes client.\nfunc NewSecretsClient(kube kubernetes.Interface) SecretsClient {\n\treturn &secretClient{\n\t\tkube: kube.CoreV1(),\n\t}\n}\n\nfunc (c secretClient) List(namespace string) (names []string, err error) {\n\tres, err := c.kube.Secrets(namespace).List(context.TODO(), c.selector())\n\tif err != nil {\n\t\tlog.Printf(\"failed to list secrets in %s: %v\\n\", namespace, err)\n\t\treturn nil, err\n\t}\n\n\tnames = make([]string, len(res.Items))\n\tfor idx, item := range res.Items {\n\t\t// this is safe because size of names matches res.Items exactly\n\t\tnames[idx] = item.Name\n\t}\n\treturn names, nil\n}\n\nfunc (c secretClient) Create(secret types.Secret) error {\n\terr := c.validateSecret(secret)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := &apiv1.Secret{\n\t\tType: apiv1.SecretTypeOpaque,\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      secret.Name,\n\t\t\tNamespace: secret.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tsecretLabel: secretLabelValue,\n\t\t\t},\n\t\t},\n\t}\n\n\treq.Data = c.getValidSecretData(secret)\n\n\t_, err = c.kube.Secrets(secret.Namespace).Create(context.TODO(), req, metav1.CreateOptions{})\n\tif err != nil 
{\n\t\tlog.Printf(\"failed to create secret %s.%s: %v\\n\", secret.Name, secret.Namespace, err)\n\t\treturn err\n\t}\n\n\tlog.Printf(\"created secret %s.%s\\n\", secret.Name, secret.Namespace)\n\n\treturn nil\n}\n\nfunc (c secretClient) Replace(secret types.Secret) error {\n\terr := c.validateSecret(secret)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkube := c.kube.Secrets(secret.Namespace)\n\tfound, err := kube.Get(context.TODO(), secret.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tlog.Printf(\"can not retrieve secret for update %s.%s: %v\\n\", secret.Name, secret.Namespace, err)\n\t\treturn err\n\t}\n\n\tfound.Data = c.getValidSecretData(secret)\n\n\t_, err = kube.Update(context.TODO(), found, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tlog.Printf(\"can not update secret %s.%s: %v\\n\", secret.Name, secret.Namespace, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c secretClient) Delete(namespace string, name string) error {\n\terr := c.kube.Secrets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})\n\tif err != nil {\n\t\tlog.Printf(\"can not delete %s.%s: %v\\n\", name, namespace, err)\n\t}\n\treturn err\n}\n\nfunc (c secretClient) GetSecrets(namespace string, secretNames []string) (map[string]*apiv1.Secret, error) {\n\tkube := c.kube.Secrets(namespace)\n\topts := metav1.GetOptions{}\n\n\tsecrets := map[string]*apiv1.Secret{}\n\tfor _, secretName := range secretNames {\n\t\tsecret, err := kube.Get(context.TODO(), secretName, opts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsecrets[secretName] = secret\n\t}\n\n\treturn secrets, nil\n}\n\nfunc (c secretClient) selector() metav1.ListOptions {\n\treturn metav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"%s=%s\", secretLabel, secretLabelValue),\n\t}\n}\n\nfunc (c secretClient) validateSecret(secret types.Secret) error {\n\tif strings.TrimSpace(secret.Namespace) == \"\" {\n\t\treturn errors.New(\"namespace may not be empty\")\n\t}\n\n\tif strings.TrimSpace(secret.Name) == 
\"\" {\n\t\treturn errors.New(\"name may not be empty\")\n\t}\n\n\treturn nil\n}\n\nfunc (c secretClient) getValidSecretData(secret types.Secret) map[string][]byte {\n\n\tif len(secret.RawValue) > 0 {\n\t\treturn map[string][]byte{\n\t\t\tsecret.Name: secret.RawValue,\n\t\t}\n\t}\n\n\treturn map[string][]byte{\n\t\tsecret.Name: []byte(secret.Value),\n\t}\n\n}\n\n// ConfigureSecrets will update the Deployment spec to include secrets that have been deployed\n// in the kubernetes cluster.  For each requested secret, we inspect the type and add it to the\n// deployment spec as appropriate: secrets with type `SecretTypeDockercfg/SecretTypeDockerjson`\n// are added as ImagePullSecrets all other secrets are mounted as files in the deployments containers.\nfunc (f *FunctionFactory) ConfigureSecrets(request v2alpha1.Inference, deployment *appsv1.Deployment, existingSecrets map[string]*apiv1.Secret) error {\n\t// Add / reference pre-existing secrets within Kubernetes\n\tsecretVolumeProjections := []apiv1.VolumeProjection{}\n\n\tfor _, secretName := range request.Spec.Secrets {\n\t\tdeployedSecret, ok := existingSecrets[secretName]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"required secret '%s' was not found in the cluster\", secretName)\n\t\t}\n\n\t\tswitch deployedSecret.Type {\n\n\t\tcase apiv1.SecretTypeDockercfg,\n\t\t\tapiv1.SecretTypeDockerConfigJson:\n\n\t\t\tdeployment.Spec.Template.Spec.ImagePullSecrets = append(\n\t\t\t\tdeployment.Spec.Template.Spec.ImagePullSecrets,\n\t\t\t\tapiv1.LocalObjectReference{\n\t\t\t\t\tName: secretName,\n\t\t\t\t},\n\t\t\t)\n\t\tdefault:\n\n\t\t\tprojectedPaths := []apiv1.KeyToPath{}\n\t\t\tfor secretKey := range deployedSecret.Data {\n\t\t\t\tprojectedPaths = append(projectedPaths, apiv1.KeyToPath{Key: secretKey, Path: secretKey})\n\t\t\t}\n\n\t\t\tprojection := &apiv1.SecretProjection{Items: projectedPaths}\n\t\t\tprojection.Name = secretName\n\t\t\tsecretProjection := apiv1.VolumeProjection{\n\t\t\t\tSecret: 
projection,\n\t\t\t}\n\t\t\tsecretVolumeProjections = append(secretVolumeProjections, secretProjection)\n\t\t}\n\t}\n\n\tvolumeName := secretsProjectVolumeNameTmpl\n\tprojectedSecrets := apiv1.Volume{\n\t\tName: volumeName,\n\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\tProjected: &apiv1.ProjectedVolumeSource{\n\t\t\t\tSources: secretVolumeProjections,\n\t\t\t},\n\t\t},\n\t}\n\n\t// remove the existing secrets volume, if we can find it. The update volume will be\n\t// added below\n\texistingVolumes := removeVolume(volumeName, deployment.Spec.Template.Spec.Volumes)\n\tdeployment.Spec.Template.Spec.Volumes = existingVolumes\n\tif len(secretVolumeProjections) > 0 {\n\t\tdeployment.Spec.Template.Spec.Volumes = append(existingVolumes, projectedSecrets)\n\t}\n\n\t// add mount secret as a file\n\tupdatedContainers := []apiv1.Container{}\n\tfor _, container := range deployment.Spec.Template.Spec.Containers {\n\t\tmount := apiv1.VolumeMount{\n\t\t\tName:      volumeName,\n\t\t\tReadOnly:  true,\n\t\t\tMountPath: secretsMountPath,\n\t\t}\n\n\t\t// remove the existing secrets volume mount, if we can find it. We update it later.\n\t\tcontainer.VolumeMounts = removeVolumeMount(volumeName, container.VolumeMounts)\n\t\tif len(secretVolumeProjections) > 0 {\n\t\t\tcontainer.VolumeMounts = append(container.VolumeMounts, mount)\n\t\t}\n\n\t\tupdatedContainers = append(updatedContainers, container)\n\t}\n\n\tdeployment.Spec.Template.Spec.Containers = updatedContainers\n\n\treturn nil\n}\n\n// ReadFunctionSecretsSpec parses the name of the required function secrets. 
This is the inverse of ConfigureSecrets.\nfunc ReadFunctionSecretsSpec(item appsv1.Deployment) []string {\n\tsecrets := []string{}\n\n\tfor _, s := range item.Spec.Template.Spec.ImagePullSecrets {\n\t\tsecrets = append(secrets, s.Name)\n\t}\n\n\tvolumeName := secretsProjectVolumeNameTmpl\n\tvar sourceSecrets []apiv1.VolumeProjection\n\tfor _, v := range item.Spec.Template.Spec.Volumes {\n\t\tif v.Name == volumeName {\n\t\t\tsourceSecrets = v.Projected.Sources\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, s := range sourceSecrets {\n\t\tif s.Secret == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsecrets = append(secrets, s.Secret.Name)\n\t}\n\n\tsort.Strings(secrets)\n\treturn secrets\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/secrets_factory_test.go",
    "content": "// Copyright 2020 OpenFaaS Author(s)\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/tensorchord/openmodelz/modelzetes/pkg/apis/modelzetes/v2alpha1\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tapiv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc Test_ReadFunctionSecretsSpec(t *testing.T) {\n\n\tf := mockFactory()\n\texistingSecrets := map[string]*apiv1.Secret{\n\t\t\"pullsecret\": {Type: apiv1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: apiv1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": []byte(\"contents\")}},\n\t}\n\tfunctionDep := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"testfunc\"},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\tContainers: []apiv1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tcases := []struct {\n\t\tname       string\n\t\treq        v2alpha1.Inference\n\t\tdeployment appsv1.Deployment\n\t\texpected   []string\n\t}{\n\t\t{\n\t\t\tname: \"empty secrets, returns empty slice\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: functionDep,\n\t\t\texpected:   []string{},\n\t\t},\n\t\t{\n\t\t\tname: \"detects and extracts image pull secret\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{\"pullsecret\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: functionDep,\n\t\t\texpected:   []string{\"pullsecret\"},\n\t\t},\n\t\t{\n\t\t\tname: \"detects and extracts projected generic secret\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{\"testsecret\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: 
functionDep,\n\t\t\texpected:   []string{\"testsecret\"},\n\t\t},\n\t\t{\n\t\t\tname: \"detects and extracts both pull secrets and projected generic secret, result is sorted\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{\"pullsecret\", \"testsecret\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: functionDep,\n\t\t\texpected:   []string{\"pullsecret\", \"testsecret\"},\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\terr := f.ConfigureSecrets(tc.req, &tc.deployment, existingSecrets)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error result: got %q\", err)\n\t\t}\n\n\t\tparsedSecrets := ReadFunctionSecretsSpec(tc.deployment)\n\t\tif len(tc.expected) != len(parsedSecrets) {\n\t\t\tt.Fatalf(\"incorrect secret count, expected: %v, got: %v\", tc.expected, parsedSecrets)\n\t\t}\n\n\t\tfor idx, expected := range tc.expected {\n\t\t\tvalue := parsedSecrets[idx]\n\t\t\tif expected != value {\n\t\t\t\tt.Fatalf(\"incorrect secret in idx %d, expected: %q, got: %q\", idx, expected, value)\n\t\t\t}\n\t\t}\n\t}\n\n}\n\nfunc Test_FunctionFactory_ConfigureSecrets(t *testing.T) {\n\tf := mockFactory()\n\texistingSecrets := map[string]*apiv1.Secret{\n\t\t\"pullsecret\": {Type: apiv1.SecretTypeDockercfg},\n\t\t\"testsecret\": {Type: apiv1.SecretTypeOpaque, Data: map[string][]byte{\"filename\": []byte(\"contents\")}},\n\t}\n\n\tbasicDeployment := appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\tContainers: []apiv1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvolumeName := \"projected-secrets\"\n\twithExistingSecret := appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\tContainers: []apiv1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"testfunc\",\n\t\t\t\t\t\t\tImage: 
\"alpine:latest\",\n\t\t\t\t\t\t\tVolumeMounts: []apiv1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []apiv1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tcases := []struct {\n\t\tname       string\n\t\treq        v2alpha1.Inference\n\t\tdeployment appsv1.Deployment\n\t\tvalidator  func(t *testing.T, deployment *appsv1.Deployment)\n\t\terr        error\n\t}{\n\t\t{\n\t\t\tname: \"does not add volume if request secrets is nil\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{},\n\t\t\t},\n\t\t\tdeployment: basicDeployment,\n\t\t\tvalidator:  validateEmptySecretVolumesAndMounts,\n\t\t},\n\t\t{\n\t\t\tname: \"does not add volume if request secrets is nil\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: basicDeployment,\n\t\t\tvalidator:  validateEmptySecretVolumesAndMounts,\n\t\t},\n\t\t{\n\t\t\tname: \"removes all copies of exiting secrets volumes\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: withExistingSecret,\n\t\t\tvalidator:  validateEmptySecretVolumesAndMounts,\n\t\t},\n\t\t{\n\t\t\tname: \"add new secret volume\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{\"pullsecret\", \"testsecret\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: basicDeployment,\n\t\t\tvalidator:  validateNewSecretVolumesAndMounts,\n\t\t},\n\t\t{\n\t\t\tname: \"replaces previous secret mount with new mount\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: 
v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{\"pullsecret\", \"testsecret\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: withExistingSecret,\n\t\t\tvalidator:  validateNewSecretVolumesAndMounts,\n\t\t},\n\t\t{\n\t\t\tname: \"removes secrets volume if request secrets is empty or nil\",\n\t\t\treq: v2alpha1.Inference{\n\t\t\t\tSpec: v2alpha1.InferenceSpec{\n\t\t\t\t\tSecrets: []string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdeployment: withExistingSecret,\n\t\t\tvalidator:  validateEmptySecretVolumesAndMounts,\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\terr := f.ConfigureSecrets(tc.req, &tc.deployment, existingSecrets)\n\t\tif err != tc.err {\n\t\t\tt.Errorf(\"unexpected error result: got %v, expected %v\", err, tc.err)\n\t\t}\n\n\t\ttc.validator(t, &tc.deployment)\n\t}\n}\n\nfunc validateEmptySecretVolumesAndMounts(t *testing.T, deployment *appsv1.Deployment) {\n\tnumVolumes := len(deployment.Spec.Template.Spec.Volumes)\n\tif numVolumes != 0 {\n\t\tfmt.Printf(\"%+v\", deployment.Spec.Template.Spec.Volumes)\n\t\tt.Errorf(\"Incorrect number of volumes: expected 0, got %d\", numVolumes)\n\t}\n\n\tc := deployment.Spec.Template.Spec.Containers[0]\n\tnumVolumeMounts := len(c.VolumeMounts)\n\tif numVolumeMounts != 0 {\n\t\tt.Errorf(\"Incorrect number of volumes mounts: expected 0, got %d\", numVolumeMounts)\n\t}\n}\n\nfunc validateNewSecretVolumesAndMounts(t *testing.T, deployment *appsv1.Deployment) {\n\tnumVolumes := len(deployment.Spec.Template.Spec.Volumes)\n\tif numVolumes != 1 {\n\t\tt.Errorf(\"Incorrect number of volumes: expected 1, got %d\", numVolumes)\n\t}\n\n\tvolume := deployment.Spec.Template.Spec.Volumes[0]\n\tif volume.Name != \"projected-secrets\" {\n\t\tt.Errorf(\"Incorrect volume name: expected \\\"projected-secrets\\\", got \\\"%s\\\"\", volume.Name)\n\t}\n\n\tif volume.VolumeSource.Projected == nil {\n\t\tt.Error(\"Secrets volume is not a projected volume type\")\n\t}\n\n\tif volume.VolumeSource.Projected.Sources[0].Secret.Items[0].Key != 
\"filename\" {\n\t\tt.Error(\"Project secret not constructed correctly\")\n\t}\n\n\tc := deployment.Spec.Template.Spec.Containers[0]\n\tnumVolumeMounts := len(c.VolumeMounts)\n\tif numVolumeMounts != 1 {\n\t\tt.Errorf(\"Incorrect number of volumes mounts: expected 1, got %d\", numVolumeMounts)\n\t}\n\n\tmount := c.VolumeMounts[0]\n\tif mount.Name != \"projected-secrets\" {\n\t\tt.Errorf(\"Incorrect volume mounts: expected \\\"projected-secrets\\\", got \\\"%s\\\"\", mount.Name)\n\t}\n\n\tif mount.MountPath != secretsMountPath {\n\t\tt.Errorf(\"Incorrect volume mount path: expected \\\"%s\\\", got \\\"%s\\\"\", secretsMountPath, mount.MountPath)\n\t}\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/securityContext.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n)\n\n// SecurityContextUserID is the user id that is set when DeployHandlerConfig.SetNonRootUser is true.\n// value >10000 per the suggestion from https://kubesec.io/basics/containers-securitycontext-runasuser/\nconst SecurityContextUserID = int64(12000)\n\n// ConfigureContainerUserID sets the UID to 12000 for the function Container.  Defaults to user\n// specified in image metadata if `SetNonRootUser` is `false`. Root == 0.\nfunc (f *FunctionFactory) ConfigureContainerUserID(deployment *appsv1.Deployment) {\n\tuserID := SecurityContextUserID\n\tvar functionUser *int64\n\n\tif f.Config.SetNonRootUser {\n\t\tfunctionUser = &userID\n\t}\n\n\tif deployment.Spec.Template.Spec.Containers[0].SecurityContext == nil {\n\t\tdeployment.Spec.Template.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{}\n\t}\n\n\tdeployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser = functionUser\n}\n\n// ConfigureReadOnlyRootFilesystem will create or update the required settings and mounts to ensure\n// that the ReadOnlyRootFilesystem setting works as expected, meaning:\n//  1. when ReadOnlyRootFilesystem is true, the security context of the container will have ReadOnlyRootFilesystem also\n//     marked as true and a new `/tmp` folder mount will be added to the deployment spec\n//  2. 
when ReadOnlyRootFilesystem is false, the security context of the container will also have ReadOnlyRootFilesystem set\n//     to false and there will be no mount for the `/tmp` folder\n//\n// This method is safe for both create and update operations.\nfunc (f *FunctionFactory) ConfigureReadOnlyRootFilesystem(deployment *appsv1.Deployment) {\n\treadonly := false\n\tif deployment.Spec.Template.Spec.Containers[0].SecurityContext != nil {\n\t\tdeployment.Spec.Template.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = &readonly\n\t} else {\n\t\tdeployment.Spec.Template.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{\n\t\t\tReadOnlyRootFilesystem: &readonly,\n\t\t}\n\t}\n\n\texistingVolumes := removeVolume(\"temp\", deployment.Spec.Template.Spec.Volumes)\n\tdeployment.Spec.Template.Spec.Volumes = existingVolumes\n\n\texistingMounts := removeVolumeMount(\"temp\", deployment.Spec.Template.Spec.Containers[0].VolumeMounts)\n\tdeployment.Spec.Template.Spec.Containers[0].VolumeMounts = existingMounts\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/securityContext_test.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\t\"testing\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tapiv1 \"k8s.io/api/core/v1\"\n)\n\nfunc readOnlyRootDisabled(t *testing.T, deployment *appsv1.Deployment) {\n\tif len(deployment.Spec.Template.Spec.Volumes) != 0 {\n\t\tt.Error(\"Volumes should be empty if ReadOnlyRootFilesystem is false\")\n\t}\n\n\tif len(deployment.Spec.Template.Spec.Containers[0].VolumeMounts) != 0 {\n\t\tt.Error(\"VolumeMounts should be empty if ReadOnlyRootFilesystem is false\")\n\t}\n\tfunctionContainer := deployment.Spec.Template.Spec.Containers[0]\n\n\tif functionContainer.SecurityContext != nil {\n\t\tif *functionContainer.SecurityContext.ReadOnlyRootFilesystem != false {\n\t\t\tt.Error(\"ReadOnlyRootFilesystem should be false on the container SecurityContext\")\n\t\t}\n\t}\n}\n\nfunc Test_configureReadOnlyRootFilesystem_Disabled_To_Disabled(t *testing.T) {\n\tf := mockFactory()\n\tdeployment := &appsv1.Deployment{\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\tContainers: []apiv1.Container{\n\t\t\t\t\t\t{Name: \"testfunc\", Image: \"alpine:latest\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tf.ConfigureReadOnlyRootFilesystem(deployment)\n\treadOnlyRootDisabled(t, deployment)\n}\n"
  },
  {
    "path": "modelzetes/pkg/k8s/utils.go",
    "content": "// Copyright 2020 OpenFaaS Authors\n// Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage k8s\n\nimport (\n\tcorev1 \"k8s.io/api/core/v1\"\n)\n\n// removeVolume returns a Volume slice with any volumes matching volumeName removed.\n// Uses the filter without allocation technique\n// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating\nfunc removeVolume(volumeName string, volumes []corev1.Volume) []corev1.Volume {\n\tif volumes == nil {\n\t\treturn []corev1.Volume{}\n\t}\n\n\tnewVolumes := volumes[:0]\n\tfor _, v := range volumes {\n\t\tif v.Name != volumeName {\n\t\t\tnewVolumes = append(newVolumes, v)\n\t\t}\n\t}\n\n\treturn newVolumes\n}\n\n// removeVolumeMount returns a VolumeMount slice with any mounts matching volumeName removed\n// Uses the filter without allocation technique\n// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating\nfunc removeVolumeMount(volumeName string, mounts []corev1.VolumeMount) []corev1.VolumeMount {\n\tif mounts == nil {\n\t\treturn []corev1.VolumeMount{}\n\t}\n\n\tnewMounts := mounts[:0]\n\tfor _, v := range mounts {\n\t\tif v.Name != volumeName {\n\t\t\tnewMounts = append(newMounts, v)\n\t\t}\n\t}\n\n\treturn newMounts\n}\n"
  },
  {
    "path": "modelzetes/pkg/pointer/ptr.go",
    "content": "package util\n\nfunc Ptr[T any](v T) *T {\n\treturn &v\n}\n\nfunc PtrCopy[T any](v T) *T {\n\tn := new(T)\n\t*n = v\n\treturn n\n}\n"
  },
  {
    "path": "modelzetes/pkg/signals/signal.go",
    "content": "package signals\n\nimport (\n\t\"os\"\n\t\"os/signal\"\n)\n\nvar onlyOneSignalHandler = make(chan struct{})\n\n// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned\n// which is closed on one of these signals. If a second signal is caught, the program\n// is terminated with exit code 1.\nfunc SetupSignalHandler() (stopCh <-chan struct{}) {\n\tclose(onlyOneSignalHandler) // panics when called twice\n\n\tstop := make(chan struct{})\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, shutdownSignals...)\n\tgo func() {\n\t\t<-c\n\t\tclose(stop)\n\t\t<-c\n\t\tos.Exit(1) // second signal. Exit directly.\n\t}()\n\n\treturn stop\n}\n"
  },
  {
    "path": "modelzetes/pkg/signals/signal_posix.go",
    "content": "//go:build !windows\n// +build !windows\n\npackage signals\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nvar shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}\n"
  },
  {
    "path": "modelzetes/pkg/signals/signal_windows.go",
    "content": "package signals\n\nimport (\n\t\"os\"\n)\n\nvar shutdownSignals = []os.Signal{os.Interrupt}\n"
  },
  {
    "path": "modelzetes/pkg/version/version.go",
    "content": "/*\n   Copyright The TensorChord Inc.\n   Copyright The BuildKit Authors.\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\t// Package is filled at linking time\n\tPackage = \"github.com/tensorchord/openmodelz/modelzetes\"\n\n\t// Revision is filled with the VCS (e.g. git) revision being used to build\n\t// the program at linking time.\n\tRevision = \"\"\n\n\tversion         = \"0.0.0+unknown\"\n\tbuildDate       = \"1970-01-01T00:00:00Z\" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'`\n\tgitCommit       = \"\"                     // output from `git rev-parse HEAD`\n\tgitTag          = \"\"                     // output from `git describe --exact-match --tags HEAD` (if clean tree state)\n\tgitTreeState    = \"\"                     // determined from `git status --porcelain`. 
either 'clean' or 'dirty'\n\tdevelopmentFlag = \"false\"\n)\n\n// Version contains envd version information\ntype Version struct {\n\tVersion      string\n\tBuildDate    string\n\tGitCommit    string\n\tGitTag       string\n\tGitTreeState string\n\tGoVersion    string\n\tCompiler     string\n\tPlatform     string\n}\n\nfunc (v Version) String() string {\n\treturn v.Version\n}\n\n// SetGitTagForE2ETest sets the gitTag for test purpose.\nfunc SetGitTagForE2ETest(tag string) {\n\tgitTag = tag\n}\n\n// GetEnvdVersion gets Envd version information\nfunc GetEnvdVersion() string {\n\tvar versionStr string\n\n\tif gitCommit != \"\" && gitTag != \"\" &&\n\t\tgitTreeState == \"clean\" && developmentFlag == \"false\" {\n\t\t// if we have a clean tree state and the current commit is tagged,\n\t\t// this is an official release.\n\t\tversionStr = gitTag\n\t} else {\n\t\t// otherwise formulate a version string based on as much metadata\n\t\t// information we have available.\n\t\tif strings.HasPrefix(version, \"v\") {\n\t\t\tversionStr = version\n\t\t} else {\n\t\t\tversionStr = \"v\" + version\n\t\t}\n\t\tif len(gitCommit) >= 7 {\n\t\t\tversionStr += \"+\" + gitCommit[0:7]\n\t\t\tif gitTreeState != \"clean\" {\n\t\t\t\tversionStr += \".dirty\"\n\t\t\t}\n\t\t} else {\n\t\t\tversionStr += \"+unknown\"\n\t\t}\n\t}\n\treturn versionStr\n}\n\n// GetVersion returns the version information\nfunc GetVersion() Version {\n\treturn Version{\n\t\tVersion:      GetEnvdVersion(),\n\t\tBuildDate:    buildDate,\n\t\tGitCommit:    gitCommit,\n\t\tGitTag:       gitTag,\n\t\tGitTreeState: gitTreeState,\n\t\tGoVersion:    runtime.Version(),\n\t\tCompiler:     runtime.Compiler,\n\t\tPlatform:     fmt.Sprintf(\"%s/%s\", runtime.GOOS, runtime.GOARCH),\n\t}\n}\n\nvar (\n\treRelease *regexp.Regexp\n\treDev     *regexp.Regexp\n\treOnce    sync.Once\n)\n\nfunc UserAgent() string {\n\tversion := GetVersion().String()\n\n\treOnce.Do(func() {\n\t\treRelease = 
regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+$`)\n\t\treDev = regexp.MustCompile(`^(v[0-9]+\\.[0-9]+)\\.[0-9]+`)\n\t})\n\n\tif matches := reRelease.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1]\n\t} else if matches := reDev.FindAllStringSubmatch(version, 1); len(matches) > 0 {\n\t\tversion = matches[0][1] + \"-dev\"\n\t}\n\n\treturn \"envd/\" + version\n}\n"
  },
  {
    "path": "modelzetes/vendor.go",
    "content": "//go:build vendor\n\npackage main\n\n// This file exists to trick \"go mod vendor\" to include \"main\" packages.\n// It is not expected to build, the build tag above is only to prevent this\n// file from being included in builds.\n\nimport (\n\t_ \"k8s.io/code-generator/cmd/client-gen\"\n\t_ \"k8s.io/code-generator/cmd/deepcopy-gen\"\n\t_ \"k8s.io/code-generator/cmd/defaulter-gen\"\n\t_ \"k8s.io/code-generator/cmd/informer-gen\"\n\t_ \"k8s.io/code-generator/cmd/lister-gen\"\n\t_ \"k8s.io/code-generator/cmd/openapi-gen\"\n)\n\nfunc main() {}\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[project]\nname = \"openmodelz\"\ndescription = \"Simplify machine learning deployment for any environments.\"\nreadme = \"README.md\"\nauthors = [\n    {name = \"TensorChord\", email = \"modelz@tensorchord.ai\"},\n]\nlicense = {text = \"Apache-2.0\"}\nkeywords = [\"machine learning\", \"deep learning\", \"model serving\"]\ndynamic = [\"version\"]\nrequires-python = \">=2.7\"\nclassifiers = [\n    \"Environment :: GPU\",\n    \"Intended Audience :: Developers\",\n    \"License :: OSI Approved :: Apache Software License\",\n    \"Programming Language :: Python :: 2\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.8\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n    \"Topic :: Software Development :: Libraries :: Python Modules\",\n    \"Topic :: Software Development :: Build Tools\",\n]\n\n[project.urls]\nhomepage = \"https://modelz.ai/\"\ndocumentation = \"https://docs.open.modelz.ai/\"\nrepository = \"https://github.com/tensorchord/openmodelz\"\nchangelog = \"https://github.com/tensorchord/openmodelz/releases\"\n\n[tool.cibuildwheel]\nbuild-frontend = \"build\"\narchs = [\"auto64\"]\nskip = \"pp*\" # skip pypy\nbefore-all = \"\"\nenvironment = { PIP_NO_CLEAN=\"yes\" }\nbefore-build = \"ls -la mdz/bin\" # help to debug\n\n[project.optional-dependencies]\n\n[project.scripts]\n\n[build-system]\nrequires = [\"setuptools>=45\", \"wheel\", \"setuptools_scm\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[tool.setuptools_scm]\nwrite_to = \"mdz/_version.py\"\n"
  },
  {
    "path": "setup.py",
    "content": "import os\nimport subprocess\nimport shlex\nfrom wheel.bdist_wheel import bdist_wheel\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools_scm import get_version\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n    readme = f.read()\n\nclass bdist_wheel_universal(bdist_wheel):\n    def get_tag(self):\n        *_, plat = super().get_tag()\n        return \"py2.py3\", \"none\", plat\n\n\ndef build_if_not_exist():\n    if os.path.isfile(\"mdz/bin/mdz\"):\n        return\n    version = get_version()\n    print(f\"build mdz from source ({version})\")\n    errno = subprocess.call(shlex.split(\n        f\"make build-release GIT_TAG={version}\"\n    ), cwd=\"mdz\")\n    assert errno == 0, f\"mdz build failed with code {errno}\"\n\n\nclass ModelzExtension(Extension):\n    \"\"\"A custom extension to define the OpenModelz extension.\"\"\"\n\n\nclass ModelzBuildExt(build_ext):\n    def build_extension(self, ext: Extension) -> None:\n        if not isinstance(ext, ModelzExtension):\n            return super().build_extension(ext)\n\n        build_if_not_exist()\n\n\nsetup(\n    name=\"openmodelz\",\n    use_scm_version=True,\n    description=\"Simplify machine learning deployment for any environments.\",\n    long_description=readme,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/tensorchord/openmodelz\",\n    license=\"Apache License 2.0\",\n    author=\"TensorChord\",\n    author_email=\"modelz@tensorchord.ai\",\n    packages=find_packages(\"mdz\"),\n    include_package_data=True,\n    data_files=[(\"bin\", [\"mdz/bin/mdz\"])],\n    zip_safe=False,\n    ext_modules=[\n        ModelzExtension(name=\"mdz\", sources=[\"mdz/*\"]),\n    ],\n    cmdclass=dict(\n        build_ext=ModelzBuildExt,\n        bdist_wheel=bdist_wheel_universal,\n    ),\n)\n"
  },
  {
    "path": "typos.toml",
    "content": "[files]\nextend-exclude = [\"CHANGELOG.md\", \"go.mod\", \"go.sum\"]\n[default.extend-words]\nrequestor = \"requestor\"\nba = \"ba\"\n"
  }
]