[
  {
    "path": ".dockerignore",
    "content": "node_modules\n"
  },
  {
    "path": ".editorconfig",
    "content": "# EditorConfig is awesome: http://EditorConfig.org\n\n# https://github.com/jokeyrhyme/standard-editorconfig\n\n# top-most EditorConfig file\nroot = true\n\n# defaults\n[*]\ncharset = utf-8\nend_of_line = lf\ninsert_final_newline = true\ntrim_trailing_whitespace = true\nindent_size = 2\nindent_style = space\n\n[*.md]\ntrim_trailing_whitespace = false\n"
  },
  {
    "path": ".fmf/version",
    "content": "1\n"
  },
  {
    "path": ".gitattributes",
    "content": "* text=auto eol=lf\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.yml",
    "content": "name: Bug 🐞\ndescription: Report a bug report\ntype: bug\n\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Before opening a bug report, please search for the behaviour in the existing issues.\n\n        ---\n\n        Thank you for taking the time to file a bug report. To address this bug as fast as possible, we need some information.\n\n  - type: textarea\n    id: bug-description\n    attributes:\n      label: Bug description\n      description: What happened?\n    validations:\n      required: true\n\n  - type: input\n    id: os\n    attributes:\n      label: Operating system\n      description: \"Which operating system are you on? Please provide the version as well. If you are on a Mac, please specify Apple silicon or Intel.\"\n      placeholder: \"macOS Ventura 13.4 (Arm), Windows 11\"\n    validations:\n      required: true\n\n  - type: dropdown\n    id: install\n    attributes:\n      label: Installation Method\n      description: \"How did you install AI Lab ?\"\n      options:\n        - \"from `ghcr.io/containers/podman-desktop-extension-ai-lab` container image\"\n        - \"from Podman-Desktop extension page\"\n        - \"Other\"\n\n  - type: dropdown\n    id: version\n    attributes:\n      label: Version\n      description: What version of the software are you running?\n      options:\n        - \"next (development version)\"\n        - \"1.3.x\"\n        - \"1.2.x\"\n        - \"1.1.x\"\n        - \"1.0.x\"\n    validations:\n      required: true\n\n  - type: textarea\n    id: steps\n    attributes:\n      label: Steps to reproduce\n      description: What steps do we need to take to reproduce this error?\n\n  - type: textarea\n    id: logs\n    attributes:\n      label: Relevant log output\n      description: If applicable, provide relevant log output.\n      render: shell\n\n  - type: textarea\n    id: additional-context\n    attributes:\n      label: Additional context\n      description: Add any other context 
or screenshots here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "blank_issues_enabled: false\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/epic.yml",
    "content": "name: Epic ⚡\ndescription: A high-level feature\ntype: epic\n\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Epics are normally created by the development team, to group a set of related features and plan work across multiple sprints.\n        The features this epic includes are referenced with the text of the epic.\n\n  - type: textarea\n    id: domain\n    attributes:\n      label: Epic domain\n      description: A clear and concise description of the feature area or domain that this epic will address.\n      placeholder: AI-Lab should support [...]\n    validations:\n      required: true\n\n  - type: textarea\n    id: additional-context\n    attributes:\n      label: Additional context\n      description: Add any other context or screenshots here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.yml",
    "content": "name: Feature 💡\ndescription: A request, idea, or new functionality\ntype: feature\n\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Before opening a feature request, please search for potential existing issues.\n\n        ---\n\n        Thank you for taking the time to file a feature request, we appreciate and value your time to help the project!\n\n  - type: textarea\n    id: problem\n    attributes:\n      label: Is your feature request related to a problem? Please describe\n      description: A clear and concise description of what the problem is.\n      placeholder: I'm always frustrated when [...]\n    validations:\n      required: true\n\n  - type: textarea\n    id: solution\n    attributes:\n      label: Describe the solution you'd like\n      description: A clear and concise description of what you want to happen.\n    validations:\n      required: true\n\n  - type: textarea\n    id: alternatives\n    attributes:\n      label: Describe alternatives you've considered\n      description: A clear and concise description of any alternative solutions or features you've considered.\n\n  - type: textarea\n    id: additional-context\n    attributes:\n      label: Additional context\n      description: Add any other context or screenshots here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/ux-request.yaml",
    "content": "name: UX Request\ndescription: UX Request Form\ntype: UX (design spec)\nlabels: [UX/UI Issue, Graphic design]\n\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Before opening a UX request, please search for existing issues.\n\n        ---\n\n  - type: textarea\n    id: UX-description\n    attributes:\n      label: UX Description\n      description: Describe the request\n    validations:\n      required: true\n\n  - type: dropdown\n    id: request-type\n    attributes:\n      label: Request type\n      description: \"What type of request is this?\"\n      options:\n        - \"A logo design\"\n        - \"An icon\"\n        - \"An infographic/chart\"\n        - \"a template or design for printed materials\"\n        - \"Swag design\"\n        - \"Graphic design not covered by the above\"\n\n  - type: dropdown\n    id: user-experience\n    attributes:\n      label: User Experience Request type\n      description: \"What type of request is this?\"\n      options:\n        - \"UX analysis/suggestions for improvement\"\n        - \"User research\"\n        - \"User testing\"\n        - \"Application mockups/designs\"\n        - \"Website mockups/designs\"\n        - \"Something else UX-related\"\n      \n\n  - type: textarea\n    id: Contacts\n    attributes:\n      label: Engineering Contact\n      description: Who is the primary engineer the design team can speak with about this issue?\n\n  - type: textarea\n    id: Deadlne\n    attributes:\n      label: Deadline for request\n      description: When do you need this?  If this is for an event, please let us know the date of the evnt and any lead time you need to get materials produced.\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "### What does this PR do?\n\n### Screenshot / video of UI\n\n<!-- If this PR is changing UI, please include\nscreenshots or screencasts showing the difference -->\n\n### What issues does this PR fix or reference?\n\n<!-- Include any related issues from Podman Desktop\nrepository (or from another issue tracker). -->\n\n### How to test this PR?\n\n<!-- Please explain steps to reproduce -->\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "# Set update schedule for GitHub Actions\n\nversion: 2\nupdates:\n  - package-ecosystem: \"github-actions\"\n    directory: \"/\"\n    schedule:\n      interval: \"daily\"\n    open-pull-requests-limit: 10\n\n  - package-ecosystem: \"npm\"\n    directory: \"/\"\n    schedule:\n      interval: daily\n    open-pull-requests-limit: 10\n    groups:\n      fortawesome:\n        applies-to: version-updates\n        patterns:\n          - \"@fortawesome/*\"\n      ai-sdk:\n        applies-to: version-updates\n        patterns:\n          - \"@ai-sdk/mcp\"\n          - \"ai\"\n"
  },
  {
    "path": ".github/workflows/ai-lab-e2e-nightly-windows.yaml",
    "content": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: Podman Desktop AI Lab E2E Nightly\nrun-name: Podman Desktop AI Lab E2E Nightly ${{ github.event_name == 'push' && '[Recipe change]' || '' }}\n\non:\n  schedule:\n    - cron:  '0 2 * * *'\n  push:\n    paths:\n      - 'packages/backend/src/assets/ai.json'\n  workflow_dispatch:\n    inputs:\n      podman_desktop_repo_args:\n        default: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'\n        description: 'Podman Desktop repo fork and branch'\n        type: string\n        required: true\n      ext_repo_options:\n        default: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'\n        description: 'Podman Desktop Extension repo, fork and branch'\n        type: string\n        required: true\n      ext_tests_options:\n        default: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=1,EXT_TEST_GPU_SUPPORT_ENABLED=0'\n        description: 'E2E tests options in format VAR1=xxx,VAR2=true,VAR3=15 etc.'\n        type: string\n        required: true\n      npm_target:\n        default: 'test:e2e'\n        description: 'npm target to run tests'\n        type: string\n        required: true\n      podman_version:\n        default: 'latest'\n        description: 'Podman version (use \"latest\" to auto-fetch latest release, or specify version like \"v5.6.1\")'\n        type: string\n        
required: true\n      podman_options:\n        default: 'INIT=1,START=1,ROOTFUL=1,NETWORKING=0'\n        description: 'Podman machine configuration options, no spaces'\n        type: string\n        required: true\n      env_vars:\n        default: 'TEST_PODMAN_MACHINE=true,ELECTRON_ENABLE_INSPECT=true'\n        description: 'Env. Variables passed into target machine, ie: VAR1=xxx,VAR2=true... use EXT_TEST_RAG_CHATBOT=1 to run RAG Chatbot test\"'\n        type: string\n        required: true\n      pde2e_image_version:\n        default: 'v0.0.3'\n        description: 'PDE2E runner, builder, podman image versions'\n        type: string\n        required: true\n      mapt_params:\n        description: |\n          **Create instance(leave empty to use repo secrets/variables)**\n\n          **Format:** IMAGE=xxx;VERSION_TAG=xxx;CPUS=xxx;MEMORY=xxx;EXCLUDED_REGIONS=xxx\n\n          **Example:** \n            IMAGE=quay.io/redhat-developer/mapt;VERSION_TAG=v0.9.8;CPUS=4;MEMORY=32;EXCLUDED_REGIONS=\"westindia,centralindia,southindia,australiacentral,australiacentral2,australiaeast,australiasoutheast,southafricanorth,southafricawest\"\n        required: false\n        type: string\n\njobs:\n  windows:\n    timeout-minutes: 180\n    name: windows-${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}\n    runs-on: ubuntu-latest\n    strategy:\n      fail-fast: false\n      matrix:\n        windows-version: ['11']\n        windows-featurepack: ['25h2-ent']\n\n    steps:\n    - name: Fetch latest Podman version\n      id: fetch-podman\n      uses: redhat-actions/podman-install/.github/actions/fetch-latest-podman-version-windows@6b757b792b67ec663765a4f2ca36226e12b2f4cd\n      with:\n        version_input: ${{ github.event.inputs.podman_version || 'latest' }}\n        file_type: 'setup.exe'\n        github_token: ${{ secrets.GITHUB_TOKEN }}\n\n    - name: Set the default env. 
variables\n      env:\n        CI: true\n        DEFAULT_PODMAN_DESKTOP_REPO_ARGS: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'\n        DEFAULT_NPM_TARGET: 'test:e2e'\n        DEFAULT_ENV_VARS: 'TEST_PODMAN_MACHINE=true,ELECTRON_ENABLE_INSPECT=true'\n        DEFAULT_PODMAN_OPTIONS: 'INIT=1,START=1,ROOTFUL=1,NETWORKING=0'\n        DEFAULT_EXT_TESTS_OPTIONS: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=1,EXT_TEST_GPU_SUPPORT_ENABLED=0'\n        DEFAULT_EXT_REPO_OPTIONS: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'\n        DEFAULT_PDE2E_IMAGE_VERSION: 'v0.0.3'\n      run: |\n        echo \"NPM_TARGET=${{ github.event.inputs.npm_target || env.DEFAULT_NPM_TARGET }}\" >> $GITHUB_ENV\n        echo \"ENV_VARS=${{ github.event.inputs.env_vars || env.DEFAULT_ENV_VARS }}\" >> $GITHUB_ENV\n        echo \"PODMAN_URL=${{ steps.fetch-podman.outputs.download_url }}\" >> $GITHUB_ENV\n        echo \"PDE2E_IMAGE_VERSION=${{ github.event.inputs.pde2e_image_version || env.DEFAULT_PDE2E_IMAGE_VERSION }}\" >> $GITHUB_ENV\n        echo \"${{ github.event.inputs.podman_desktop_repo_args || env.DEFAULT_PODMAN_DESKTOP_REPO_ARGS }}\" | awk -F ',' \\\n         '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"PD_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n        echo \"${{ github.event.inputs.ext_tests_options || env.DEFAULT_EXT_TESTS_OPTIONS }}\" | awk -F ',' \\\n         '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n        echo \"${{ github.event.inputs.podman_options || env.DEFAULT_PODMAN_OPTIONS }}\" | awk -F ',' \\\n         '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"PODMAN_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n        echo \"${{ github.event.inputs.ext_repo_options || env.DEFAULT_EXT_REPO_OPTIONS }}\" | awk -F ',' \\\n         '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"EXT_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n\n        # For mapt_params, use repo variables directly if input is 
empty\n        if [ -n \"${{ github.event.inputs.mapt_params }}\" ]; then\n          mapt_params=\"${{ github.event.inputs.mapt_params }}\"\n        else\n          mapt_params=\"IMAGE=${{ vars.MAPT_IMAGE }};VERSION_TAG=${{ vars.MAPT_VERSION_TAG }};CPUS=${{ vars.MAPT_CPUS }};MEMORY=${{ vars.MAPT_MEMORY }};EXCLUDED_REGIONS=\\\"${{ vars.MAPT_EXCLUDED_REGIONS }}\\\"\"\n        fi\n        echo \"$mapt_params\" | awk -F ';' '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"MAPT_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n\n    - name: Create instance\n      uses: podman-desktop/e2e/.github/actions/create-instance@213a276952d746324895f63cea0b23083013990f\n      with:\n        mapt-image: ${{ env.MAPT_IMAGE || '' }}\n        mapt-version: ${{ env.MAPT_VERSION_TAG || '' }}\n        windows-version: ${{ matrix.windows-version }}\n        windows-featurepack: ${{ matrix.windows-featurepack }}\n        cpus: ${{ env.MAPT_CPUS || '' }}\n        memory: ${{ env.MAPT_MEMORY || '' }}\n        excluded-regions: ${{ env.MAPT_EXCLUDED_REGIONS || '' }}\n        arm-tenant-id: ${{ secrets.ARM_TENANT_ID }}\n        arm-subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }}\n        arm-client-id: ${{ secrets.ARM_CLIENT_ID }}\n        arm-client-secret: ${{ secrets.ARM_CLIENT_SECRET }}\n\n    - name: Check instance system info\n      uses: podman-desktop/e2e/.github/actions/instance-system-info@3548105f45def129d5e3aaa5a3d922e09ac892d9\n\n    - name: Emulate X session\n      uses: podman-desktop/e2e/.github/actions/emulate-x-session@3548105f45def129d5e3aaa5a3d922e09ac892d9\n\n    - name: Download Podman, do not initialize\n      uses: podman-desktop/e2e/.github/actions/download-podman-nightly@952cafee20ca82b1ce48b29c848bac1c31062245\n      with:\n        podman-image-tag: ${{ env.PDE2E_IMAGE_VERSION }}\n        podman-download-url: ${{ env.PODMAN_URL }}\n\n    - name: Build Podman Desktop Electron Inspect Enabled binary\n      uses: 
podman-desktop/e2e/.github/actions/build-podman-desktop@0c1f0a035e0949941fd6abf959ab556ceec13f03\n      with:\n        fork: ${{ env.PD_FORK }}\n        branch: ${{ env.PD_BRANCH }}\n        env-vars: ${{ env.ENV_VARS }}\n\n    - name: Run Podman Desktop Playwright E2E tests\n      uses: podman-desktop/e2e/.github/actions/run-playwright-test@15b800edab941d394b32aaaa3f7961bb7db7ec3a\n      with:\n        pde2e-runner-tag: ${{ env.PDE2E_IMAGE_VERSION }}\n        podman-desktop-path: true\n        fork-repo: ${{ env.PD_FORK }}\n        branch-name: ${{ env.PD_BRANCH }}\n        ext-repo: ${{ env.EXT_REPO }}\n        ext-fork: ${{ env.EXT_FORK }}\n        ext-branch: ${{ env.EXT_BRANCH }}\n        ext-tests: ${{ env.EXT_RUN_TESTS_FROM_EXTENSION }}\n        npm-target: ${{ env.NPM_TARGET }}\n        podman-init: ${{ env.PODMAN_INIT }}\n        podman-start: ${{ env.PODMAN_START }}\n        rootful: ${{ env.PODMAN_ROOTFUL }}\n        user-networking: ${{ env.PODMAN_NETWORKING }}\n        podman-provider: 'wsl'\n        env-vars: ${{ env.ENV_VARS }}\n        ci-bot-token: ${{ secrets.PODMAN_DESKTOP_BOT_TOKEN }}\n\n    - name: Destroy instance\n      if: always()\n      uses: podman-desktop/e2e/.github/actions/destroy-instance@36e440f2ac18193214f4ffa8f7f1c4c0cb8c9446\n      with:\n        mapt-image: ${{ env.MAPT_IMAGE }}\n        mapt-version: ${{ env.MAPT_VERSION_TAG }}\n        arm-tenant-id: ${{ secrets.ARM_TENANT_ID }}\n        arm-subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }}\n        arm-client-id: ${{ secrets.ARM_CLIENT_ID }}\n        arm-client-secret: ${{ secrets.ARM_CLIENT_SECRET }}\n\n    - name: Publish Test Report\n      uses: mikepenz/action-junit-report@v6\n      if: always()\n      with:\n        annotate_only: true\n        fail_on_failure: true\n        include_passed: true\n        detailed_summary: true\n        require_tests:  true\n        report_paths: '**/*results.xml'\n\n    - name: Upload test artifacts\n      uses: 
actions/upload-artifact@v7\n      if: always()\n      with:\n        name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}\n        path: |\n          results/*\n          !./**/*.gguf\n          !./**/*.bin\n          !./**/output/videos/*\n          !./**/output/traces/*\n\n    - name: Upload test videos\n      uses: actions/upload-artifact@v7\n      if: always()\n      with:\n        name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}-videos\n        path: ./**/output/videos/*\n\n    - name: Upload test traces\n      uses: actions/upload-artifact@v7\n      if: always()\n      with:\n        name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}-traces\n        path: ./**/output/traces/*\n"
  },
  {
    "path": ".github/workflows/build-next.yaml",
    "content": "#\n# Copyright (C) 2023-2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: CI\n\non:\n  push:\n    branches:\n      - 'main'\n\njobs:\n  build:\n    runs-on: ubuntu-22.04\n    steps:\n      - uses: actions/checkout@v6.0.2\n\n      - uses: pnpm/action-setup@v5\n        name: Install pnpm\n        with:\n          run_install: false\n\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 24\n          cache: 'pnpm'\n\n      - name: Execute pnpm\n        run: pnpm install\n\n      - name: Run Build\n        run: pnpm build\n\n      - name: Login to ghcr.io\n        run: podman login --username ${{ github.repository_owner }} --password ${{ secrets.GITHUB_TOKEN }} ghcr.io\n\n      - name: Publish Image\n        id: publish-image\n        run: |\n          IMAGE_NAME=ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab\n          IMAGE_NIGHTLY=${IMAGE_NAME}:nightly\n          IMAGE_SHA=${IMAGE_NAME}:${GITHUB_SHA}\n          podman build -t $IMAGE_NIGHTLY .\n          podman push $IMAGE_NIGHTLY\n          podman tag $IMAGE_NIGHTLY $IMAGE_SHA\n          podman push $IMAGE_SHA\n"
  },
  {
    "path": ".github/workflows/compute-model-sizes.yml",
    "content": "# This is a basic workflow that is manually triggered\n\nname: Compute model sizes\n\n# Controls when the action will run. Workflow runs when manually triggered using the UI\n# or API.\non:\n  workflow_dispatch:\n# A workflow run is made up of one or more jobs that can run sequentially or in parallel\njobs:\n  # This workflow contains a single job called \"greet\"\n  compute:\n    # The type of runner that the job will run on\n    runs-on: ubuntu-latest\n\n    # Steps represent a sequence of tasks that will be executed as part of the job\n    steps:\n    - uses: actions/checkout@v6.0.2\n    # Runs a single command using the runners shell\n    - name: Compute model size\n      run: ./tools/compute-model-sizes.sh\n"
  },
  {
    "path": ".github/workflows/e2e-main-tf.yaml",
    "content": "# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: PD AI Lab E2E Nightly Testing Farm\n\non:\n  schedule:\n    - cron: '0 0 * * *'\n    \n  workflow_dispatch:\n    inputs:\n      podman_version:\n        default: 'latest'\n        description: 'Podman version to install (e.g., \"5.5.2\", \"5.6.0~rc1\"). Use \"latest\" for stable or \"nightly\" for the latest development build.'\n        type: string\n        required: true\n      npm_target:\n        description: npm tests target\n        type: choice\n        default: 'e2e'\n        options:\n          - e2e\n          - smoke\n          - instructlab\n      plan:\n        description: plans to run\n        type: choice\n        default: 'default'\n        options:\n          - default\n          - gpu\n\njobs:\n  pd-ai-lab-e2e-testing-farm:\n    name: pd-e2e-testing-farm-ci\n    runs-on: ubuntu-latest\n    timeout-minutes: 180\n    strategy:\n      fail-fast: false\n      matrix:\n        fedora-version: ['Fedora-42', 'Fedora-43']\n        plan: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.plan != '' && fromJSON(format('[\"{0}\"]', github.event.inputs.plan)) || fromJSON('[\"default\", \"gpu\"]') }}\n    steps:\n      - name: Set the default env. 
variables\n        env:\n          DEFAULT_NPM_TARGET: 'smoke'\n          DEFAULT_PODMAN_VERSION: 'latest'\n          DEFAULT_NODE_VERSION: 'v24.11.1'\n        run: |\n          echo \"NPM_TARGET=${{ github.event.inputs.npm_target || env.DEFAULT_NPM_TARGET }}\" >> $GITHUB_ENV\n          echo \"PLAN=${{ matrix.plan }}\" >> $GITHUB_ENV\n          echo \"PODMAN_VERSION=${{ github.event.inputs.podman_version || env.DEFAULT_PODMAN_VERSION }}\" >> $GITHUB_ENV\n          echo \"NODE_VERSION=${{ vars.NODE_VERSION || env.DEFAULT_NODE_VERSION }}\" >> $GITHUB_ENV\n\n      - name: Run Podman Desktop Playwright E2E tests on Testing Farm CI\n        id: run-e2e-tf\n        uses: sclorg/testing-farm-as-github-action@b23f0de29ac969d12411215a983da264b4ced149 #v4.2.0\n        with:\n          api_key: ${{ secrets.TF_TOKEN }}\n          create_github_summary: \"false\"\n          compose: ${{ matrix.fedora-version }}\n          tmt_plan_filter: 'name:/tests/tmt/plans/ai-lab-e2e-plan-${{ env.PLAN }}/${{ env.NPM_TARGET }}'\n          variables: COMPOSE=${{ matrix.fedora-version }};ARCH=x86_64;PODMAN_VERSION=${{ env.PODMAN_VERSION }};NODE_VERSION=${{ env.NODE_VERSION }}\n\n      - name: Extract Testing Farm work ID and base URL\n        if: always()\n        run: |\n          TF_ARTIFACTS_URL=\"${{ steps.run-e2e-tf.outputs.test_log_url }}\"\n          TF_DEFAULT_JUNIT_DEFAULT=\"${TF_ARTIFACTS_URL}/results-junit.xml\"\n          curl -o results-junit.xml \"$TF_DEFAULT_JUNIT_DEFAULT\"\n\n          TF_WORK_ID=$(grep -o 'work-${{ env.NPM_TARGET }}[^/\"]*' results-junit.xml | head -1)\n\n          echo \"TF_WORK_ID=$TF_WORK_ID\" >> $GITHUB_ENV\n          echo \"TF_ARTIFACTS_URL=$TF_ARTIFACTS_URL\" >> $GITHUB_ENV\n\n      - name: Download Playwright JUnit report from Testing Farm\n        if: always()\n        run: |\n          TF_PLAYWRIGHT_JUNIT_URL=\"${{ env.TF_ARTIFACTS_URL }}/${{ env.TF_WORK_ID }}/tests/tmt/plans/ai-lab-e2e-plan-${{ env.PLAN }}/${{ env.NPM_TARGET 
}}/execute/data/guest/default-0/tests/tmt/tests/${{ env.NPM_TARGET }}-test-1/data/junit-results.xml\"\n          curl -o junit-playwright-results.xml \"$TF_PLAYWRIGHT_JUNIT_URL\"\n\n      - name: Publish test report to PR\n        if: always()\n        uses: mikepenz/action-junit-report@5b7ee5a21e8674b695313d769f3cbdfd5d4d53a4 #v6.0.0\n        with:\n          fail_on_failure: true\n          include_passed: true\n          detailed_summary: true\n          annotate_only: true\n          require_tests: true\n          report_paths: '**/junit-playwright-results.xml'\n\n      - name: Download test artifacts from Testing Farm\n        if: failure()\n        run: |\n          mkdir -p results\n\n          TF_TEST_DATA_URL=\"${{ env.TF_ARTIFACTS_URL }}/${{ env.TF_WORK_ID }}/tests/tmt/plans/ai-lab-e2e-plan-${{ env.PLAN }}/${{ env.NPM_TARGET }}/execute/data/guest/default-0/tests/tmt/tests/${{ env.NPM_TARGET }}-test-1/data\"\n          TF_TRACES_URL=\"${TF_TEST_DATA_URL}/traces/\"\n          TF_VIDEOS_URL=\"${TF_TEST_DATA_URL}/videos/\"\n\n          echo \"Downloading traces\"\n          wget \\\n            --recursive \\\n            --no-parent \\\n            --no-host-directories \\\n            --cut-dirs=10 \\\n            --reject \"index.html*\" \\\n            --directory-prefix=results \\\n            \"$TF_TRACES_URL\"\n\n          echo \"Downloading videos\"\n          wget \\\n            --recursive \\\n            --no-parent \\\n            --no-host-directories \\\n            --cut-dirs=10 \\\n            --reject \"index.html*\" \\\n            --directory-prefix=results \\\n            \"$TF_VIDEOS_URL\"\n\n      - name: Upload test artifacts\n        if: always()\n        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0\n        with:\n          name: ai-lab-testing-farm-artifacts-${{ matrix.fedora-version }}-${{ env.PLAN }}\n          path: |\n            results/*\n            **/junit-playwright-results.xml\n"
  },
  {
    "path": ".github/workflows/e2e-main.yaml",
    "content": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: e2e-tests-main\n\non:\n  push:\n    branches: [main]\n  schedule:\n    - cron: '0 2 * * *'\n\n  workflow_dispatch:\n    inputs:\n      podman_desktop_repo_args:\n        default: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'\n        description: 'Podman Desktop repo fork and branch'\n        type: string\n        required: true\n      ext_repo_options:\n        default: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'\n        description: 'Podman Desktop Extension repo, fork and branch'\n        type: string\n        required: true\n\njobs:\n  e2e-tests:\n    name: Run E2E tests ${{ github.event_name == 'schedule' && '[nightly]' || '' }}\n    runs-on: ubuntu-24.04\n    steps:\n      - name: Set default env variables\n        env:\n          DEFAULT_PODMAN_DESKTOP_REPO_ARGS: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'\n          DEFAULT_EXT_REPO_OPTIONS: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'\n        run: |\n          echo \"${{ github.event.inputs.podman_desktop_repo_args || env.DEFAULT_PODMAN_DESKTOP_REPO_ARGS }}\" | awk -F ',' \\\n           '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"PD_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n          echo \"${{ github.event.inputs.ext_repo_options || env.DEFAULT_EXT_REPO_OPTIONS }}\" | awk -F ',' \\\n   
        '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"EXT_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n\n      - uses: actions/checkout@v6.0.2\n        name: Checkout AI Lab - Workflow Dispatch\n        if: github.event_name == 'workflow_dispatch'\n        with:\n          repository: ${{ env.EXT_FORK }}/${{ env.EXT_REPO }}\n          ref: ${{ env.EXT_BRANCH }}\n          path: podman-desktop-extension-ai-lab\n\n      - uses: actions/checkout@v6.0.2\n        name: Checkout AI Lab - Push or Schedule\n        if: github.event_name == 'push' || github.event_name == 'schedule'\n        with:\n          path: podman-desktop-extension-ai-lab\n\n      - uses: actions/checkout@v6.0.2\n        name: Checkout Podman Desktop\n        with:\n          repository: ${{ env.PD_FORK }}/${{ env.PD_REPO }}\n          ref: ${{ env.PD_BRANCH }}\n          path: podman-desktop\n\n      - uses: pnpm/action-setup@v5\n        name: Install pnpm\n        with:\n          run_install: false\n          package_json_file: ./podman-desktop/package.json\n\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 24\n          cache: 'pnpm'\n          cache-dependency-path: |\n            ./podman-desktop\n            ./podman-desktop-extension-ai-lab\n\n      - name: Update podman\n        run: |\n          echo \"ubuntu version from kubic repository to install podman we need (v5)\"\n          ubuntu_version='23.10'\n          echo \"Add unstable kubic repo into list of available sources and get the repo key\"\n          sudo sh -c \"echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list\"\n          curl -L \"https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key\" | sudo apt-key add -\n          echo \"Updating database of packages...\"\n          sudo apt-get update -qq\n         
 echo \"install necessary dependencies for criu package which is not part of ${ubuntu_version}\"\n          sudo apt-get install -qq libprotobuf32t64 python3-protobuf libnet1\n          echo \"install criu manually from static location\"\n          curl -sLO http://archive.ubuntu.com/ubuntu/pool/universe/c/criu/criu_3.16.1-2_amd64.deb && sudo dpkg -i criu_3.16.1-2_amd64.deb\n          echo \"installing/update podman package...\"\n          sudo apt-get -qq -y install podman || { echo \"Start fallback steps for podman nightly installation from a static mirror\" && \\\n            sudo sh -c \"echo 'deb http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list\" && \\\n            curl -L \"http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key\" | sudo apt-key add - && \\\n            sudo apt-get update && \\\n            sudo apt-get -y install podman; }\n          podman version\n\n      - name: Revert unprivileged user namespace restrictions in Ubuntu 24.04\n        run: |\n          # allow unprivileged user namespace\n          sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0\n\n      - name: Set cgroup_manager to 'cgroupfs' instead of systemd\n        run: |\n          mkdir -p ~/.config/containers\n          cat <<EOT >> ~/.config/containers/containers.conf\n          [engine]\n          cgroup_manager=\"cgroupfs\"\n          EOT\n          podman info\n\n      - name: Execute pnpm\n        working-directory: ./podman-desktop\n        run: pnpm install --frozen-lockfile\n\n      - name: Build Podman Desktop for E2E tests\n        working-directory: ./podman-desktop\n        run: pnpm test:e2e:build\n\n      - name: Ensure getting current HEAD version of the test framework\n        working-directory: 
./podman-desktop-extension-ai-lab/tests/playwright\n        run: pnpm add -D @podman-desktop/tests-playwright@next\n\n      - name: Execute pnpm in AI Lab Extension\n        working-directory: ./podman-desktop-extension-ai-lab\n        run: pnpm install\n\n      - name: Build Image\n        working-directory: ./podman-desktop-extension-ai-lab\n        id: build-image\n        run: |\n          pnpm build\n          podman build -t local_ai_lab_image ./\n          CONTAINER_ID=$(podman create localhost/local_ai_lab_image --entrypoint \"\")\n          mkdir -p tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins\n          podman export $CONTAINER_ID | tar -x -C tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins/\n          podman rm -f $CONTAINER_ID\n          podman rmi -f localhost/local_ai_lab_image:latest\n\n      - name: Free up disk space\n        uses: podman-desktop/e2e/.github/actions/disk-cleanup@6a406f8f24bacffc481553266f9ba8a5293f3077\n\n      - name: Run All E2E tests\n        working-directory: ./podman-desktop-extension-ai-lab\n        env:\n          PODMAN_DESKTOP_ARGS: ${{ github.workspace }}/podman-desktop\n          EXTENSION_PREINSTALLED: true\n        run: pnpm test:e2e\n\n      - name: Publish Test Report\n        uses: mikepenz/action-junit-report@v6\n        if: always()\n        with:\n          annotate_only: true\n          fail_on_failure: true\n          include_passed: true\n          detailed_summary: true\n          require_tests:  true\n          report_paths: '**/*results.xml'\n\n      - uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-tests\n          path: |\n            ./**/tests/**/output/\n            !./**/*.gguf\n            !./**/*.bin\n            !./**/output/videos/*\n            !./**/output/traces/*\n\n      - name: Upload test videos\n        uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-tests-videos\n     
     path: ./**/output/videos/*\n\n      - name: Upload test traces\n        uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-tests-traces\n          path: ./**/output/traces/*\n"
  },
  {
    "path": ".github/workflows/llama-stack-playground.yaml",
    "content": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: llama-stack-playground\n\non:\n  workflow_dispatch:\n    inputs:\n      version:\n        description: 'llama-stack tag to use (e.g. main, v0.2.8,...)'\n        type: string\n        required: true\n\njobs:\n  publish:\n    name: publish\n    runs-on: ubuntu-24.04\n    steps:\n      - uses: actions/checkout@0c366fd6a839edf440554fa01a7085ccba70ac98 #v5.0.1\n        with:\n          repository: meta-llama/llama-stack\n          ref: ${{ github.event.inputs.version }}\n\n      - name: Install qemu dependency\n        run: |\n          sudo apt-get update\n          sudo apt-get install -y qemu-user-static\n\n      - name: Build manifest and images\n        run: |\n          podman manifest create quay.io/podman-ai-lab/llama-stack-playground:${{ github.event.inputs.version }}\n          podman build --platform linux/amd64,linux/arm64 llama_stack/distribution/ui --manifest quay.io/podman-ai-lab/llama-stack-playground:${{ github.event.inputs.version }}\n\n      - name: Login to quay.io\n        run: podman login quay.io --username ${{ secrets.QUAY_USERNAME }} --password ${{ secrets.QUAY_PASSWORD }}\n\n      - name: Push manifest and images to quay.io\n        run: podman manifest push quay.io/podman-ai-lab/llama-stack-playground:${{ github.event.inputs.version }}\n\n"
  },
  {
    "path": ".github/workflows/pr-check.yaml",
    "content": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: pr-check\n\non: [pull_request]\n\njobs:\n  lint-format-unit:\n    name: linter, formatters and unit tests / ${{ matrix.os }}\n    runs-on: ${{ matrix.os }}\n    timeout-minutes: 40\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [windows-2022, ubuntu-22.04, macos-14]\n    steps:\n      - uses: actions/checkout@v6.0.2\n\n      - uses: pnpm/action-setup@v5\n        name: Install pnpm\n        with:\n          run_install: false\n\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 24\n          cache: 'pnpm'\n\n      - name: Execute pnpm\n        run: pnpm install\n\n      - name: Run linter\n        run: pnpm lint:check\n\n      - name: Run formatter\n        run: pnpm format:check\n\n      - name: Run unit tests\n        run: pnpm test:unit\n\n      - name: Run typecheck\n        run: pnpm typecheck\n\n      - name: Run svelte check\n        run: pnpm svelte:check\n\n      # Check we don't have changes in git\n      - name: Check no changes in git\n        if: ${{ matrix.os=='ubuntu-22.04'}}\n        run: |\n          if ! 
git diff --exit-code; then\n            echo \"Found changes in git\"\n            exit 1\n          fi\n\n  e2e-pr-check:\n    name: e2e tests smoke\n    runs-on: ubuntu-24.04\n    env:\n      SKIP_INSTALLATION: true\n    steps:\n      - uses: actions/checkout@v6.0.2\n        with:\n          path: podman-desktop-extension-ai-lab\n      # Set up pnpm\n      - uses: pnpm/action-setup@v5\n        name: Install pnpm\n        with:\n          run_install: false\n          package_json_file: ./podman-desktop-extension-ai-lab/package.json\n      # Install Node.js\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 24\n\n      # Checkout podman desktop\n      - uses: actions/checkout@v6.0.2\n        with:\n          repository: containers/podman-desktop\n          ref: main\n          path: podman-desktop\n\n      - name: Update podman\n        run: |\n          echo \"ubuntu version from kubic repository to install podman we need (v5)\"\n          ubuntu_version='23.10'\n          echo \"Add unstable kubic repo into list of available sources and get the repo key\"\n          sudo sh -c \"echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list\"\n          curl -L \"https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key\" | sudo apt-key add -\n          echo \"Updating database of packages...\"\n          sudo apt-get update -qq\n          echo \"install necessary dependencies for criu package which is not part of ${ubuntu_version}\"\n          sudo apt-get install -qq libprotobuf32t64 python3-protobuf libnet1\n          echo \"install criu manually from static location\"\n          curl -sLO http://archive.ubuntu.com/ubuntu/pool/universe/c/criu/criu_3.16.1-2_amd64.deb && sudo dpkg -i criu_3.16.1-2_amd64.deb\n          echo \"installing/update podman 
package...\"\n          sudo apt-get -qq -y install podman || { echo \"Start fallback steps for podman nightly installation from a static mirror\" && \\\n            sudo sh -c \"echo 'deb http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list\" && \\\n            curl -L \"http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key\" | sudo apt-key add - && \\\n            sudo apt-get update && \\\n            sudo apt-get -y install podman; }\n          podman version\n\n      - name: Revert unprivileged user namespace restrictions in Ubuntu 24.04\n        run: |\n          # allow unprivileged user namespace\n          sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0\n\n      - name: Set cgroup_manager to 'cgroupfs' instead of systemd\n        run: |\n          mkdir -p ~/.config/containers\n          cat <<EOT >> ~/.config/containers/containers.conf\n          [engine]\n          cgroup_manager=\"cgroupfs\"\n          EOT\n          podman info\n\n      - name: Install pnpm deps and build Podman Desktop\n        working-directory: ./podman-desktop\n        run: |\n          pnpm install --frozen-lockfile\n          pnpm test:e2e:build\n\n      - name: Ensure getting current HEAD version of the test framework\n        working-directory: ./podman-desktop-extension-ai-lab/tests/playwright\n        run: |\n          # workaround for https://github.com/containers/podman-desktop-extension-bootc/issues/712\n          version=$(npm view @podman-desktop/tests-playwright@next version)\n          echo \"Version of @podman-desktop/tests-playwright to be used: $version\"\n          jq --arg version \"$version\" '.devDependencies.\"@podman-desktop/tests-playwright\" = $version' package.json > package.json_tmp && mv package.json_tmp package.json\n\n      - name: 
Execute pnpm in AI Lab Extension\n        working-directory: ./podman-desktop-extension-ai-lab\n        run: pnpm install --no-frozen-lockfile\n\n      - name: Build Image\n        working-directory: ./podman-desktop-extension-ai-lab\n        id: build-image\n        run: |\n          pnpm build\n          podman build -t local_ai_lab_image ./\n          CONTAINER_ID=$(podman create localhost/local_ai_lab_image --entrypoint \"\")\n          mkdir -p tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins\n          podman export $CONTAINER_ID | tar -x -C tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins/\n          podman rm -f $CONTAINER_ID\n          podman rmi -f localhost/local_ai_lab_image:latest\n\n      - name: Free up disk space\n        uses: podman-desktop/e2e/.github/actions/disk-cleanup@6a406f8f24bacffc481553266f9ba8a5293f3077\n\n      - name: Run E2E Smoke tests\n        working-directory: ./podman-desktop-extension-ai-lab\n        env:\n          PODMAN_DESKTOP_ARGS: ${{ github.workspace }}/podman-desktop\n          EXTENSION_PREINSTALLED: true\n        run: pnpm test:e2e:smoke\n\n      - name: Publish Test Report\n        uses: mikepenz/action-junit-report@v6\n        if: always()\n        with:\n          annotate_only: true\n          fail_on_failure: true\n          include_passed: true\n          detailed_summary: true\n          require_tests:  true\n          report_paths: '**/*results.xml'\n\n      - uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-pr-check\n          path: |\n            ./**/tests/**/output/\n            !./**/*.gguf\n            !./**/*.bin\n            !./**/output/videos/*\n            !./**/output/traces/*\n\n      - name: Upload test videos\n        uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-pr-check-videos\n          path: ./**/output/videos/*\n\n      - name: Upload test traces\n        uses: 
actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-pr-check-traces\n          path: ./**/output/traces/*\n"
  },
  {
    "path": ".github/workflows/ramalama.yaml",
    "content": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: ramalama\n\non:\n  schedule:\n    - cron: '0 2 * * *'\n\n  workflow_dispatch:\n    inputs:\n      tag:\n        default: 'latest'\n        description: 'Ramalama images tag to use'\n        type: string\n        required: true\n\njobs:\n  e2e-check:\n    name: e2e tests\n    runs-on: ubuntu-24.04\n    env:\n      SKIP_INSTALLATION: true\n    steps:\n      - uses: actions/checkout@v6.0.2\n        with:\n          path: podman-desktop-extension-ai-lab\n      # Set up pnpm\n      - uses: pnpm/action-setup@v5\n        name: Install pnpm\n        with:\n          run_install: false\n          package_json_file: ./podman-desktop-extension-ai-lab/package.json\n      # Install Node.js\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 24\n      # Checkout podman desktop\n      - uses: actions/checkout@v6.0.2\n        with:\n          repository: podman-desktop/podman-desktop\n          ref: main\n          path: podman-desktop\n\n      - name: Update podman\n        run: |\n          echo \"ubuntu version from kubic repository to install podman we need (v5)\"\n          ubuntu_version='23.10'\n          echo \"Add unstable kubic repo into list of available sources and get the repo key\"\n          sudo sh -c \"echo 'deb 
https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list\"\n          curl -L \"https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key\" | sudo apt-key add -\n          echo \"Updating database of packages...\"\n          sudo apt-get update -qq\n          echo \"install necessary dependencies for criu package which is not part of ${ubuntu_version}\"\n          sudo apt-get install -qq libprotobuf32t64 python3-protobuf libnet1\n          echo \"install criu manually from static location\"\n          curl -sLO http://archive.ubuntu.com/ubuntu/pool/universe/c/criu/criu_3.16.1-2_amd64.deb && sudo dpkg -i criu_3.16.1-2_amd64.deb\n          echo \"installing/update podman package...\"\n          sudo apt-get -qq -y install podman || { echo \"Start fallback steps for podman nightly installation from a static mirror\" && \\\n            sudo sh -c \"echo 'deb http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list\" && \\\n            curl -L \"http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key\" | sudo apt-key add - && \\\n            sudo apt-get update && \\\n            sudo apt-get -y install podman; }\n          podman version\n\n      - name: Revert unprivileged user namespace restrictions in Ubuntu 24.04\n        run: |\n          # allow unprivileged user namespace\n          sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0\n\n      - name: Set cgroup_manager to 'cgroupfs' instead of systemd\n        run: |\n          mkdir -p ~/.config/containers\n          cat <<EOT >> ~/.config/containers/containers.conf\n          [engine]\n          cgroup_manager=\"cgroupfs\"\n 
         EOT\n          podman info\n\n      - name: Install pnpm deps and build Podman Desktop\n        working-directory: ./podman-desktop\n        run: |\n          pnpm install --frozen-lockfile\n          pnpm test:e2e:build\n\n      - name: Ensure getting current HEAD version of the test framework\n        working-directory: ./podman-desktop-extension-ai-lab/tests/playwright\n        run: |\n          # workaround for https://github.com/podman-desktop/podman-desktop-extension-bootc/issues/712\n          version=$(npm view @podman-desktop/tests-playwright@next version)\n          echo \"Version of @podman-desktop/tests-playwright to be used: $version\"\n          jq --arg version \"$version\" '.devDependencies.\"@podman-desktop/tests-playwright\" = $version' package.json > package.json_tmp && mv package.json_tmp package.json\n\n      - name: Execute pnpm in AI Lab Extension\n        working-directory: ./podman-desktop-extension-ai-lab\n        run: pnpm install --no-frozen-lockfile\n\n      - name: Update ramalama image references in AI Lab Extension\n        working-directory: ./podman-desktop-extension-ai-lab\n        run: sed -i -E \"s/(@sha256:[0-9a-f]+)/:${{ github.event_name != 'workflow_dispatch' && 'latest' || github.event.inputs.tag }}/g\" packages/backend/src/assets/inference-images.json\n\n      - name: Build Image\n        working-directory: ./podman-desktop-extension-ai-lab\n        id: build-image\n        run: |\n          pnpm build\n          podman build -t local_ai_lab_image ./\n          CONTAINER_ID=$(podman create localhost/local_ai_lab_image --entrypoint \"\")\n          mkdir -p tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins\n          podman export $CONTAINER_ID | tar -x -C tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins/\n          podman rm -f $CONTAINER_ID\n          podman rmi -f localhost/local_ai_lab_image:latest\n\n      - name: Free up disk space\n        uses: 
podman-desktop/e2e/.github/actions/disk-cleanup@6a406f8f24bacffc481553266f9ba8a5293f3077\n\n      - name: Run E2E tests\n        working-directory: ./podman-desktop-extension-ai-lab\n        env:\n          PODMAN_DESKTOP_ARGS: ${{ github.workspace }}/podman-desktop\n          EXTENSION_PREINSTALLED: true\n        run: pnpm test:e2e\n\n      - name: Publish Test Report\n        uses: mikepenz/action-junit-report@v6\n        if: always()\n        with:\n          annotate_only: true\n          fail_on_failure: true\n          include_passed: true\n          detailed_summary: true\n          require_tests:  true\n          report_paths: '**/*results.xml'\n\n      - uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-check\n          path: |\n            ./**/tests/**/output/\n            !./**/*.gguf\n            !./**/*.bin\n            !./**/output/videos/*\n            !./**/output/traces/*\n\n      - name: Upload test videos\n        uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-check-videos\n          path: ./**/output/videos/*\n\n      - name: Upload test traces\n        uses: actions/upload-artifact@v7\n        if: always()\n        with:\n          name: e2e-check-traces\n          path: ./**/output/traces/*\n\n"
  },
  {
    "path": ".github/workflows/recipe-catalog-change-cleanup.yaml",
    "content": "name: recipe-catalog-change-cleanup\n\non:\n  workflow_run:\n    workflows: [\"recipe-catalog-change-windows-trigger\"]\n    types:\n      - completed\n\njobs:\n  extract-context:\n    runs-on: ubuntu-24.04\n    outputs:\n      extract-context: ${{ steps.prepare-context.outputs.extract-context }}\n      trigger-template: ${{ steps.prepare-context.outputs.trigger-template }}\n    steps:\n      - name: Prepare context\n        id: prepare-context\n        env:\n          WORKFLOW_RUN: ${{ toJson(github.event.workflow_run) }}\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          echo \"Workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}\"\n          echo \"Fork owner: ${{ fromJson(env.WORKFLOW_RUN).head_repository.owner.login }}\"\n          echo \"Fork repo: ${{ fromJson(env.WORKFLOW_RUN).head_repository.name }}\"\n          echo \"Fork branch: ${{ fromJson(env.WORKFLOW_RUN).head_branch }}\"\n          echo \"Commit SHA: ${{ fromJson(env.WORKFLOW_RUN).head_sha }}\"\n          echo \"Base repo: ${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}\"\n          echo \"Conclusion: ${{ fromJson(env.WORKFLOW_RUN).conclusion }}\"\n          # Fetch job conclusions using the GitHub CLI\n          echo \"Fetching jobs for workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}\"\n            gh api \\\n            repos/${{ github.repository }}/actions/runs/${{ fromJson(env.WORKFLOW_RUN).id }}/jobs \\\n            --jq '.jobs[] | \"\\(.name)=\\(.conclusion)\"' | while read -r line; do\n              echo \"$line\" >> $GITHUB_OUTPUT\n            done\n          cat $GITHUB_OUTPUT\n  cleanup:\n    runs-on: ubuntu-24.04\n    needs: extract-context\n    if: ${{ github.event.workflow_run.conclusion == 'skipped' || (github.event.workflow_run.conclusion == 'success' && needs.extract-context.outputs.trigger-template == 'skipped') }}\n    steps:\n      - name: Remove skipped or cancelled workflow run\n        env:\n          WORKFLOW_RUN: ${{ 
toJson(github.event.workflow_run) }}\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          echo \"Cleaning up workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}\"\n          gh run delete ${{ fromJson(env.WORKFLOW_RUN).id }} --repo ${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}\n          echo \"Workflow run ID ${{ fromJson(env.WORKFLOW_RUN).id }} has been cleaned up.\"\n"
  },
  {
    "path": ".github/workflows/recipe-catalog-change-template.yaml",
    "content": "name: Run recipe tests on catalog change\n\non:\n  workflow_call:\n    inputs:\n      trigger-workflow-run-id:\n        required: true\n        type: string\n      trigger-workflow-fork:\n        required: true\n        type: string\n      trigger-workflow-repo-name:\n        required: true\n        type: string\n      trigger-workflow-branch:\n        required: true\n        type: string\n      trigger-workflow-commit-sha:\n        required: true\n        type: string\n      trigger-workflow-base-repo:\n        required: true\n        type: string\n      pd-fork:\n        required: false\n        type: string\n      pd-branch:\n        required: false\n        type: string\n      pd-env-vars:\n        required: false\n        type: string\n      podman-options:\n        required: false\n        type: string\n      podman-download-url:\n        required: false\n        type: string\n      ext_tests_options:\n        required: false\n        type: string\n      npm-target:\n        required: false\n        type: string\n      pde2e-image-version:\n        required: false\n        type: string\n      mapt_params:\n        required: false\n        type: string\n\njobs:\n  windows:\n    name: recipe-catalog-windows-${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}\n    runs-on: ubuntu-24.04\n    strategy:\n      fail-fast: false\n      matrix:\n        windows-version: ['11']\n        windows-featurepack: ['25h2-ent']\n\n    steps:\n    - name: Add PR check status\n      env:\n        GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n      run: |\n        status_context=\"catalog-change-windows-matrix-${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}\"\n        echo \"status_context=${status_context}\" >> \"$GITHUB_ENV\"\n        set -xuo\n        # Status msg\n        data=\"{\\\"state\\\":\\\"pending\\\"\"\n        data=\"${data},\\\"description\\\":\\\"Running recipe tests on catalog change on Windows ${{ matrix.windows-version 
}}-${{ matrix.windows-featurepack }}\\\"\"\n        data=\"${data},\\\"context\\\":\\\"$status_context\\\"\"\n        data=\"${data},\\\"target_url\\\":\\\"https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\\\"}\"\n        # Create status by API call\n        curl -L -v -X POST \\\n          -H \"Accept: application/vnd.github+json\" \\\n          -H \"Authorization: Bearer ${{ env.GH_TOKEN }}\" \\\n          https://api.github.com/repos/${{ inputs.trigger-workflow-base-repo }}/statuses/${{ inputs.trigger-workflow-commit-sha }} \\\n          -d \"${data}\"\n\n    - name: Get Podman version used by Podman Desktop\n      run: |\n        version=$(curl https://raw.githubusercontent.com/containers/podman-desktop/main/extensions/podman/packages/extension/src/podman5.json | jq -r '.version')\n        echo \"Default Podman Version from Podman Desktop: ${version}\"\n        echo \"PD_PODMAN_VERSION=${version}\" >> $GITHUB_ENV\n\n    - name: Set the default env. variables\n      env:\n        DEFAULT_FORK: 'containers'\n        DEFAULT_BRANCH: 'main'\n        DEFAULT_NPM_TARGET: 'test:e2e'\n        DEFAULT_ENV_VARS: 'TEST_PODMAN_MACHINE=true,ELECTRON_ENABLE_INSPECT=true'\n        DEFAULT_PODMAN_OPTIONS: 'INIT=1,START=1,ROOTFUL=1,NETWORKING=0'\n        DEFAULT_EXT_TESTS_OPTIONS: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=1,EXT_TEST_GPU_SUPPORT_ENABLED=0'\n        DEFAULT_EXT_REPO_OPTIONS: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'\n        DEFAULT_PODMAN_VERSION: \"${{ env.PD_PODMAN_VERSION || '5.3.2' }}\"\n        DEFAULT_URL: \"https://github.com/containers/podman/releases/download/v$DEFAULT_PODMAN_VERSION/podman-$DEFAULT_PODMAN_VERSION-setup.exe\"\n        DEFAULT_PDE2E_IMAGE_VERSION: 'v0.0.3-windows'\n        DEFAULT_MAPT_PARAMS: \"IMAGE=${{ vars.MAPT_IMAGE || 'quay.io/redhat-developer/mapt' }};VERSION_TAG=${{ vars.MAPT_VERSION_TAG || 'v0.9.7' }};CPUS=${{ vars.MAPT_CPUS || '4' }};MEMORY=${{ vars.MAPT_MEMORY 
|| '32' }};EXCLUDED_REGIONS=\\\"${{ vars.MAPT_EXCLUDED_REGIONS || 'westindia,centralindia,southindia,australiacentral,australiacentral2,australiaeast,australiasoutheast,southafricanorth,southafricawest' }}\\\"\"\n      run: |\n        echo \"FORK=${{ inputs.pd-fork || env.DEFAULT_FORK }}\" >> $GITHUB_ENV\n        echo \"BRANCH=${{ inputs.pd-branch || env.DEFAULT_BRANCH }}\" >> $GITHUB_ENV\n        echo \"NPM_TARGET=${{ inputs.npm-target || env.DEFAULT_NPM_TARGET }}\" >> $GITHUB_ENV\n        echo \"ENV_VARS=${{ inputs.pd-env-vars || env.DEFAULT_ENV_VARS }}\" >> $GITHUB_ENV\n        echo \"PODMAN_URL=${{ inputs.podman-download-url || env.DEFAULT_URL }}\" >> $GITHUB_ENV\n        echo \"PDE2E_IMAGE_VERSION=${{ inputs.pde2e-image-version || env.DEFAULT_PDE2E_IMAGE_VERSION }}\" >> $GITHUB_ENV\n        # Use the triggering workflow's repo/fork/branch when provided; keep the value in a\n        # shell variable because ${{ env.* }} expressions are expanded before this step runs,\n        # so writing to $GITHUB_ENV cannot override them within the same step.\n        ext_repo_options=\"${{ env.DEFAULT_EXT_REPO_OPTIONS }}\"\n        if [[ -n \"${{ inputs.trigger-workflow-repo-name }}\" ]] && [[ -n \"${{ inputs.trigger-workflow-fork }}\" ]] && [[ -n \"${{ inputs.trigger-workflow-branch }}\" ]]; then\n          ext_repo_options=\"REPO=${{ inputs.trigger-workflow-repo-name }},FORK=${{ inputs.trigger-workflow-fork }},BRANCH=${{ inputs.trigger-workflow-branch }}\"\n        fi\n        echo \"${{ inputs.ext_tests_options || env.DEFAULT_EXT_TESTS_OPTIONS }}\" | awk -F ',' \\\n          '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n        echo \"${{ inputs.podman-options || env.DEFAULT_PODMAN_OPTIONS }}\" | awk -F ',' \\\n          '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"PODMAN_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n        echo \"$ext_repo_options\" | awk -F ',' \\\n          '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"EXT_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n        echo \"${{ inputs.mapt_params || env.DEFAULT_MAPT_PARAMS }}\" | awk -F ';' \\\n          '{for (i=1; i<=NF; i++) {split($i, kv, \"=\"); print \"MAPT_\"kv[1]\"=\"kv[2]}}' >> $GITHUB_ENV\n\n    - name: Create 
instance\n      run: |\n        # Create instance\n        podman run -d --name windows-create --rm \\\n          -v ${PWD}:/workspace:z \\\n          -e ARM_TENANT_ID=${{ secrets.ARM_TENANT_ID }} \\\n          -e ARM_SUBSCRIPTION_ID=${{ secrets.ARM_SUBSCRIPTION_ID }} \\\n          -e ARM_CLIENT_ID=${{ secrets.ARM_CLIENT_ID }} \\\n          -e ARM_CLIENT_SECRET='${{ secrets.ARM_CLIENT_SECRET }}' \\\n          --user 0 \\\n          ${{ env.MAPT_IMAGE }}:${{ env.MAPT_VERSION_TAG }} azure \\\n            windows create \\\n            --project-name 'windows-desktop' \\\n            --backed-url 'file:///workspace' \\\n            --conn-details-output '/workspace' \\\n            --windows-version '${{ matrix.windows-version }}' \\\n            --windows-featurepack '${{ matrix.windows-featurepack }}' \\\n            --cpus ${{ env.MAPT_CPUS }} \\\n            --memory ${{ env.MAPT_MEMORY }} \\\n            --nested-virt \\\n            --tags project=podman-desktop \\\n            --spot-excluded-regions ${{ env.MAPT_EXCLUDED_REGIONS }} \\\n            --spot\n        # Check logs\n        podman logs -f windows-create\n\n    - name: Check instance system info\n      run: |\n        ssh -i id_rsa \\\n          -o StrictHostKeyChecking=no \\\n          -o UserKnownHostsFile=/dev/null \\\n          -o ServerAliveInterval=30 \\\n          -o ServerAliveCountMax=1200 \\\n          $(cat username)@$(cat host) \"systeminfo\"\n\n    - name: Emulate X session\n      run: |\n        # use fake rdp to emulate an active x session\n        podman run -d --name x-session \\\n          -e RDP_HOST=$(cat host) \\\n          -e RDP_USER=$(cat username) \\\n          -e RDP_PASSWORD=$(cat userpassword) \\\n          quay.io/rhqp/frdp:v0.0.1\n        # Wait until the x session has been created\n        podman wait --condition running x-session\n        # Check logs for the x session\n        podman logs x-session\n\n    - name: Download Podman, do not initialize\n      run: |\n      
  podman run --rm -d --name pde2e-podman-run \\\n          -e TARGET_HOST=$(cat host) \\\n          -e TARGET_HOST_USERNAME=$(cat username) \\\n          -e TARGET_HOST_KEY_PATH=/data/id_rsa \\\n          -e TARGET_FOLDER=pd-e2e \\\n          -e TARGET_CLEANUP=false \\\n          -e TARGET_RESULTS=results \\\n          -e OUTPUT_FOLDER=/data \\\n          -e DEBUG=true \\\n          -v $PWD:/data:z \\\n          quay.io/odockal/pde2e-podman:${{ env.PDE2E_IMAGE_VERSION }} \\\n            pd-e2e/podman.ps1 \\\n              -downloadUrl ${{ env.PODMAN_URL }} \\\n              -targetFolder pd-e2e \\\n              -resultsFolder results \\\n              -initialize 0 \\\n              -rootful 0 \\\n              -start 0 \\\n              -installWSL 0\n        # check logs\n        podman logs -f pde2e-podman-run\n\n    - name: Build Podman Desktop Electron Inspect Enabled binary\n      run: |\n        podman run --rm -d --name pde2e-builder-run \\\n          -e TARGET_HOST=$(cat host) \\\n          -e TARGET_HOST_USERNAME=$(cat username) \\\n          -e TARGET_HOST_KEY_PATH=/data/id_rsa \\\n          -e TARGET_FOLDER=pd-e2e \\\n          -e TARGET_CLEANUP=false \\\n          -e TARGET_RESULTS=results \\\n          -e OUTPUT_FOLDER=/data \\\n          -e DEBUG=true \\\n          -v $PWD:/data:z \\\n          quay.io/odockal/pde2e-builder:${{ env.PDE2E_IMAGE_VERSION }} \\\n            pd-e2e/builder.ps1 \\\n              -targetFolder pd-e2e \\\n              -resultsFolder results \\\n              -fork ${{ env.FORK }} \\\n              -branch ${{ env.BRANCH }} \\\n              -envVars ${{ env.ENV_VARS }}\n        # check logs\n        podman logs -f pde2e-builder-run\n\n    - name: Run Podman Desktop Playwright E2E tests\n      run: |\n        podman run -d --name pde2e-runner-run \\\n          -e TARGET_HOST=$(cat host) \\\n          -e TARGET_HOST_USERNAME=$(cat username) \\\n          -e TARGET_HOST_KEY_PATH=/data/id_rsa \\\n          -e 
TARGET_FOLDER=pd-e2e \\\n          -e TARGET_RESULTS=results \\\n          -e OUTPUT_FOLDER=/data \\\n          -e DEBUG=true \\\n          -v $PWD:/data:z \\\n          quay.io/odockal/pde2e-runner:${{ env.PDE2E_IMAGE_VERSION }} \\\n              pd-e2e/runner.ps1 \\\n                -targetFolder pd-e2e \\\n                -resultsFolder results \\\n                -podmanPath $(cat results/podman-location.log) \\\n                -pdPath \"$(cat results/pde2e-binary-path.log | tr '\\n' \" \")\" \\\n                -fork ${{ env.FORK }} \\\n                -branch ${{ env.BRANCH }} \\\n                -extRepo ${{ env.EXT_REPO }} \\\n                -extFork ${{ env.EXT_FORK }} \\\n                -extBranch ${{ env.EXT_BRANCH }} \\\n                -extTests ${{ env.EXT_RUN_TESTS_FROM_EXTENSION }} \\\n                -npmTarget ${{ env.NPM_TARGET }} \\\n                -initialize ${{ env.PODMAN_INIT }} \\\n                -rootful ${{ env.PODMAN_ROOTFUL }} \\\n                -start ${{ env.PODMAN_START }} \\\n                -userNetworking ${{ env.PODMAN_NETWORKING }} \\\n                -envVars ${{ env.ENV_VARS }} \\\n                -runAsAdmin ${{ env.EXT_RUN_TESTS_AS_ADMIN }}\n        # check logs\n        podman logs -f pde2e-runner-run\n\n    - name: Publish Test Report\n      id: test-report\n      uses: mikepenz/action-junit-report@v6\n      if: always() # always run even if the previous step fails\n      with:\n        annotate_only: true\n        fail_on_failure: true\n        include_passed: true\n        detailed_summary: true\n        require_tests:  true\n        report_paths: '**/*results.xml'\n\n    - name: Update status of the PR check\n      if: always()\n      env:\n        GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n      run: |\n        set -xuo\n        # Status msg\n        data=\"{\\\"state\\\":\\\"success\\\"\"\n        if [[ ${{ steps.test-report.outcome }} != \"success\" ]]; then\n          data=\"{\\\"state\\\":\\\"failure\\\"\"\n      
  fi\n        data=\"${data},\\\"description\\\":\\\"Finished recipe tests on catalog change on Windows ${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}\\\"\"\n        data=\"${data},\\\"context\\\":\\\"${{ env.status_context }}\\\"\"\n        data=\"${data},\\\"target_url\\\":\\\"https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\\\"}\"\n        # Create status by API call\n        curl -L -v -X POST \\\n          -H \"Accept: application/vnd.github+json\" \\\n          -H \"Authorization: Bearer ${{ env.GH_TOKEN }}\" \\\n          https://api.github.com/repos/${{ inputs.trigger-workflow-base-repo }}/statuses/${{ inputs.trigger-workflow-commit-sha }} \\\n          -d \"${data}\"\n\n    - name: Destroy instance\n      if: always()\n      run: |\n        # Destroy instance\n        podman run -d --name windows-destroy --rm \\\n          -v ${PWD}:/workspace:z \\\n          -e ARM_TENANT_ID=${{ secrets.ARM_TENANT_ID }} \\\n          -e ARM_SUBSCRIPTION_ID=${{ secrets.ARM_SUBSCRIPTION_ID }} \\\n          -e ARM_CLIENT_ID=${{ secrets.ARM_CLIENT_ID }} \\\n          -e ARM_CLIENT_SECRET='${{ secrets.ARM_CLIENT_SECRET }}' \\\n          --user 0 \\\n          ${{ env.MAPT_IMAGE }}:${{ env.MAPT_VERSION_TAG }} azure \\\n            windows destroy \\\n            --project-name 'windows-desktop' \\\n            --backed-url 'file:///workspace'\n        # Check logs\n        podman logs -f windows-destroy\n\n    - name: Upload test artifacts\n      uses: actions/upload-artifact@v7\n      if: always()\n      with:\n        name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}\n        path: |\n          results/*\n          !./**/*.gguf\n          !./**/*.bin\n          !./**/output/videos/*\n          !./**/output/traces/*\n\n    - name: Upload test videos\n      uses: actions/upload-artifact@v7\n      if: always()\n      with:\n        name: results-e2e-${{ matrix.windows-version }}${{ 
matrix.windows-featurepack }}-videos\n        path: ./**/output/videos/*\n\n    - name: Upload test traces\n      uses: actions/upload-artifact@v7\n      if: always()\n      with:\n        name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}-traces\n        path: ./**/output/traces/*\n"
  },
  {
    "path": ".github/workflows/recipe-catalog-change-trigger.yaml",
    "content": "name: recipe-catalog-change-windows-trigger\n\non:\n  workflow_run:\n    workflows: [\"pr-check\"]\n    types:\n      - completed\n\njobs:\n  extract-context:\n    runs-on: ubuntu-24.04\n    if: ${{ github.event.workflow_run.conclusion == 'success' }}\n    outputs:\n      workflow-run-id: ${{ steps.parse-event.outputs.workflow-run-id }}\n      fork-owner: ${{ steps.parse-event.outputs.fork-owner }}\n      fork-repo: ${{ steps.parse-event.outputs.fork-repo }}\n      fork-branch: ${{ steps.parse-event.outputs.fork-branch }}\n      commit-sha: ${{ steps.parse-event.outputs.commit-sha }}\n      base-repo: ${{ steps.parse-event.outputs.base-repo }}\n      changes-detected: ${{ steps.parse-event.outputs.changes-detected }}\n    steps:\n      - name: Parse event data\n        id: parse-event\n        env:\n          WORKFLOW_RUN: ${{ toJson(github.event.workflow_run) }}\n          GH_TOKEN: ${{ github.token }}\n        run: |\n          echo \"Workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}\"\n          echo \"workflow-run-id=${{ fromJson(env.WORKFLOW_RUN).id }}\" >> $GITHUB_OUTPUT\n          echo \"Fork owner: ${{ fromJson(env.WORKFLOW_RUN).head_repository.owner.login }}\"\n          echo \"fork-owner=${{ fromJson(env.WORKFLOW_RUN).head_repository.owner.login }}\" >> $GITHUB_OUTPUT\n          echo \"Fork repo: ${{ fromJson(env.WORKFLOW_RUN).head_repository.name }}\"\n          echo \"fork-repo=${{ fromJson(env.WORKFLOW_RUN).head_repository.name }}\" >> $GITHUB_OUTPUT\n          echo \"Fork branch: ${{ fromJson(env.WORKFLOW_RUN).head_branch }}\"\n          echo \"fork-branch=${{ fromJson(env.WORKFLOW_RUN).head_branch }}\" >> $GITHUB_OUTPUT\n          echo \"Commit SHA: ${{ fromJson(env.WORKFLOW_RUN).head_sha }}\"\n          echo \"commit-sha=${{ fromJson(env.WORKFLOW_RUN).head_sha }}\" >> $GITHUB_OUTPUT\n          echo \"Base repo: ${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}\"\n          echo \"base-repo=${{ 
fromJson(env.WORKFLOW_RUN).repository.full_name }}\" >> $GITHUB_OUTPUT\n\n          git clone \"https://github.com/${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}\" \"${{ fromJson(env.WORKFLOW_RUN).repository.name }}\" --depth 1\n          cd \"${{ fromJson(env.WORKFLOW_RUN).repository.name }}\"\n          git remote add upstream \"https://github.com/${{ fromJson(env.WORKFLOW_RUN).head_repository.full_name }}\"\n          git fetch upstream\n          git diff --name-only upstream/${{ fromJson(env.WORKFLOW_RUN).head_branch }} HEAD > changes.txt\n          if grep -qe 'packages/backend/src/assets/ai.json' changes.txt; then\n            echo \"Changes detected in ai.json\"\n            echo \"changes-detected=true\" >> $GITHUB_OUTPUT\n          else\n            echo \"No changes detected in ai.json\"\n            echo \"changes-detected=false\" >> $GITHUB_OUTPUT\n          fi\n\n  trigger-template:\n    needs: extract-context\n    uses: containers/podman-desktop-extension-ai-lab/.github/workflows/recipe-catalog-change-template.yaml@main\n    if: ${{ needs.extract-context.outputs.changes-detected == 'true' }}\n    strategy:\n      fail-fast: false\n    with:\n      trigger-workflow-run-id: ${{ needs.extract-context.outputs.workflow-run-id }}\n      trigger-workflow-fork: ${{ needs.extract-context.outputs.fork-owner }}\n      trigger-workflow-repo-name: ${{ needs.extract-context.outputs.fork-repo }}\n      trigger-workflow-branch: ${{ needs.extract-context.outputs.fork-branch }}\n      trigger-workflow-commit-sha: ${{ needs.extract-context.outputs.commit-sha }}\n      trigger-workflow-base-repo: ${{ needs.extract-context.outputs.base-repo }}\n      ext_tests_options: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=0,EXT_TEST_GPU_SUPPORT_ENABLED=0'\n    secrets: inherit\n
  },
  {
    "path": ".github/workflows/release.yaml",
    "content": "#\n# Copyright (C) 2024-2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: release\n\non:\n  workflow_dispatch:\n    inputs:\n      version:\n        description: 'Version to release'\n        required: true\n      branch:\n        description: 'Branch to use for the release'\n        required: true\n        default: main\nenv:\n  GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}\n\njobs:\n\n  tag:\n    name: Tagging\n    runs-on: ubuntu-24.04\n    outputs:\n      githubTag: ${{ steps.TAG_UTIL.outputs.githubTag}}\n      extVersion: ${{ steps.TAG_UTIL.outputs.extVersion}}\n      releaseId: ${{ steps.create_release.outputs.id}}\n\n    steps:\n      - uses: actions/checkout@v6.0.2\n        with:\n          ref: ${{ github.event.inputs.branch }}\n      - name: Generate tag utilities\n        id: TAG_UTIL\n        run: |\n            TAG_PATTERN=${{ github.event.inputs.version }}\n            echo \"githubTag=v$TAG_PATTERN\" >> ${GITHUB_OUTPUT}\n            echo \"extVersion=$TAG_PATTERN\" >> ${GITHUB_OUTPUT}\n\n      - name: tag\n        run: |\n          git config --local user.name ${{ github.actor }}\n\n          # Add the new version in package.json file\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ \\\"${{ steps.TAG_UTIL.outputs.extVersion }}\\\",#g\" package.json\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ \\\"${{ 
steps.TAG_UTIL.outputs.extVersion }}\\\",#g\" packages/backend/package.json\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ \\\"${{ steps.TAG_UTIL.outputs.extVersion }}\\\",#g\" packages/frontend/package.json\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ \\\"${{ steps.TAG_UTIL.outputs.extVersion }}\\\",#g\" tests/playwright/package.json\n          git add package.json\n          git add packages/backend/package.json\n          git add packages/frontend/package.json\n          git add tests/playwright/package.json\n\n          # commit the changes\n          git commit -m \"chore: 🥁 tagging ${{ steps.TAG_UTIL.outputs.githubTag }} 🥳\"\n          echo \"Tagging with ${{ steps.TAG_UTIL.outputs.githubTag }}\"\n          git tag ${{ steps.TAG_UTIL.outputs.githubTag }}\n          git push origin ${{ steps.TAG_UTIL.outputs.githubTag }}\n      - name: Create Release\n        id: create_release\n        uses: ncipollo/release-action@v1\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          tag: ${{ steps.TAG_UTIL.outputs.githubTag }}\n          name: ${{ steps.TAG_UTIL.outputs.githubTag }}\n          draft: true\n          prerelease: false\n\n      - name: Create the PR to bump the version in the main branch (only if we're tagging from main branch)\n        if: ${{ github.event.inputs.branch == 'main' }}\n        run: |\n          git config --local user.name ${{ github.actor }}\n          CURRENT_VERSION=$(echo \"${{ steps.TAG_UTIL.outputs.extVersion }}\")\n          tmp=${CURRENT_VERSION%.*}\n          minor=${tmp#*.}\n          bumpedVersion=${CURRENT_VERSION%%.*}.$((minor + 1)).0\n          bumpedBranchName=\"bump-to-${bumpedVersion}\"\n          git checkout -b \"${bumpedBranchName}\"\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ \\\"${bumpedVersion}-next\\\",#g\" package.json\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ 
\\\"${bumpedVersion}-next\\\",#g\" packages/backend/package.json\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ \\\"${bumpedVersion}-next\\\",#g\" packages/frontend/package.json\n          sed -i  \"s#version\\\":\\ \\\"\\(.*\\)\\\",#version\\\":\\ \\\"${bumpedVersion}-next\\\",#g\" tests/playwright/package.json\n          git add package.json\n          git add packages/backend/package.json\n          git add packages/frontend/package.json\n          git add tests/playwright/package.json\n          git commit -s --amend -m \"chore: bump version to ${bumpedVersion}\"\n          git push origin \"${bumpedBranchName}\"\n          echo -e \"📢 Bump version to ${bumpedVersion}\\n\\n${{ steps.TAG_UTIL.outputs.extVersion }} has been released.\\n\\n Time to switch to the new ${bumpedVersion} version 🥳\" > /tmp/pr-title\n          pullRequestUrl=$(gh pr create --title \"chore: 📢 Bump version to ${bumpedVersion}\" --body-file /tmp/pr-title --head \"${bumpedBranchName}\" --base \"main\")\n          echo \"📢 Pull request created: ${pullRequestUrl}\"\n          echo \"➡️ Flag the PR as being ready for review\"\n          gh pr ready \"${pullRequestUrl}\"\n          echo \"🔅 Mark the PR as being ok to be merged automatically\"\n          gh pr merge \"${pullRequestUrl}\" --auto --rebase\n          git checkout ${{ steps.TAG_UTIL.outputs.githubTag }}\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n  build:\n    needs: [tag]\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6.0.2\n        with:\n          ref: ${{ needs.tag.outputs.githubTag }}\n\n      - uses: pnpm/action-setup@v5\n        name: Install pnpm\n        with:\n          run_install: false\n\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 24\n          cache: 'pnpm'\n\n      - name: Execute pnpm\n        run: pnpm install\n\n      - name: Run Build\n        run: pnpm build\n\n      - name: Login to ghcr.io\n        run: 
podman login --username ${{ github.repository_owner }} --password ${{ secrets.GITHUB_TOKEN }} ghcr.io\n\n      - name: Build Image\n        id: build-image\n        run: |\n          podman build -t ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:${{ needs.tag.outputs.extVersion }} .\n          podman push ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:${{ needs.tag.outputs.extVersion }}\n          podman tag ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:${{ needs.tag.outputs.extVersion }} ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:latest\n          podman push ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:latest\n\n  release:\n    needs: [tag, build]\n    name: Release\n    runs-on: ubuntu-24.04\n    steps:\n      - name: id\n        run: echo the release id is ${{ needs.tag.outputs.releaseId}}\n\n      - name: Publish release\n        uses: StuYarrow/publish-release@v1.1.2\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          id: ${{ needs.tag.outputs.releaseId}}\n"
  },
  {
    "path": ".github/workflows/update-ramalama-references.sh",
    "content": "#!/usr/bin/env bash\n#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Script to update ramalama image references in inference-images.json\nset -euo pipefail\n\nJSON_PATH=\"packages/backend/src/assets/inference-images.json\"\nTMP_JSON=\"${JSON_PATH}.tmp\"\n\nTAG=$1\n# Images and their keys in the JSON\nIMAGES=(\n  \"whispercpp:ramalama/ramalama-whisper-server:default\"\n  \"llamacpp:ramalama/ramalama-llama-server:default\"\n  \"llamacpp:ramalama/cuda-llama-server:cuda\"\n  \"openvino:ramalama/openvino:default\"\n)\n\ncp \"$JSON_PATH\" \"$TMP_JSON\"\n\nfor entry in \"${IMAGES[@]}\"; do\n  IFS=\":\" read -r key image jsonkey <<< \"$entry\"\n  digest=$(curl -s \"https://quay.io/v2/$image/manifests/$TAG\"  -H 'Accept: application/vnd.oci.image.index.v1+json' --head | grep -i Docker-Content-Digest | awk -e '{ print $2 }' | tr -d '\\r')\n  # Update the JSON file with the new digest\n  jq --arg img \"quay.io/$image\" --arg dig \"$digest\" --arg key \"$key\" --arg jsonkey \"$jsonkey\" \\\n    '(.[$key][$jsonkey]) = ($img + \"@\" + $dig)' \\\n    \"$TMP_JSON\" > \"$TMP_JSON.new\" && mv \"$TMP_JSON.new\" \"$TMP_JSON\"\ndone\n\n# Compare and update if changed\nif cmp -s \"$JSON_PATH\" \"$TMP_JSON\"; then\n  echo \"No update needed: digests are up to date.\"\n  rm \"$TMP_JSON\"\n  exit 0\nelse\n  mv \"$TMP_JSON\" \"$JSON_PATH\"\n  echo \"Updated inference-images.json with latest 
digests.\"\n  exit 10\nfi\n"
  },
  {
    "path": ".github/workflows/update-ramalama-references.yaml",
    "content": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# This workflow automatically updates ramalama image digests in inference-images.json\n# and creates a pull request with the changes.\n\nname: update-ramalama-references\n\non:\n  schedule:\n    - cron: '0 3 * * *' # Runs daily at 03:00 UTC\n  workflow_dispatch:\n\npermissions:\n  contents: write\n\njobs:\n  update-references:\n    runs-on: ubuntu-24.04\n    steps:\n      - uses: actions/checkout@0c366fd6a839edf440554fa01a7085ccba70ac98 # v5.0.1\n\n      - name: Get latest ramalama version\n        id: get_ramalama_version\n        run: |\n          RAMALAMA_VERSION=$(curl -s https://quay.io/v2/ramalama/ramalama-llama-server/tags/list -s | jq .tags[] | grep -E '^\"[0-9]+\\.[0-9]+\\.[0-9]+\"$' | sort -V | tail -n 1 | tr -d '\"')\n          echo \"RAMALAMA_VERSION=${RAMALAMA_VERSION}\" >> $GITHUB_OUTPUT\n\n      - name: Check if PR already exists\n        id: pr_exists\n        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0\n        with:\n          script: |\n            const branch = `update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}`;\n            const { data: pulls } = await github.rest.pulls.list({\n              owner: context.repo.owner,\n              repo: context.repo.repo,\n              head: `${context.repo.owner}:${branch}`,\n              
state: 'open',\n            });\n            if (pulls.length > 0) {\n              core.setOutput('exists', 'true');\n            } else {\n              core.setOutput('exists', 'false');\n            }\n\n      - name: Update ramalama image references in inference-images.json\n        id: update_digests\n        if: steps.pr_exists.outputs.exists == 'false'\n        run: |\n          bash .github/workflows/update-ramalama-references.sh \"${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}\"\n        continue-on-error: true\n\n      - name: Commit changes\n        if: steps.pr_exists.outputs.exists == 'false' && steps.update_digests.outcome == 'failure'\n        run: |\n          git config --global user.email \"github-actions[bot]@users.noreply.github.com\"\n          git config --global user.name \"github-actions[bot]\"\n          git checkout -b \"update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}\"\n          git add packages/backend/src/assets/inference-images.json\n          git commit -m \"chore: update ramalama image references ${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}\"\n          git push origin \"update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}\"\n\n      - name: Create Pull Request\n        if: steps.pr_exists.outputs.exists == 'false' && steps.update_digests.outcome == 'failure'\n        run: |\n          echo -e \"update ramalama image references to ${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}\" > /tmp/pr-title\n          pullRequestUrl=$(gh pr create --title \"chore: update ramalama image references to ${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}\" --body-file /tmp/pr-title --head \"update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}\" --base \"main\")\n          echo \"📢 Pull request created: ${pullRequestUrl}\"\n          echo \"➡️ Flag the PR as being ready for review\"\n          gh pr 
ready \"${pullRequestUrl}\"\n        env:\n          GITHUB_TOKEN: ${{ secrets.PODMAN_DESKTOP_BOT_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "node_modules\n.DS_Store\ndist\n.eslintcache\n**/coverage\n.idea\noutput\n"
  },
  {
    "path": ".husky/commit-msg",
    "content": "#!/bin/sh\n#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\nset -u\n# avoid [[ which is not POSIX sh.\nif test \"$#\" != 1; then\n  echo \"$0 requires an argument.\"\n  exit 1\nfi\nif test ! -f \"$1\"; then\n  echo \"file does not exist: $1\"\n  exit 1\nfi\npnpm commitlint --edit \"$1\"\nSOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\\(.*>\\).*$/Signed-off-by: \\1/p')\ngrep -qs \"^$SOB\" \"$1\" || echo \"$SOB\" >>\"$1\"\n# Catches duplicate Signed-off-by lines.\ntest \"\" = \"$(grep '^Signed-off-by: ' \"$1\" |\n  sort | uniq -c | sed -e '/^[   ]*1[    ]/d')\" || {\n  echo >&2 Duplicate Signed-off-by lines.\n  exit 1\n}\n"
  },
  {
    "path": ".husky/pre-commit",
    "content": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\npnpm lint-staged\n"
  },
  {
    "path": ".npmrc",
    "content": "node-linker=hoisted\n"
  },
  {
    "path": ".prettierrc",
    "content": "{\n  \"svelteSortOrder\" : \"options-styles-scripts-markup\",\n  \"svelteStrictMode\": true,\n  \"svelteAllowShorthand\": false,\n  \"svelteIndentScriptAndStyle\": false,\n  \"bracketSameLine\": true,\n  \"singleQuote\": true,\n  \"arrowParens\": \"avoid\",\n  \"printWidth\": 120,\n  \"trailingComma\": \"all\",\n  \"plugins\": [\"prettier-plugin-svelte\"]\n}\n"
  },
  {
    "path": ".vscode/settings.json",
    "content": "{\n  \"typescript.preferences.importModuleSpecifier\": \"non-relative\"\n}\n"
  },
  {
    "path": "CODE-OF-CONDUCT.md",
    "content": "Podman Desktop Extension AI Lab Project Community Code of Conduct\n\nThe Podman Desktop Extension AI Lab Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).\n"
  },
  {
    "path": "Containerfile",
    "content": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFROM scratch as builder\nCOPY packages/backend/dist/ /extension/dist\nCOPY packages/backend/package.json /extension/\nCOPY packages/backend/media/ /extension/media\nCOPY LICENSE /extension/\nCOPY packages/backend/icon.png /extension/\nCOPY packages/backend/brain.woff2 /extension/\nCOPY README.md /extension/\nCOPY api/openapi.yaml /extension/api/\n\nFROM scratch\n\nLABEL org.opencontainers.image.title=\"AI Lab\" \\\n        org.opencontainers.image.description=\"AI Lab\" \\\n        org.opencontainers.image.vendor=\"Red Hat\" \\\n        io.podman-desktop.api.version=\">= 1.8.0\"\n\nCOPY --from=builder /extension /extension\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "MIGRATION.md",
    "content": "# Migration guide\n\n## ℹ️ ApplicationCatalog\n\nBefore **Podman AI Lab** `v1.2.0` the [user-catalog](./PACKAGING-GUIDE.md#applicationcatalog) was not versioned.\nStarting from `v1.2.0` the user-catalog requires a `version` property.\n\n> [!NOTE]\n> The `user-catalog.json` file can be found in `~/.local/share/containers/podman-desktop/extensions-storage/redhat.ai-lab`.\n\nThe list of catalog versions can be found in [packages/backend/src/utils/catalogUtils.ts](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/utils/catalogUtils.ts)\n\nThe catalog has its own version number, as we may not need to update it with every update. It will follow the semantic versioning convention.\n\n## `None` to Catalog `1.0`\n\n`None` represents any catalog version prior to the first versioning.\n\nVersion `1.0` of the catalog adds an important property to models `backend`, defining the type of framework required by the model to run (E.g. LLamaCPP, WhisperCPP).\n\n### 🛠️ How to migrate\n\nYou can either delete any existing `user-catalog` by deleting the `~/.local/share/containers/podman-desktop/extensions-storage/redhat.ai-lab/user-catalog.json`.\n\n> [!WARNING]\n> This will remove the models you have imported from the catalog. You will be able to import them again afterward.\n\nIf you want to keep the data, you can migrate it by updating certain properties within the recipes and models fields.\n\n### Recipes\n\nThe recipe object has a new property `backend` which defines which framework is required.\nAccepted values are `llama-cpp`, `whisper-cpp` and `none`.\n\nMoreover, the `models` property has been changed to `recommended`.\n\n> [!TIP]\n> Before Podman AI Lab version v1.2 recipes used the `models` property to list the compatible models. Now all models using the same `backend` can be used. 
We introduced `recommended` to highlight certain models.\n\n**Example**\n\n```diff\n{\n  \"version\": \"1.0\",\n  \"recipes\": [{\n    \"id\": \"chatbot\",\n    \"description\" : \"This is a Streamlit chat demo application.\",\n    \"name\" : \"ChatBot\",\n    \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n-   \"models\": [\n+   \"recommended\": [\n      \"hf.instructlab.granite-7b-lab-GGUF\",\n       \"hf.instructlab.merlinite-7b-lab-GGUF\"\n    ]\n+   \"backend\": \"llama-cpp\"\n  }],\n  \"models\": [],\n  \"categories\": []\n}\n```\n\n### Models\n\nThe model object has also the new property `backend`, which defines which framework is required.\nAdditionally, we have enhanced security by introducing a new optional `sha256` property.\n\n> [!TIP]\n> To get the sha256 of a model, you can use the `sha256sum [model-file]` command in a terminal.\n\n**Example**\n\n```diff\n{\n  \"version\": \"1.0\",\n  \"recipes\": [],\n  \"models\": [{\n    \"id\": \"hf.instructlab.granite-7b-lab-GGUF\",\n    \"name\": \"instructlab/granite-7b-lab-GGUF\",\n    \"description\": \"# InstructLab Granite 7B\",\n    \"hw\": \"CPU\",\n    \"registry\": \"Hugging Face\",\n    \"license\": \"Apache-2.0\",\n    \"url\": \"https://huggingface.co/instructlab/granite-7b-lab-GGUF/resolve/main/granite-7b-lab-Q4_K_M.gguf\",\n    \"memory\": 4080218931,\n    \"properties\": {\n      \"chatFormat\": \"openchat\"\n    },\n+   \"sha256\": \"6adeaad8c048b35ea54562c55e454cc32c63118a32c7b8152cf706b290611487\",\n+   \"backend\": \"llama-cpp\"\n  }],\n  \"categories\": []\n}\n```\n"
  },
  {
    "path": "PACKAGING-GUIDE.md",
    "content": "# Packaging guide\n\n## ApplicationCatalog\n\nAI Lab uses an internal catalog embedded within the application. This catalog is loaded\nby AI Lab and displayed when you access the catalog page.\n\nThe format of the catalog is JSON. It is possible for users to have a custom version of\nthe catalog. In order to do so, copy the file located at https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/assets/ai.json to $HOME/podman-desktop/ai-lab/catalog.json and AI Lab will use it instead of the embedded one.\nAny change done to this file will also be automatically loaded by AI Lab.\n\n### Format of the catalog file\n\nThe catalog file has three main elements: categories, models and recipes. Each of these elements is\nrepresented in the JSON file as an array.\n\nThe catalog is `versioned`. Current version can be found in [ai.json](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/assets/ai.json#L2).\n\n> :warning: when the version of the catalog is undefined or different from the current, the user-catalog will be ignored.\n\n#### Categories\n\nThis is the top level construct of the catalog UI. Recipes are grouped into categories. A category\nrepresents the kind of AI application. Although the list of categories provided by default by\nAI Lab represents the AI landscape, it is possible to add new categories.\n\nA category has three main attributes: an id (which should be unique among categories), a description\nand a name. The category id attribute will then be used to attach a recipe to one or several categories.\n\n#### Models\n\nThe catalog also lists the models that may be associated to recipes. 
A model is also a first class\ncitizen in AI Lab as they will be listed in the Models page and can be tested through the playground.\n\nA model has the following attributes:\n- ```id```: a unique identifier for the model\n- ```name```: the model name\n- ```description```: a detailed description about the model\n- ```registry```: the model registry where the model is stored\n- ```popularity```: an integer field giving the rating of the model. Can be thought as the number of stars\n- ```license```: the license under which the model is available\n- ```url```: the URL used to download the model\n- ```memory```: the memory footprint of the model in bytes, as computed by the workflow `.github/workflows/compute-model-sizes.yaml`\n- ```sha256```: the SHA-256 checksum to be used to verify the downloaded model is identical to the original. It is optional and it must be HEX encoded\n\n#### Recipes\n\nA recipe is a sample AI application that is packaged as one or several containers. It is built by AI Lab when the user chooses to download and run it on their workstation. It is provided as\nsource code and AI Lab will make sure the container images are built prior to launching the containers.\n\nA recipe has the following attributes:\n- ```id```: a unique identifier to the recipe\n- ```name```: the recipe name\n- ```description```: a detailed description about the recipe\n- ```repository```: the URL where the recipe code can be retrieved\n- ```ref```: an optional ref in the repository to checkout (a branch name, tag name, or commit full id - short commit id won't be recognized). If not defined, the default branch will be used\n- ```categories```: an array of category id to be associated by this recipe\n- ```basedir```: an optional path within the repository where the ai-lab.yaml file is located. 
If not provided, the ai-lab.yaml is assumed to be located at the root of the repository\n- ```readme```: a markdown description of the recipe\n- ```models```: an array of model id to be associated with this recipe\n\n#### Recipe configuration file\n\nThe configuration file is called ```ai-lab.yaml``` and uses the following syntax.\n\nThe root elements are called ```version``` and ```application```.\n\n```version``` represents the version of the specifications that ai-lab adheres to (so far, the only accepted value here is `v1.0`).\n\n```application``` contains an attribute called ```containers``` whose syntax is an array of objects containing the following attributes:\n- ```name```: the name of the container\n- ```contextdir```: the context directory used to build the container.\n- ```containerfile```: the containerfile used to build the image\n- ```model-service```: a boolean flag used to indicate if the container is running the model or not\n- ```arch```: an optional array of architectures with which this image is compatible. The values follow the\n[GOARCH specification](https://go.dev/src/go/build/syslist.go)\n- ```gpu-env```: an optional array of GPU environments with which this image is compatible. 
The only accepted value here is cuda.\n- ```ports```: an optional array of ports for which the application listens to.\n- `image`: an optional image name to be used when building the container image.\n\nThe container that is running the service (having the ```model-service``` flag equal to ```true```) can use at runtime\nthe model managed by AI Lab through an environment variable ```MODEL_PATH``` whose value is the full path name of the\nmodel file.\n\nBelow is given an example of such a configuration file:\n```yaml\napplication:\n  containers:\n    - name: chatbot-inference-app\n      contextdir: ai_applications\n      containerfile: builds/Containerfile\n    - name: chatbot-model-service\n      contextdir: model_services\n      containerfile: base/Containerfile\n      model-service: true\n      arch:\n        - arm64\n        - amd64\n      ports:\n        - 8001\n      image: quay.io/redhat-et/chatbot-model-service:latest\n    - name: chatbot-model-servicecuda\n      contextdir: model_services\n      containerfile: cuda/Containerfile\n      model-service: true\n      gpu-env:\n        - cuda\n      arch:\n        - amd64\n      ports:\n        - 8501\n      image: quay.io/redhat-et/model_services:latest\n```\n"
  },
  {
    "path": "README.md",
    "content": "# Podman AI Lab\n\nPodman AI Lab is an open source extension for Podman Desktop to work with LLMs (Large Language Models) on a local environment. Featuring a recipe catalog with common AI use cases, a curated set of open source models, and a playground for learning, prototyping and experimentation, Podman AI Lab helps you to quickly and easily get started bringing AI into your applications, without depending on infrastructure beyond your laptop ensuring data privacy and security.\n\n## Topics\n- [Technology](#technology)\n- [Extension features](#extension-features)\n- [Requirements](#requirements)\n- [Installation](#installation)\n- [Usage](#usage)\n- [Contributing](#contributing)\n- [Feedback](#feedback)\n\n## Technology\n\nPodman AI Lab uses [Podman](https://podman.io) machines to run inference servers for LLM models and AI applications.\nThe AI models can be downloaded, and common formats like [GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md), [Pytorch](https://pytorch.org) or [Tensorflow](https://www.tensorflow.org) are supported.\n\n## Extension features\n\n### AI models\n\nPodman AI Lab provides a curated list of open source AI models and LLMs. Once downloaded, the models are available to be used for AI applications, model services and playgrounds.\n\n#### Model services\n\nOnce a model is downloaded, a model service can be started. A model service is an inference server that is running in a container and exposing the model through the well-known chat API common to many providers.\n\n#### Playgrounds\n\nThe integrated Playground environments allow for experimenting with available models in a local environment. An intuitive user prompt helps in exploring the capabilities and accuracy of various models and aids in finding the best model for the use case at hand. 
The Playground interface further allows for parameterizing models to further optimize the settings and attributes of each model.\n\n### AI applications\n\nOnce an AI model is available through a well-known endpoint, it's easy to imagine a new world of applications that will connect and use the AI model. Podman AI Lab supports AI applications as a set of containers that are connected together.\n\nPodman AI Lab ships with a so-called Recipes Catalog that helps you navigate a number of core AI use cases and problem domains such as Chat Bots, Code Generators and Text Summarizers. Each recipe comes with detailed explanations and sample applications that can be run with various large language models (LLMs). Experimenting with multiple models allows finding the optimal one for your use case.\n\n## Requirements\n\n### Software\n\n- [Podman Desktop 1.8.0+](https://github.com/containers/podman-desktop)\n- [Podman 4.9.0+](https://github.com/containers/podman)\n- Compatible with Windows, macOS & Linux\n\n### Hardware\n\nLLMs AI models are heavy resource consumers both in terms of memory and CPU. Each of the provided models consumes about 4GiB of memory and requires at least 4 CPUs to run.\n\nWe recommend a minimum of 12GB of memory and at least 4 CPUs for the Podman machine. On Windows, the podman machine shares memory and CPU with all the Windows Subsystem for Linux (WSL) machines. By default, WSL is set to 50% of total memory and all logical processors. 
This can be changed in the WSL Settings (See [WSL Config](https://learn.microsoft.com/en-us/windows/wsl/wsl-config#wslconfig)).\n\nAs an additional recommended practice, do not run more than 3 models simultaneously.\n\n## Installation\n\nYou can install the Podman AI Lab extension directly inside Podman Desktop.\n\nGo to Extensions > Catalog > Install Podman AI Lab.\n\n![](https://github.com/containers/podman-desktop-media/raw/ai-lab/gifs/install_ai_lab.gif)\n\nTo install a development version, use the `Install custom...` action as shown in the recording below.\n\nThe name of the image to use is `ghcr.io/containers/podman-desktop-extension-ai-lab`. You can get released tags for the image at https://github.com/containers/podman-desktop-extension-ai-lab/pkgs/container/podman-desktop-extension-ai-lab.\n\n![](https://github.com/containers/podman-desktop-media/raw/ai-lab/gifs/install_development_version.gif)\n\n## Usage\n\n1. **Download a model**\n\nLet's select a model from the catalog and download it locally to our workstation.\n\n![](https://github.com/containers/podman-desktop-media/raw/ai-lab/gifs/download-model.gif)\n\n2. **Start an inference server**\n\nOnce a model is available locally, let's start an inference server\n\n![](https://github.com/containers/podman-desktop-media/raw/ai-lab/gifs/start-inference-server.gif)\n\n3. **Start a playground to have a chat conversation with model**\n\n![](https://github.com/containers/podman-desktop-media/raw/ai-lab/gifs/playground.gif)\n\n4. 
**Start an AI application and use it from the browser**\n\n![](https://github.com/containers/podman-desktop-media/raw/ai-lab/gifs/start-ai-app.gif)\n\n## Contributing\n\nWant to help develop and contribute to Podman AI Lab?\n\nYou can use `pnpm watch --extension-folder` from the Podman Desktop directory to automatically rebuild and test the AI Lab extension:\n\n> **_Note_**: make sure you have the appropriate [prerequisites](https://github.com/containers/podman-desktop/blob/main/CONTRIBUTING.md#prerequisites-prepare-your-environment)\n  installed.\n\n```sh\ngit clone https://github.com/containers/podman-desktop\ngit clone https://github.com/containers/podman-desktop-extension-ai-lab\ncd podman-desktop-extension-ai-lab\ncorepack enable pnpm\npnpm install\npnpm build\ncd ../podman-desktop\npnpm watch --extension-folder ../podman-desktop-extension-ai-lab/packages/backend\n```\n\nIf you are live editing the frontend package, from packages/frontend folder:\n\n```\n$ pnpm watch\n```\n\n### Cleaning up resources\n\nWe'll be adding a way to let a user cleanup their environment: see issue https://github.com/containers/podman-desktop-extension-ai-lab/issues/469.\nFor the time being, please consider the following actions:\n1. Remove the extension from Podman Desktop, from the Settings > Extensions\n2. Remove the running playground environments from the list of Pods\n3. Remove the images built by the recipes\n4. Remove the containers related to AI\n5. Cleanup your local clone of the recipes: `$HOME/podman-desktop/ai-lab`\n\n### 📖 Providing a custom catalog\n\nThe extension provides by default a curated list of recipes, models and categories. 
However, this system is extensible and you can define your own.\n\nTo enhance the existing catalog, you can create a file located in the extension storage folder `$HOME/.local/share/containers/podman-desktop/extensions-storage/redhat.ai-lab/user-catalog.json`.\n\nIt must follow the same format as the default catalog [in the sources of the extension](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/assets/ai.json).\n\n> :information_source: The default behaviour is to append the items of the user's catalog to the default one.\n\n> :warning: Each item (recipes, models or categories) has a unique id; when conflicts between the default catalog and the user one are found, the user's items overwrite the defaults.\n\n### Packaging sample applications\n\nSample applications may be added to the catalog. See [packaging guide](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/PACKAGING-GUIDE.md) for detailed information.\n\n## Roadmap\n\nThe roadmap is always open and we are looking for your feedback. Please create new issues and upvote the issues that feel the most important to you.\n\nWe will be working on the following items:\n- **Expanded Recipes**: Discover new use cases and samples to inspire and accelerate your applications.\n- **GPU Acceleration**: Speeding up processing times by leveraging GPU acceleration.\n- **API/CLI**: Interact with Podman AI Lab from CLI and APIs.\n- **Enhanced Playgrounds**: Streamlined workflows and UX giving a better space to experiment with LLMs and quickly iterate.\n- **Fine Tuning with [InstructLab](https://instructlab.ai/)**: Re-train LLMs with a set of taxonomy knowledge. 
Learn more about [the InstructLab project](https://github.com/instructlab).\n- **Enable Function Calling**: Use LLMs to retrieve or interact with external tools by doing API calls.\n- **Local RAG**: Explore RAG pattern, load your document and test behavior of the model.\n- **Bridge with AI Platforms (incl. K8s)**: Connect to remote models and ease deployment of applications.\n\n## Feedback\n\nYou can provide your feedback on the extension with [this form](https://forms.gle/tctQ4RtZSiMyQr3R8) or create [an issue on this repository](https://github.com/containers/podman-desktop-extension-ai-lab/issues).\n"
  },
  {
    "path": "RELEASE.md",
    "content": "# Release process for Podman AI Lab\n\n## Pre-requisites\n\n- Create Enhancement Issue `Release vX.X.X` for current sprint, then update the label to `kind/release` and assign it to yourself.\n- Confirm with Podman Desktop maintainers that pending / need-to-go-in PR's have been merged.\n- Notify main contributors on Discord / Slack.\n\nIn the below example, we will pretend that we're upgrading from `1.1.0` to `1.2.0`. Please use the CORRECT release numbers as these are just example numbers.\n\n## Release timeline\n\nBelow is what a typical release week may look like:\n\n- **Monday (Notify):** 48-hour notification. Communicate to maintainers and public channels a release will be cut on Wednesday and to merge any pending PRs. Inform QE team. Start work on blog post as it is usually the longest part of the release process.\n- **Tuesday (Staging, Testing & Blog):** Stage the release (see instructions below) to create a new cut of the release to test. Test the pre-release (master branch) build briefly. Get feedback from committers (if applicable). Push the blog post for review (as it usually takes a few back-and-forth reviews on documentation).\n- **Wednesday (Release):** Publish the new release on the catalog using the below release process.\n- **Thursday (Post-release Testing & Blog):** Test the post-release build briefly for any critical bugs. Confirm that new release has been pushed to the catalog. Push the blog post live. Get a known issues list together from QE and publish to the Podman Desktop Discussions, link to this from the release notes.\n- **Friday (Communicate):** Friday is statistically the best day for new announcements. Post on internal channels. Post on reddit, hackernews, twitter, etc.\n\n## Releasing on GitHub\n\n1. Go to https://github.com/containers/podman-desktop-extension-ai-lab/actions/workflows/release.yaml\n1. Click on the top right drop-down menu `Run workflow`\n1. Enter the name of the release. 
Example: `1.2.0` (DO NOT use the v prefix like v1.2.0)\n1. Specify the branch to use for the new release. It's main for all major releases. For a bugfix release, you'll select a different branch.\n1. Click on the `Run workflow` button.\n1. Note: `Run workflow` takes approximately 2-3 minutes.\n1. Close the milestone for the respective release, make sure that all tasks within the milestone are completed / updated before closing. https://github.com/containers/podman-desktop-extension-ai-lab/milestones\n1. If not already created, click on `New Milestone` and create a new milestone for the NEXT release.\n1. Check that https://github.com/containers/podman-desktop-extension-ai-lab/actions/workflows/release.yaml has been completed.\n1. There should be an automated PR that has been created. This will be automatically merged in after all tests have been run (takes 5-10 minutes). The title looks like `chore: 📢 Bump version to 1.3.0`. Rerun the workflow manually if some of the e2e tests are failing.\n1. Above PR MUST be merged before continuing with the steps.\n1. Edit the new release https://github.com/containers/podman-desktop-extension-ai-lab/releases/edit/v1.2.0\n1. Select the previous tag (v1.1.0), click on `Generate release notes` and then click on `Update release`\n\n## Test the release before it rolls out\n\nThe release is a pre-release, which means it is not yet the latest version, so no clients will automatically update to this version.\n\nIt allows QE (and everyone else) to test the release before it goes live on the catalog.\n\n\n## Next phase\n\n- ❌ All severe bugs and regressions are investigated and discussed. If we agree any should block the release, we need to fix the bugs and do a respin of the release with a new .z release like 1.2.1 instead of 1.2.0.\n\nCreate a branch if it does not exist. For example 1.2.x if 1.2.0 failed. Then, cherry-pick bugfixes in that branch.\n\n- ✅ If committers agree we have a green light, proceed. 
**Do not forget to change the release from 'pre-release' to 'latest release' before proceeding**.\n\n## Updating catalog\n\nPre-requisites:\n\n- Ensure the release is OK (green workflow, image has been published https://github.com/containers/podman-desktop-extension-ai-lab/releases https://github.com/containers/podman-desktop-extension-ai-lab/pkgs/container/podman-desktop-extension-ai-lab).\n\n#### Catalog\n\nCreate and submit a PR to the catalog (https://github.com/containers/podman-desktop-catalog on branch gh-pages). This is manual and will be automated in the future.\n"
  },
  {
    "path": "SECURITY.md",
    "content": "## Security and Disclosure Information Policy for the Podman Desktop Extension AI Lab Project\n\nThe Podman Desktop Extension AI Lab Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.\n"
  },
  {
    "path": "USAGE_DATA.md",
    "content": "# Data Collection\n\nThe AI Lab extension uses telemetry to collect anonymous usage data in order to identify issues and improve our user experience. You can read our privacy statement\n[here](https://developers.redhat.com/article/tool-data-collection).\n\nTelemetry for the extension is based on the Podman Desktop telemetry.\n\nUsers are prompted during Podman Desktop first startup to accept or decline telemetry. This setting can be\nchanged at any time in Settings > Preferences > Telemetry.\n\nOn disk the setting is stored in the `\"telemetry.*\"` keys within the settings file,\nat `$HOME/.local/share/containers/podman-desktop/configuration/settings.json`. A generated anonymous id\nis stored at `$HOME/.redhat/anonymousId`.\n\n## What's included in the telemetry data\n\n- General information, including operating system, machine architecture, and country.\n- When the extension starts and stops.\n- When the icon to enter the extension zone is clicked.\n- When a recipe page is opened (with recipe Id and name).\n- When a sample application is pulled (with recipe Id and name).\n- When a playground is started or stopped (with model Id).\n- When a request is sent to a model in the playground (with model Id, **without** request content).\n- When a model is downloaded or deleted from disk.\n\nNo personally identifiable information is captured. An anonymous id is used so that we can correlate the actions of a user even if we can't tell who they are.\n"
  },
  {
    "path": "api/openapi.yaml",
    "content": "openapi: 3.0.0\ninfo:\n  title: Podman Desktop AI Lab API\n  description: API for interacting with the Podman Desktop AI Lab service.\n  version: 0.0.1\nservers:\n  - url: http://{host}:{port}\n    description: Podman Desktop AI Lab API server\n    variables:\n      host:\n        default: 127.0.0.1\n      port:\n        default: '10434'\n\ntags:\n  - name: server\n    description: Server information\n\npaths:\n  /api/version:\n    get:\n      operationId: getServerVersion\n      tags:\n        - server\n      description: Return the Podman Desktop AI Lab API server version\n      summary: Return the Podman Desktop AI Lab API server version\n      responses:\n        '200':\n          description: The Podman Desktop AI Lab API server version was successfully fetched\n          content:\n            application/json:\n              schema:\n                type: object\n                additionalProperties: false\n                properties:\n                  version:\n                    type: string\n                required:\n                - version\n  /api/tags:\n    get:\n      operationId: getModels\n      tags:\n        - models\n      description: List models that are available locally\n      summary: List models that are available locally\n      responses:\n        '200':\n          description: The models were successfully fetched\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ListResponse'\n  /api/pull:\n    post:\n      operationId: pullModel\n      tags:\n        - models\n      description: |\n        Download a model from the Podman AI Lab catalog.\n      summary: |\n        Download a model from the Podman AI Lab Catalog.\n      requestBody:\n        required: true\n        description: Request to pull a model\n        content:\n          application/json:\n            schema:\n              $ref: '#/components/schemas/PullRequest'\n      responses:\n        
'200':\n          description: Model was successfully pulled\n          content:\n            application/x-ndjson:\n              schema:\n                $ref: '#/components/schemas/ProgressResponse'\n\n  /api/show:\n    post:\n      operationId: showModel\n      tags:\n        - models\n      description: |\n        Not implemented, returns an empty object - Show information about a model including details, modelfile, template, \n        parameters, license, and system prompt.\n      summary: |\n        Show information about a model including details, modelfile, template, \n        parameters, license, and system prompt.\n      requestBody:\n        required: true\n        description: Request to show a model\n        content:\n          application/json:\n            schema:\n              $ref: '#/components/schemas/ShowRequest'\n      responses:\n        '200':\n          description: The model's information was successfully fetched\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ShowResponse'\n\n  /api/generate:\n    post:\n      operationId: generateResponse\n      tags:\n        - generate\n      description: |\n        Generate a response for a given prompt with a provided model. This is \n        a streaming endpoint, so there will be a series of responses. The \n        final response object will include statistics and additional data from \n        the request.\n      summary: |\n        Generate a response for a given prompt with a provided model. This is \n        a streaming endpoint, so there will be a series of responses. 
The final \n        response object will include statistics and additional data from the \n        request.\n      requestBody:\n        required: true\n        description: Request to generate a response\n        content:\n          application/json:\n            schema:\n              $ref: '#/components/schemas/GenerateRequest'\n      responses:\n        '200':\n          description: A response was successfully generated for the prompt\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/GenerateResponse'\n\n  /api/chat:\n    post:\n      operationId: generateChat\n      tags:\n        - chat\n        - generate\n      description: | \n        Generate the next message in a chat with a provided model. This is a \n        streaming endpoint, so there will be a series of responses. Streaming \n        can be disabled using \"stream\": false. The final response object will \n        include statistics and additional data from the request.\n      summary: |\n        Generate the next message in a chat with a provided model. This is a \n        streaming endpoint, so there will be a series of responses. Streaming \n        can be disabled using \"stream\": false. 
The final response object will \n        include statistics and additional data from the request.\n      requestBody:\n        required: true\n        description: Request to generate a response in a chat\n        content:\n          application/json:\n            schema:\n              $ref: '#/components/schemas/ChatRequest'\n      responses:\n        '200':\n          description: The next message was successfully generated for the chat\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ChatResponse'\n\n  /api/ps:\n    get:\n      operationId: getRunningModels\n      tags:\n        - models\n      description: List running models\n      summary: List running models\n      responses:\n        '200':\n          description: The list of running models was successfully fetched\n          content:\n            application/json:\n              schema:\n                $ref: '#/components/schemas/ProcessResponse'\n\ncomponents:\n  schemas:\n    ListResponse:\n      type: object\n      description: Response from a list request\n      properties:\n        models:\n          type: array\n          items:\n            $ref: '#/components/schemas/ListModelResponse'\n\n    ListModelResponse:\n      type: object\n      description: Response from a list request\n      properties:\n        name:\n          type: string\n        model:\n          type: string\n        modified_at:\n          type: string\n          format: date-time\n        size:\n          type: integer\n        digest:\n          type: string\n        details:\n          $ref: '#/components/schemas/ModelDetails'\n\n    ProcessResponse:\n      type: object\n      description: Response with a list of running models\n      properties:\n        models:\n          type: array\n          items:\n            $ref: '#/components/schemas/ProcessModelResponse'\n\n    ProcessModelResponse:\n      type: object\n      description: Running model 
description\n      properties:\n        name:\n          type: string\n        model:\n          type: string\n        size:\n          type: integer\n        digest:\n          type: string\n        details:\n          $ref: '#/components/schemas/ModelDetails'\n        expires_at:\n          type: string\n          format: date-time\n        size_vram:\n          type: integer\n\n    ModelDetails:\n      type: object\n      description: Details about a model\n      properties:\n        parent_model:\n          type: string\n        format:\n          type: string\n        family:\n          type: string\n        families:\n          type: array\n          items:\n            type: string\n        parameter_size:\n          type: string\n        quantization_level:\n          type: string\n\n    PullRequest:\n      type: object\n      description: Request to pull a model\n      properties:\n        model:\n          type: string\n          description: The name of the model to pull\n          example: instructlab/granite-7b-lab-GGUF\n        insecure:\n          type: boolean\n          description: |\n            allow insecure connections to the catalog.\n        stream:\n          type: boolean\n          description: |\n            If false the response will be returned as a single response object,\n            rather than a stream of objects\n      required:\n        - model\n\n    ProgressResponse:\n      type: object\n      description: The response returned from various streaming endpoints\n      properties:\n        status:\n          type: string\n          description: The status of the request\n        digest:\n          type: string\n          description: The SHA256 digest of the blob\n        total:\n          type: integer\n          description: The total size of the task\n        completed:\n          type: integer\n          description: The completed size of the task\n\n    ShowRequest:\n      type: object\n      description: Request to show a 
model\n      properties:\n        model:\n          type: string\n          description: The name of the model to show\n      required:\n        - model\n\n    ShowResponse:\n      type: object\n      description: Response from a show request\n      properties:\n        license:\n          type: string\n          description: The model license\n        modelfile:\n          type: string\n          description: The modelfile content\n        parameters:\n          type: string\n          description: The model parameters\n        template:\n          type: string\n          description: The model template\n        system:\n          type: string\n          description: The model system message/prompt\n        details:\n          $ref: '#/components/schemas/ModelDetails'\n        messages:\n          type: array\n          items:\n            $ref: '#/components/schemas/Message'\n\n    GenerateRequest:\n      type: object\n      description: Request to generate a response\n      properties:\n        model:\n          type: string\n          description: The model name\n        prompt:\n          type: string\n          description: The prompt to generate a response for\n        suffix:\n          type: string\n        images:\n          type: array\n          items:\n            type: string\n            format: byte\n          description: |\n            A list of base64-encoded images (for multimodal models such as \n            llava)        \n        format:\n          type: string\n          description: |\n            The format to return a response in. 
Currently the only accepted \n            value is json\n        system:\n          type: string\n          description: |\n            System message to (overrides what is defined in the Modelfile)\n        template:\n          type: string\n          description: |\n            The prompt template to use (overrides what is defined in the \n            Modelfile)\n        context:\n          type: array\n          items:\n            type: integer\n          description: |\n            The context parameter returned from a previous request to generate, \n            this can be used to keep a short conversational memory\n          example: []\n        stream:\n          type: boolean\n          description: |\n            If false the response will be returned as a single response object, \n            rather than a stream of objects\n        raw:\n          type: boolean\n          description: |\n            If true no formatting will be applied to the prompt. You may choose \n            to use the raw parameter if you are specifying a full templated \n            prompt in your request to the API\n        keep_alive:\n          $ref: '#/components/schemas/Duration'\n      required:\n        - model\n\n    GenerateResponse:\n      type: object\n      description: Response from a generate request\n      properties:\n        model:\n          type: string\n          description: The model name that generated the response\n        created_at:\n          type: string\n          format: date-time\n          description: Timestamp of the response\n        response:\n          type: string\n          description: |\n            The textual response itself. 
When done, empty if the response was \n            streamed, if not streamed, this will contain the full response\n        done:\n          type: boolean\n          description: Specifies if the response is complete\n        context:\n          type: array\n          items:\n            type: integer\n          description: |\n            When done, encoding of the conversation used in this response\n        total_duration:\n          type: number\n          description: When done, time spent generating the response\n        load_duration:\n          type: number\n          description: When done, time spent in nanoseconds loading the model\n        prompt_eval_count:\n          type: integer\n          description: When done, number of tokens in the prompt\n        prompt_eval_duration:\n          type: number\n          description: |\n            When done, time spent in nanoseconds evaluating the prompt\n        eval_count:\n          type: integer\n          description: When done, number of tokens in the response\n        eval_duration:\n          type: number\n          description: |\n            When done, time in nanoseconds spent generating the response \n\n    ChatRequest:\n      type: object\n      description: Request to generate a response in a chat\n      properties:\n        model:\n          type: string\n          description: The model name\n        messages:\n          type: array\n          items:\n            $ref: '#/components/schemas/Message'\n          description: Messages of the chat - can be used to keep a chat memory\n        stream:\n          type: boolean\n          description: Enable streaming of returned response\n        format:\n          type: string\n          description: Format to return the response in (e.g. 
\"json\")\n        keep_alive:\n          $ref: '#/components/schemas/Duration'\n        options:\n          $ref: '#/components/schemas/Options'\n\n    ChatResponse:\n      type: object\n      description: Response from a chat request\n      properties:\n        model:\n          type: string\n          description: The model name\n        created_at:\n          type: string\n          format: date-time\n          description: Timestamp of the response\n        message:\n          $ref: '#/components/schemas/Message'\n        done_reason:\n          type: string\n          description: Reason the model stopped generating text\n        done:\n          type: boolean\n          description: Specifies if the response is complete\n        total_duration:\n          type: number\n          description: Total duration of the request\n        load_duration:\n          type: number\n          description: Load duration of the request\n        prompt_eval_count:\n          type: integer\n          description: Count of prompt evaluations\n        prompt_eval_duration:\n          type: number\n          description: Duration of prompt evaluations\n        eval_count:\n          type: integer\n          description: Count of evaluations\n        eval_duration:\n          type: number\n          description: Duration of evaluations\n\n    Message:\n      type: object\n      description: A message in a chat\n      properties:\n        role:\n          type: string\n        content:\n          type: string\n        images:\n          type: array\n          items:\n            type: string\n            format: byte\n    Duration:\n      type: string\n      description: A string representing the duration\n      example: \"5m\"\n\n    Options:\n      type: object\n      description: |\n        Advanced model and runner options for generation and chat requests\n      properties:\n        num_keep:\n          type: integer\n          description: | \n            Specifies the number 
of tokens from the beginning of \n            the context ot retain when the context limit is reached. \n            (Default: 4)\n          example: 4\n        seed:\n          type: integer\n          description: |\n            Sets the random number seed to use for generation. Setting this to \n            a specific number will make the model generate the same text for \n            the same prompt. \n            (Default: 0)\n          example: -1\n        num_predict:\n          type: integer\n          description: |\n            Maximum number of tokens to predict when generating text. \n            (Default: 128, -1 = infinite generation, -2 = fill context)\n          example: -1\n        top_k:\n          type: integer\n          description: |\n            Reduces the probability of generating nonsense. A higher value \n            (e.g. 100) will give more diverse answers, while a lower value \n            (e.g. 10) will be more conservative. \n            (Default: 40)\n          example: 40\n        top_p:\n          type: number\n          format: float\n          description: |\n            Works together with top-k. A higher value (e.g., 0.95) will lead to \n            more diverse text, while a lower value (e.g., 0.5) will generate \n            more focused and conservative text. \n            (Default: 0.9)\n          example: 0.9\n        tfs_z:\n          type: number\n          format: float\n          description: |\n            Tail free sampling is used to reduce the impact of less probable \n            tokens from the output. A higher value (e.g., 2.0) will reduce the \n            impact more, while a value of 1.0 disables this setting. \n            (default: 1)\n          example: 1.0\n        typical_p:\n          type: number\n          format: float\n          description: |\n            Controls the selection of typical words based on their probability \n            distribution. 
A higher value (e.g., 0.95) focuses on more typical \n            words, reducing the chance of unusual words being selected. \n            (Default: 1.0)\n          example: 1.0\n        repeat_last_n:\n          type: integer\n          description: |\n            Sets how far back for the model to look back to prevent repetition. \n            (Default: 64, 0 = disabled, -1 = num_ctx)\n          example: 64\n        temperature:\n          type: number\n          format: float\n          description: |\n            The temperature of the model. Increasing the temperature will make \n            the model answer more creatively. \n            (Default: 0.8)\n          example: 0.8\n        repeat_penalty:\n          type: number\n          format: float\n          description: |\n            Sets how strongly to penalize repetitions. A higher value \n            (e.g., 1.5) will penalize repetitions more strongly, while a lower \n            value (e.g., 0.9) will be more lenient. \n            (Default: 1.1)\n          example: 1.1\n        presence_penalty:\n          type: number\n          format: float\n          description: |\n            Applies a penalty to tokens that have already appeared in the \n            generated text, encouraging the model to introduce new tokens. A \n            higher value increases this penalty, promoting more varied and less \n            repetitive output. \n            (Default: 0.8)\n          example: 0.8\n        frequency_penalty:\n          type: number\n          format: float\n          description: |\n            Penalizes tokens based on their frequency in the generated text so \n            far. A higher value reduces the likelihood of frequent tokens being \n            generated again, promoting more diverse outputs. 
\n            (Default: 0.8)\n          example: 0.8\n        mirostat:\n          type: number\n          format: float\n          description: |\n            Enable Mirostat sampling for controlling perplexity. \n            (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n          example: 0\n        mirostat_tau:\n          type: number\n          format: float\n          description: |\n            Controls the balance between coherence and diversity of the output.\n            A lower value will result in more focused and coherent text. \n            (Default: 5.0)\n          example: 5.8\n        mirostat_eta:\n          type: number\n          format: float\n          description: | \n            Influences how quickly the algorithm responds to feedback from the \n            generated text. A lower learning rate will result in slower \n            adjustments, while a higher learning rate will make the algorithm \n            more responsive. \n            (Default: 0.1)\n          example: 0.1\n        penalize_newline:\n          type: boolean\n          description: |\n            Determines whether the model should penalize the generation of \n            newlines, which can help control the structure and formatting of \n            the output. \n            (Default: true)\n          example: true\n        stop:\n          type: array\n          items:\n            type: string\n          description: |\n            Sets the stop sequences to use. When this pattern is encountered \n            the LLM will stop generating text and return. Multiple stop patterns \n            may be set by specifying multiple separate stop parameters in a \n            modelfile.\n          example: ['AI assistant.']\n        numa:\n          type: boolean\n          description: |\n            Indicates whether to use Non-Uniform Memory Access (NUMA) for \n            optimizing memory usage and performance on multi-processor systems. 
\n            (Default: false)\n          example: false\n        num_ctx:\n          type: integer\n          description: |\n            Sets the size of the context window used to generate the next token. \n            (Default: 2048)\n          example: 2048\n        num_batch:\n          type: integer\n          description: |\n            Specifies the number of batches for processing. \n            (Default: 512)\n          example: 512\n        num_gpu:\n          type: integer\n          description: |\n            Specifies the number of GPUs to use. A value of -1 uses all \n            available GPUs. \n            (Default: -1)\n          example: -1\n        main_gpu:\n          type: integer\n          description: |\n            Specifies the primary GPU to use for processing. \n            (Default: 0)\n        low_vram:\n          type: boolean\n          description: | \n            Indicates whether to optimize the model for low VRAM usage. \n            (Default: false)\n          example: false\n        f16_kv:\n          type: boolean\n          description: |\n            Indicates whether to use 16-bit floating point precision for \n            key-value pairs, reducing memory usage. \n            (Default: false)\n          example: true\n        logits_all:\n          type: boolean\n          description: |\n            Specifies whether to output logits for all tokens. \n            (Default: false)\n          example: false\n        vocab_only:\n          type: boolean\n          description: |\n            Indicates whether to only load the vocabulary without the full model. \n            (Default: false)\n          example: false\n        use_mmap:\n          type: boolean\n          description: |\n            Determines whether to use memory-mapped files for loading the model, \n            improving performance on large models. 
\n            (Default: true)\n          example: true\n        use_mlock:\n          type: boolean\n          description: |\n            Determines whether to use memory locking to prevent swapping the \n            model out of RAM. \n            (Default: false)\n          example: false\n        num_thread:\n          type: integer\n          description: |\n            Specifies the number of threads to use for processing. A value of \n            0 uses all available threads. \n            (Default: 0)\n          example: 0\n"
  },
  {
    "path": "clean.sh",
    "content": "rm -rf node_modules packages/backend/node_modules packages/frontend/node_modules\n"
  },
  {
    "path": "commitlint.config.js",
    "content": "module.exports = { extends: ['@commitlint/config-conventional'] };\n"
  },
  {
    "path": "docs/proposals/ai-studio.md",
    "content": "# Motivation\n\nToday, there is no notion of ordering between the containers. But we know that we have a dependency between\nthe client application and the container that is running the model.\n\nThe second issue is that there is no concept of starting point for a container so today we rely only on the\ncontainer being started by the container engine and we know that this is not adequate for the model service container\n\nSo this is handle by a kind of dirty fix: the containers are all started in parallel but as the client application\nwill fail because the model service is started (as it take a while), so we are trying to restart the client application\nuntil the model service is properly started.\n\nThe purpose of this change is to propose an update to the ai-lab.yaml so that it is as much generic as it\ncould be and inspired from the Compose specification.\n\n## Proposed changes\n\nDefine a condition for the container to be properly started: this would be based on the readinessProbe that can already\nbe defined in a Kubernetes container. In the first iteration, we would support only the ```exec``` field. 
If\n```readinessProbe``` is defined, then we would check for the healthcheck status field to be ```healthy```\n\nSo the current chatbot file would be updated from:\n\n```yaml\napplication:\n  type: language\n  name: chatbot\n  description: This is a LLM chatbot application that can interact with a llamacpp model-service\n  containers:\n    - name: chatbot-inference-app\n      contextdir: ai_applications\n      containerfile: builds/Containerfile\n    - name: chatbot-model-service\n      contextdir: model_services\n      containerfile: base/Containerfile\n      model-service: true\n      backend:\n        - llama\n      arch:\n        - arm64\n        - amd64\n    - name: chatbot-model-servicecuda\n      contextdir: model_services\n      containerfile: cuda/Containerfile\n      model-service: true\n      backend:\n        - llama\n      gpu-env:\n        - cuda\n      arch:\n        - amd64\n```\n\nto\n\n```yaml\napplication:\n  type: language\n  name: chatbot\n  description: This is a LLM chatbot application that can interact with a llamacpp model-service\n  containers:\n    - name: chatbot-inference-app\n      contextdir: ai_applications\n      containerfile: builds/Containerfile\n      readinessProbe:                           # added\n        exec:                                   # added\n          command:                              # added\n            - curl -f localhost:8080 || exit 1  # added\n    - name: chatbot-model-service\n      contextdir: model_services\n      containerfile: base/Containerfile\n      model-service: true\n      readinessProbe:                           # added\n        exec:                                   # added\n          command:                              # added\n            - curl -f localhost:7860 || exit 1  # added\n      backend:\n        - llama\n      arch:\n        - arm64\n        - amd64\n    - name: chatbot-model-service\n      contextdir: model_services\n      containerfile: cuda/Containerfile\n      
model-service: true\n      readinessProbe:                           # added\n        exec:                                   # added\n          command:                              # added\n            - curl -f localhost:7860 || exit 1  # added\n      backend:\n        - llama\n      gpu-env:\n        - cuda\n      arch:\n        - amd64\n```\n\nFrom the Podman Desktop API point of view, this would require extending the\n[ContainerCreateOptions](https://podman-desktop.io/api/interfaces/ContainerCreateOptions) structure to support the\nHealthCheck option.\n"
  },
  {
    "path": "docs/proposals/state-management.md",
    "content": "# State management\n\nThe backend manages and persists the State. The backend pushes new state to the front-end\nwhen changes happen, and the front-end can ask for the current value of the state.\n\nThe front-end uses `readable` stores to expose the state to the different pages. The store\nlistens for new states pushed by the backend (`onMessage`), and asks for the current state\nat initial time.\n\nThe pages of the front-end subscribe to the store to get the value of the state in a reactive manner.\n\n## Catalog\n\nThe catalog is persisted as a file in the user's filesystem. The backend reads the file at startup,\nand watches the file for changes. The backend updates the state as soon as changes it detects changes.\n\nThe front-end uses a `readable` store, which waits for changes on the Catalog state\n(using `onMessage('new-catalog-state', data)`),\nand asks for the current state at startup (with `postMessage('ask-catalog-state')`).\n\nThe interested pages of the front-end subscribe to the store to get the value\nof the Catalog state in a reactive manner.\n\n## Pulled applications\n\nThe front-end initiates the pulling of an application (using `postMessage('pull-application', app-id)`).\n\nThe backend manages and persists the state of the pulled applications and pushes every update\non the state (progression, etc.) (using `postMessage('new-pulled-application-state, app-id, data)`).\n\nThe front-end uses a `readable` store, which waits for changes on the Pulled Applications state\n(using `onMessage('new-pulled-application-state)`), and asks for the current state at startup\n(with `postMessage('ask-pulled-applications-state')`).\n\nThe interested pages of the front-end subscribe to the store to get the value of the Pulled Applications state\nin a reactive manner.\n\n## Errors\n\nThe front-end initiates operations (pull application, etc). 
When an error happens during an operation,\nthe backend manages and persists the error in a centralized way.\n\nThe backend pushes new errors (using `postMessage('new-error-state', data)`).\nOptionally, it can push errors to the core Podman Desktop, to display errors in the notifications system.\n\nThe front-end uses a `readable` store, which waits for changes on the Errors state (using `onMessage('new-error-state')`),\nand asks for the current state at startup (using `postMessage('ask-error-state)`).\n\nThe interested pages of the front-end subscribe to the store to display the errors related to the page.\n\nThe user can acknowledge an error (using a `postMessage('ack-error', id)`).\n"
  },
  {
    "path": "eslint.config.mjs",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport globals from 'globals';\nimport js from '@eslint/js';\nimport typescriptLint from 'typescript-eslint';\nimport tsParser from '@typescript-eslint/parser';\nimport svelteParser from 'svelte-eslint-parser';\nimport importPlugin from 'eslint-plugin-import';\nimport { fixupConfigRules, fixupPluginRules } from '@eslint/compat';\nimport { fileURLToPath } from 'node:url';\nimport path from 'node:path';\nimport { FlatCompat } from '@eslint/eslintrc';\nimport unicorn from 'eslint-plugin-unicorn';\nimport noNull from 'eslint-plugin-no-null';\nimport sonarjs from 'eslint-plugin-sonarjs';\nimport etc from 'eslint-plugin-etc';\nimport svelte from 'eslint-plugin-svelte';\nimport redundantUndefined from 'eslint-plugin-redundant-undefined';\nimport simpleImportSort from 'eslint-plugin-simple-import-sort';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\nconst compat = new FlatCompat({\n  baseDirectory: __dirname,\n  recommendedConfig: js.configs.recommended,\n  allConfig: js.configs.all,\n});\n\nconst TYPESCRIPT_PROJECTS = ['packages/*/tsconfig.json', 'tests/*/tsconfig.json'];\n\nexport default [\n  {\n    ignores: [\n      
'*.config.*js',\n      '**/*.config.*js',\n      '**/dist/**/*',\n      '**/test-resources',\n      '**/__mocks__/',\n      '**/coverage/',\n      'packages/backend/media/**',\n      '**/.svelte-kit/',\n      'scripts/**',\n      '**/src-generated/',\n      'tests/playwright/tests/playwright/output/**',\n    ],\n  },\n  js.configs.recommended,\n  ...typescriptLint.configs.recommended,\n  sonarjs.configs.recommended,\n  ...svelte.configs['flat/recommended'],\n  ...fixupConfigRules(\n    compat.extends('plugin:import/recommended', 'plugin:import/typescript', 'plugin:etc/recommended'),\n  ),\n  {\n    plugins: {\n      // compliant v9 plug-ins\n      unicorn,\n      // non-compliant v9 plug-ins\n      etc: fixupPluginRules(etc),\n      import: fixupPluginRules(importPlugin),\n      'no-null': fixupPluginRules(noNull),\n      'redundant-undefined': fixupPluginRules(redundantUndefined),\n      'simple-import-sort': fixupPluginRules(simpleImportSort),\n    },\n    settings: {\n      'import/resolver': {\n        typescript: true,\n        node: true,\n\n        'eslint-import-resolver-custom-alias': {\n          alias: {\n            '/@': './src',\n            '/@gen': './src-generated',\n          },\n\n          extensions: ['.ts'],\n          packages: ['packages/*'],\n        },\n      },\n    },\n  },\n  {\n    linterOptions: {\n      reportUnusedDisableDirectives: 'off',\n    },\n    languageOptions: {\n      globals: {\n        ...globals.node,\n      },\n      // parser: tsParser,\n      sourceType: 'module',\n      parserOptions: {\n        extraFileExtensions: ['.svelte'],\n        warnOnUnsupportedTypeScriptVersion: false,\n        project: TYPESCRIPT_PROJECTS,\n      },\n    },\n  },\n  {\n    rules: {\n      eqeqeq: 'error',\n      'prefer-promise-reject-errors': 'error',\n      semi: ['error', 'always'],\n      'comma-dangle': ['warn', 'always-multiline'],\n\n      quotes: [\n        'error',\n        'single',\n        {\n          allowTemplateLiterals: 
true,\n        },\n      ],\n\n      '@typescript-eslint/explicit-function-return-type': 'off',\n      '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_', caughtErrors: 'none' }],\n      '@typescript-eslint/no-var-requires': 'off',\n      '@typescript-eslint/consistent-type-imports': 'error',\n      '@typescript-eslint/no-explicit-any': 'error',\n      '@typescript-eslint/await-thenable': 'error',\n      '@typescript-eslint/no-floating-promises': ['error', { ignoreVoid: false }],\n      '@typescript-eslint/no-misused-promises': 'error',\n      '@typescript-eslint/prefer-optional-chain': 'error',\n      '@typescript-eslint/explicit-function-return-type': 'error',\n      '@typescript-eslint/prefer-nullish-coalescing': [\n        'error',\n        {\n          ignoreConditionalTests: true,\n        },\n      ],\n      '@typescript-eslint/no-require-imports': 'off',\n\n      // unicorn custom rules\n      'unicorn/prefer-node-protocol': 'error',\n\n      'no-null/no-null': 'error',\n      'sonarjs/no-empty-function': 'off',\n      'sonarjs/deprecation': 'off',\n      'sonarjs/todo-tag': 'off',\n      'sonarjs/sonar-no-fallthrough': 'off',\n\n      /**\n       * Having a semicolon helps the optimizer interpret your code correctly.\n       * This avoids rare errors in optimized code.\n       * @see https://twitter.com/alex_kozack/status/1364210394328408066\n       */\n      semi: ['error', 'always'],\n      /**\n       * This will make the history of changes in the hit a little cleaner\n       */\n      'comma-dangle': ['warn', 'always-multiline'],\n      /**\n       * Just for beauty\n       */\n      quotes: ['error', 'single', { allowTemplateLiterals: true }],\n\n      // disabled import/namespace rule as the plug-in is not fully compatible using the compat mode\n      'import/namespace': 'off',\n      'import/no-duplicates': 'error',\n      'import/first': 'error',\n      'import/newline-after-import': 'error',\n      
'import/no-extraneous-dependencies': 'error',\n      'import/no-unresolved': 'off',\n      'import/default': 'off',\n      'import/no-named-as-default-member': 'off',\n      'import/no-named-as-default': 'off',\n      'sonarjs/cognitive-complexity': 'off',\n      'sonarjs/no-duplicate-string': 'off',\n      'sonarjs/no-empty-collection': 'off',\n      'sonarjs/no-small-switch': 'off',\n      'sonarjs/no-unused-expressions': 'off',\n      'etc/no-commented-out-code': 'error',\n      'etc/no-deprecated': 'off',\n      'etc/no-commented-out-code': 'off',\n      'redundant-undefined/redundant-undefined': 'error',\n      'import/no-extraneous-dependencies': 'error',\n      'import/no-restricted-paths': [\n        'error',\n        {\n          zones: [\n            {\n              target: './packages/backend/**/*',\n              from: ['./packages/frontend/**/*'],\n            },\n            {\n              target: './packages/frontend/**/*',\n              from: ['./packages/backend/**/*'],\n            },\n          ],\n        },\n      ],\n\n      // disabled as code in this project is not yet compliant:\n      'svelte/valid-compile': 'off',\n      'no-undef': 'off',\n    },\n  },\n\n  {\n    files: ['**/*.svelte'],\n\n    languageOptions: {\n      parser: svelteParser,\n      ecmaVersion: 5,\n      sourceType: 'script',\n      parserOptions: {\n        parser: tsParser,\n      },\n    },\n\n    rules: {\n      eqeqeq: 'off',\n      'etc/no-implicit-any-catch': 'off',\n      'no-inner-declarations': 'off',\n      'sonarjs/code-eval': 'off',\n      'sonarjs/different-types-comparison': 'off',\n      'sonarjs/prefer-nullish-coalescing': 'off',\n      'sonarjs/no-nested-template-literals': 'off',\n      'sonarjs/no-nested-conditional': 'off',\n      '@typescript-eslint/no-unused-vars': 'off',\n      '@typescript-eslint/ban-types': 'off',\n      '@typescript-eslint/no-unused-expressions': 'off',\n    },\n  },\n\n  {\n    files: ['packages/frontend/**'],\n    
languageOptions: {\n      globals: {\n        ...Object.fromEntries(Object.entries(globals.node).map(([key]) => [key, 'off'])),\n        ...globals.browser,\n      },\n    },\n  },\n\n  {\n    files: ['packages/shared/**'],\n    languageOptions: {\n      globals: {\n        ...Object.fromEntries(Object.entries(globals.node).map(([key]) => [key, 'off'])),\n        ...Object.fromEntries(Object.entries(globals.browser).map(([key]) => [key, 'off'])),\n      },\n    },\n  },\n];\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"ai-lab-monorepo\",\n  \"displayName\": \"ai-lab-monorepo\",\n  \"description\": \"ai-lab-monorepo\",\n  \"publisher\": \"redhat\",\n  \"version\": \"1.10.0-next\",\n  \"license\": \"Apache-2.0\",\n  \"private\": true,\n  \"engines\": {\n    \"node\": \">=24.0.0\",\n    \"npm\": \">=10.2.3\"\n  },\n  \"scripts\": {\n    \"build\": \"concurrently \\\"cd packages/frontend && pnpm run build\\\" \\\"cd packages/backend && pnpm run build\\\"\",\n    \"watch\": \"concurrently \\\"cd packages/frontend && pnpm run watch\\\" \\\"cd packages/backend && pnpm run watch\\\"\",\n    \"format:check\": \"prettier --check \\\"**/src/**/*.{ts,svelte}\\\"\",\n    \"format:fix\": \"prettier --write \\\"**/src/**/*.{ts,svelte}\\\"\",\n    \"lint:check\": \"eslint . --cache\",\n    \"lint:fix\": \"eslint . --cache --fix\",\n    \"svelte:check\": \"svelte-check\",\n    \"test:backend\": \"vitest run -r packages/backend --passWithNoTests --coverage\",\n    \"test:frontend\": \"vitest -c packages/frontend/vite.config.js run packages/frontend --passWithNoTests --coverage\",\n    \"test:shared\": \"vitest run -r packages/shared --passWithNoTests --coverage\",\n    \"test:unit\": \"pnpm run test:backend && pnpm run test:shared && pnpm run test:frontend\",\n    \"test:e2e\": \"cd tests/playwright && pnpm run test:e2e\",\n    \"test:e2e:smoke\": \"cd tests/playwright && pnpm run test:e2e:smoke\",\n    \"test:e2e:instructlab\": \"cd tests/playwright && pnpm run test:e2e:instructlab\",\n    \"typecheck:shared\": \"tsc --noEmit --project packages/shared\",\n    \"typecheck:frontend\": \"tsc --noEmit --project packages/frontend\",\n    \"typecheck:backend\": \"cd packages/backend && pnpm run typecheck\",\n    \"typecheck\": \"pnpm run typecheck:shared && pnpm run typecheck:frontend && pnpm run typecheck:backend\",\n    \"prepare\": \"husky\"\n  },\n  \"resolutions\": {\n    \"string-width\": \"^4.2.0\",\n    \"wrap-ansi\": \"^7.0.0\",\n    
\"postman-code-generators\": \"1.10.1\"\n  },\n  \"lint-staged\": {\n    \"*.{js,ts,tsx,svelte}\": [\n      \"eslint --cache --fix\",\n      \"prettier --cache --write\"\n    ],\n    \"*.{md,css,json}\": \"prettier --write\"\n  },\n  \"devDependencies\": {\n    \"@commitlint/cli\": \"^20.5.2\",\n    \"@commitlint/config-conventional\": \"^20.5.0\",\n    \"@eslint/compat\": \"^2.0.5\",\n    \"@typescript-eslint/eslint-plugin\": \"^8.59.1\",\n    \"@typescript-eslint/parser\": \"^8.59.1\",\n    \"@vitest/coverage-v8\": \"^3.2.3\",\n    \"autoprefixer\": \"^10.5.0\",\n    \"commitlint\": \"^20.5.2\",\n    \"concurrently\": \"^9.2.1\",\n    \"eslint\": \"^9.39.2\",\n    \"eslint-import-resolver-custom-alias\": \"^1.3.2\",\n    \"eslint-import-resolver-typescript\": \"^4.3.5\",\n    \"eslint-plugin-etc\": \"^2.0.3\",\n    \"eslint-plugin-import\": \"^2.31.0\",\n    \"eslint-plugin-no-null\": \"^1.0.2\",\n    \"eslint-plugin-redundant-undefined\": \"^1.0.0\",\n    \"eslint-plugin-simple-import-sort\": \"^13.0.0\",\n    \"eslint-plugin-sonarjs\": \"^4.0.3\",\n    \"eslint-plugin-svelte\": \"^3.17.1\",\n    \"eslint-plugin-unicorn\": \"^64.0.0\",\n    \"globals\": \"^17.5.0\",\n    \"husky\": \"^9.1.7\",\n    \"lint-staged\": \"^16.4.0\",\n    \"msw\": \"^2.14.2\",\n    \"prettier\": \"^3.8.3\",\n    \"prettier-plugin-svelte\": \"^3.5.1\",\n    \"svelte-check\": \"^4.4.6\",\n    \"svelte-eslint-parser\": \"^1.6.0\",\n    \"typescript\": \"5.9.3\",\n    \"typescript-eslint\": \"^8.59.1\",\n    \"vite\": \"^7.3.1\",\n    \"vitest\": \"^3.0.5\"\n  },\n  \"workspaces\": {\n    \"packages\": [\n      \"packages/*\",\n      \"tests/*\"\n    ]\n  },\n  \"dependencies\": {\n    \"js-yaml\": \"^4.1.1\",\n    \"zod\": \"^4.3.6\"\n  },\n  \"scarfSettings\": {\n    \"enabled\": false\n  },\n  \"pnpm\": {\n    \"overrides\": {\n      \"postman-collection>semver\": \"^7.5.2\"\n    },\n    \"ignoredBuiltDependencies\": [\n      \"@scarf/scarf\",\n      \"@tailwindcss/oxide\",\n      
\"esbuild\",\n      \"postman-code-generators\",\n      \"svelte-preprocess\",\n      \"unrs-resolver\"\n    ]\n  },\n  \"packageManager\": \"pnpm@10.12.4+sha512.5ea8b0deed94ed68691c9bad4c955492705c5eeb8a87ef86bc62c74a26b037b08ff9570f108b2e4dbd1dd1a9186fea925e527f141c648e85af45631074680184\"\n}\n"
  },
  {
    "path": "packages/backend/.gitignore",
    "content": "media\n/src-generated\n"
  },
  {
    "path": "packages/backend/__mocks__/@podman-desktop/api.js",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/**\n * Mock the extension API for vitest.\n * This file is referenced from vitest.config.js file.\n */\nconst plugin = {};\nmodule.exports = plugin;\n"
  },
  {
    "path": "packages/backend/package.json",
    "content": "{\n  \"name\": \"ai-lab\",\n  \"displayName\": \"Podman AI Lab\",\n  \"description\": \"Podman AI Lab lets you work with LLMs locally, exploring AI fundamentals, experimenting with models and prompts, and serving models while maintaining data security and privacy.\",\n  \"version\": \"1.10.0-next\",\n  \"icon\": \"icon.png\",\n  \"type\": \"module\",\n  \"publisher\": \"redhat\",\n  \"license\": \"Apache-2.0\",\n  \"engines\": {\n    \"podman-desktop\": \">=1.8.0\"\n  },\n  \"main\": \"./dist/extension.cjs\",\n  \"contributes\": {\n    \"commands\": [\n      {\n        \"command\": \"ai-lab.navigation.inference.start\",\n        \"title\": \"AI Lab: navigate to inference start page\",\n        \"hidden\": true\n      },\n      {\n        \"command\": \"ai-lab.navigation.recipe.start\",\n        \"title\": \"AI Lab: navigate to recipe start page\",\n        \"hidden\": true\n      }\n    ],\n    \"configuration\": {\n      \"title\": \"AI Lab\",\n      \"properties\": {\n        \"ai-lab.models.path\": {\n          \"type\": \"string\",\n          \"format\": \"folder\",\n          \"default\": \"\",\n          \"description\": \"Custom path where to download models. Note: The extension must be restarted for changes to take effect. 
(Default is blank)\"\n        },\n        \"ai-lab.modelUploadDisabled\": {\n          \"type\": \"boolean\",\n          \"default\": false,\n          \"description\": \"Disable the model upload to the podman machine\",\n          \"hidden\": true\n        },\n        \"ai-lab.experimentalGPU\": {\n          \"type\": \"boolean\",\n          \"default\": false,\n          \"description\": \"Experimental GPU support for inference servers\"\n        },\n        \"ai-lab.apiPort\": {\n          \"type\": \"number\",\n          \"default\": 10434,\n          \"minimum\": 1024,\n          \"maximum\": 65535,\n          \"description\": \"Port on which the API is listening (requires restart of extension)\"\n        },\n        \"ai-lab.inferenceRuntime\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"all\",\n            \"llama-cpp\",\n            \"whisper-cpp\",\n            \"none\"\n          ],\n          \"description\": \"Choose the default inferencing runtime for AI Lab\"\n        },\n        \"ai-lab.experimentalTuning\": {\n          \"type\": \"boolean\",\n          \"default\": false,\n          \"description\": \"Display InstructLab Tuning screens (experimental)\",\n          \"hidden\": true\n        },\n        \"ai-lab.showGPUPromotion\": {\n          \"type\": \"boolean\",\n          \"default\": true,\n          \"description\": \"Display GPU promotion banner\",\n          \"hidden\": true\n        }\n      }\n    },\n    \"icons\": {\n      \"brain-icon\": {\n        \"description\": \"Brain icon\",\n        \"default\": {\n          \"fontPath\": \"brain.woff2\",\n          \"fontCharacter\": \"\\\\E001\"\n        }\n      }\n    },\n    \"views\": {\n      \"icons/containersList\": [\n        {\n          \"when\": \"ai-lab-model-id in containerLabelKeys\",\n          \"icon\": \"${brain-icon}\"\n        }\n      ],\n      \"icons/image\": [\n        {\n          \"when\": \"ai-lab-recipe-id in imageLabelKeys\",\n          
\"icon\": \"${brain-icon}\"\n        }\n      ]\n    }\n  },\n  \"scripts\": {\n    \"generate\": \"npx openapi-typescript ../../api/openapi.yaml -o src-generated/openapi.ts\",\n    \"build\": \"pnpm run generate && vite build\",\n    \"test\": \"vitest run --coverage\",\n    \"test:watch\": \"vitest watch --coverage\",\n    \"format:check\": \"prettier --check \\\"src/**/*.ts\\\"\",\n    \"format:fix\": \"prettier --write \\\"src/**/*.ts\\\"\",\n    \"watch\": \"pnpm run generate && npx vite --mode development build -w\",\n    \"typecheck\": \"pnpm run generate && tsc --noEmit\"\n  },\n  \"dependencies\": {\n    \"@ai-sdk/mcp\": \"^1.0.36\",\n    \"@ai-sdk/openai-compatible\": \"^2.0.42\",\n    \"@huggingface/gguf\": \"^0.4.2\",\n    \"@huggingface/hub\": \"^2.11.0\",\n    \"ai\": \"^6.0.168\",\n    \"express\": \"^5.2.1\",\n    \"express-openapi-validator\": \"^5.6.2\",\n    \"isomorphic-git\": \"^1.37.6\",\n    \"js-yaml\": \"^4.1.1\",\n    \"mustache\": \"^4.2.0\",\n    \"openai\": \"^6.35.0\",\n    \"postman-code-generators\": \"^1.14.1\",\n    \"postman-collection\": \"^5.3.0\",\n    \"semver\": \"^7.7.4\",\n    \"swagger-ui-dist\": \"^5.32.5\",\n    \"swagger-ui-express\": \"^5.0.1\",\n    \"systeminformation\": \"^5.31.5\",\n    \"xml-js\": \"^1.6.11\"\n  },\n  \"devDependencies\": {\n    \"@podman-desktop/api\": \"1.13.0-202409181313-78725a6565\",\n    \"@ai-sdk/provider\": \"^3.0.8\",\n    \"@ai-sdk/provider-utils\": \"^4.0.24\",\n    \"@rollup/plugin-replace\": \"^6.0.3\",\n    \"@types/express\": \"^5.0.6\",\n    \"@types/js-yaml\": \"^4.0.9\",\n    \"@types/mustache\": \"^4.2.6\",\n    \"@types/node\": \"^24\",\n    \"@types/postman-collection\": \"^3.5.11\",\n    \"@types/supertest\": \"^7.2.0\",\n    \"@types/swagger-ui-dist\": \"^3.30.5\",\n    \"@types/swagger-ui-express\": \"^4.1.8\",\n    \"openapi-typescript\": \"^7.13.0\",\n    \"supertest\": \"^7.2.2\",\n    \"vitest\": \"^3.0.5\"\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/assets/ai.json",
    "content": "{\n  \"version\": \"1.0\",\n  \"recipes\": [\n    {\n      \"id\": \"chatbot\",\n      \"description\": \"This recipe provides a blueprint for developers to create their own AI-powered chat applications using Streamlit.\",\n      \"name\": \"ChatBot\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/chatbot\",\n      \"readme\": \"# Chat Application\\n\\n  This recipe helps developers start building their own custom LLM enabled chat applications. It consists of two main components: the Model Service and the AI Application.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\n  The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the chat application below.\\n\\n![](/assets/chatbot_ui.png) \\n\\n\\n## Try the Chat Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. 
To try it out, open `Recipes Catalog` -> `Chatbot` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will\\ngo into greater detail on how each container in the Pod above is built, run, and \\nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n\\nThis application requires a model, a model service and an AI inferencing application.\\n\\n* [Quickstart](#quickstart)\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\\n\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\\nbuilds the application's metadata and generates Kubernetes YAML at `./build/chatbot.yaml` to spin up a Pod that can then be launched locally.\\nTry it with:\\n\\n```\\nmake quadlet\\npodman kube play build/chatbot.yaml\\n```\\n\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `chatbot`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n\\n```\\npodman pod list\\npodman ps\\n```\\n\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. 
\\nPlease refer to the section below for more details about [interacting with the chatbot application](#interact-with-the-ai-application).\\n\\nTo stop and remove the Pod, run:\\n\\n```\\npodman pod stop chatbot\\npodman pod rm chatbot\\n```\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\\nperformant mid-sized model with an apache-2.0 license. In order to use it with our Model Service we need it converted\\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/chatbot\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the\\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. 
It also employs environment variables to dictate the model used and where its served. You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nThe AI Application can be built from the make command:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\n\\nEverything should now be up an running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled chatbot applications.   \\n\\n## Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample chatbot workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\n\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n\\n```\\nmake ARCH=x86_64 bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the chatbot application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```bash\\nbootc switch quay.io/ai-lab/chatbot-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the chatbot service is running on the system. Check on the service with:\\n\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status chatbot\\n```\\n\\n### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"langchain\"]\n    },\n    {\n      \"id\": \"chatbot-pydantic-ai\",\n      \"description\": \"This recipe provides a blueprint for developers to create their own AI-powered chat applications with the pydantic framework using Streamlit\",\n      \"name\": \"Chatbot PydanticAI\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/chatbot-pydantic-ai\",\n      \"readme\": \"# Chatbot Pydantic Application\\n\\n  This recipe helps developers start building their own custom LLM enabled chat applications. 
It consists of two main components: the Model Service and the AI Application.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\n  The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the chat application below.\\n\\n![](/assets/chatbot_ui.png) \\n\\n\\n## Try the Chat Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Chatbot Pydantic AI` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will\\ngo into greater detail on how each container in the Pod above is built, run, and \\nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n\\nThis application requires a model, a model service and an AI inferencing application.\\n\\n* [Quickstart](#quickstart)\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\\n\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\\nbuilds the application's metadata and generates Kubernetes YAML at `./build/chatbot-pydantic-ai.yaml` to spin up a Pod that can then be launched locally.\\nTry it with:\\n\\n```\\nmake quadlet\\npodman kube play build/chatbot-pydantic-ai.yaml\\n```\\n\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `chatbot-pydantic-ai`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n\\n```\\npodman pod list\\npodman ps\\n```\\n\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. \\nPlease refer to the section below for more details about [interacting with the chatbot-pydantic-ai application](#interact-with-the-ai-application).\\n\\nTo stop and remove the Pod, run:\\n\\n```\\npodman pod stop chatbot-pydantic-ai\\npodman pod rm chatbot-pydantic-ai\\n```\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\\nperformant mid-sized model with an apache-2.0 license. 
In order to use it with our Model Service we need it converted\\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/chatbot-pydantic-ai\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the\\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nThe AI Application can be built from the make command:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot-pydantic-ai from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot-pydantic-ai from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\n\\nEverything should now be up an running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled chatbot-pydantic-ai applications.   \\n\\n## Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample chatbot-pydantic-ai workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\n\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n\\n```\\nmake ARCH=x86_64 bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the chatbot-pydantic-ai application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```bash\\nbootc switch quay.io/ai-lab/chatbot-pydantic-ai-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the chatbot-pydantic-ai service is running on the system. Check on the service with:\\n\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status chatbot-pydantic-ai\\n```\\n\\n### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\",\n      \"recommended\": [],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"PydanticAI\"]\n    },\n    {\n      \"id\": \"agents\",\n      \"description\": \"This recipe shows how ReAct can be used to create an intelligent music discovery assistant with Spotify API.\",\n      \"name\": \"ReAct Agent Application\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/agents\",\n      \"readme\": \"# ReAct Agent Application\\n\\n This recipe demonstrates the ReAct (Reasoning and Acting) framework in action through a music exploration application. ReAct enables AI to think step-by-step about tasks, take appropriate actions, and provide reasoned responses. The application shows how ReAct can be used to create an intelligent music discovery assistant that combines reasoning with Spotify API interactions.\\nThe application utilizes [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) for the Model Service and integrates with Spotify's API for music data. 
The recipe uses [Langchain](https://python.langchain.com/docs/get_started/introduction) for the ReAct implementation and [Streamlit](https://streamlit.io/) for the UI layer.\\n\\n## Spotify API Access\\nTo use this application, you'll need Spotify API credentials (follow the link here for documentation https://developer.spotify.com/documentation/web-api):\\n- Create a Spotify Developer account\\n- Create an application in the Spotify Developer Dashboard (https://developer.spotify.com/documentation/web-api/concepts/apps dont worry about adding web/redirect url use the defaults)\\n- Get your Client ID and Client Secret once the app is created (https://developer.spotify.com/dashboard)\\n\\nThese can be provided through environment variables or the application's UI.\\n\\n## Try the ReAct Agent Application\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `ReAct Agent` and follow the instructions to start the application.\\n\\n# Build the Application\\nThe rest of this document will explain how to build and run the application from the terminal, and will go into greater detail on how each container in the Pod above is built, run, and what purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n## Download a model\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well performant mid-sized model with an apache-2.0 license. 
In order to use it with our Model Service we need it converted and quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of ways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from [huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\nThe recommended model can be downloaded using the code snippet below:\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/agents\\n```\\n_A full list of supported open models is forthcoming._ \\n\\n## Build the Model Service\\nThe complete instructions for building and deploying the Model Service can be found in the [llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\nThe AI Application can be built from the make command:\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/agents from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/agents from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\nEverything should now be up and running with the chat application available at [`http://localhost:8501`](http://localhost:8501). 
By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled applications.\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"langchain\"]\n    },\n    {\n      \"id\": \"summarizer\",\n      \"description\": \"This recipe guides into creating custom LLM-powered summarization applications using Streamlit.\",\n      \"name\": \"Summarizer\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/summarizer\",\n      \"readme\": \"# Text Summarizer Application\\n\\n  This recipe helps developers start building their own custom LLM enabled summarizer applications. It consists of two main components: the Model Service and the AI Application.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\n  The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. 
You can find an example of the summarizer application below.\\n\\n![](/assets/summarizer_ui.png) \\n\\n\\n## Try the Summarizer Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Summarizer` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will\\ngo into greater detail on how each container in the Pod above is built, run, and \\nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n\\nThis application requires a model, a model service and an AI inferencing application.\\n\\n* [Quickstart](#quickstart)\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\\n\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. 
This command\\nbuilds the application's metadata and generates Kubernetes YAML at `./build/summarizer.yaml` to spin up a Pod that can then be launched locally.\\nTry it with:\\n\\n```\\nmake quadlet\\npodman kube play build/summarizer.yaml\\n```\\n\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `summarizer`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n\\n```\\npodman pod list\\npodman ps\\n```\\n\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. \\nPlease refer to the section below for more details about [interacting with the summarizer application](#interact-with-the-ai-application).\\n\\nTo stop and remove the Pod, run:\\n\\n```\\npodman pod stop summarizer\\npodman pod rm summarizer\\n```\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\\nperformant mid-sized model with an apache-2.0 license. In order to use it with our Model Service we need it converted\\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). 
There are a number of\\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/summarizer\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the\\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nThe AI Application can be built from the make command:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/summarizer from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/summarizer from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\n\\nEverything should now be up and running with the summarizer application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled summarizer applications.   \\n\\n## Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample summarizer workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\n\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n\\n```\\nmake ARCH=x86_64 bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the summarizer application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```bash\\nbootc switch quay.io/ai-lab/summarizer-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the summarizer service is running on the system. Check on the service with:\\n\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status summarizer\\n```\\n\\n### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"langchain\"]\n    },\n\n    {\n      \"id\": \"codegeneration\",\n      \"description\": \"This recipe showcases how to leverage LLM to build your own custom code generation application.\",\n      \"name\": \"Code Generation\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"generator\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/codegen\",\n      \"readme\": \"# Code Generation Application\\n\\n  This recipe helps developers start building their own custom LLM enabled code generation applications. 
It consists of two main components: the Model Service and the AI Application.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\n  The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the code generation application below.\\n\\n![](/assets/codegen_ui.png) \\n\\n\\n## Try the Code Generation Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Code Generation` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will\\ngo into greater detail on how each container in the Pod above is built, run, and \\nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n\\nThis application requires a model, a model service and an AI inferencing application.\\n\\n* [Quickstart](#quickstart)\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\\n\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\\nbuilds the application's metadata and generates Kubernetes YAML at `./build/codegen.yaml` to spin up a Pod that can then be launched locally.\\nTry it with:\\n\\n```\\nmake quadlet\\npodman kube play build/codegen.yaml\\n```\\n\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `codegen`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n\\n```\\npodman pod list\\npodman ps\\n```\\n\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. \\nPlease refer to the section below for more details about [interacting with the codegen application](#interact-with-the-ai-application).\\n\\nTo stop and remove the Pod, run:\\n\\n```\\npodman pod stop codegen\\npodman pod rm codegen\\n```\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [granite-8b-code-instruct](https://huggingface.co/ibm-granite/granite-8b-code-instruct-4k). This is a well\\nperformant mid-sized model with an apache-2.0 license fine tuned for code generation. 
In order to use it with our Model Service we need it converted\\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\\nways to get a GGUF version of granite-8b-code-instruct, but the simplest is to download a pre-converted one from\\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-8b-code-instruct-4k-GGUF.\\n\\nThere are a number of options for quantization level, but we recommend `Q4_K_M`. \\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-8b-code-instruct-4k-GGUF/resolve/main/granite-8b-code-instruct.Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/codegen\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the\\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nThe AI Application can be built from the make command:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/codegen from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/codegen from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\n\\nEverything should now be up and running with the code generation application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled code generation applications.   \\n\\n## Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample code generation workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\n\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n\\n```\\nmake ARCH=x86_64 bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the code generation application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```bash\\nbootc switch quay.io/ai-lab/codegen-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the codegen service is running on the system. Check on the service with:\\n\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status codegen\\n```\\n\\n### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\",\n        \"hf.ibm-granite.granite-8b-code-instruct\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"langchain\"]\n    },\n    {\n      \"id\": \"rag\",\n      \"description\": \"This application illustrates how to integrate RAG (Retrieval Augmented Generation) into LLM applications enabling to interact with your own documents.\",\n      \"name\": \"RAG Chatbot\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/rag\",\n      \"readme\": \"# RAG (Retrieval Augmented Generation) Chat Application\\n\\nThis demo provides a simple recipe to help developers start to build out their own custom RAG (Retrieval Augmented Generation) applications. 
It consists of three main components; the Model Service, the Vector Database and the AI Application.\\n\\nThere are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\nIn order for the LLM to interact with our documents, we need them stored and available in such a manner that we can retrieve a small subset of them that are relevant to our query. To do this we employ a Vector Database alongside an embedding model. The embedding model converts our documents into numerical representations, vectors, such that similarity searches can be easily performed. The Vector Database stores these vectors for us and makes them available to the LLM. In this recipe we can use [chromaDB](https://docs.trychroma.com/) or [Milvus](https://milvus.io/) as our Vector Database.\\n\\nOur AI Application will connect to our Model Service via it's OpenAI compatible API. In this example we rely on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with our Model Service and we use [Streamlit](https://streamlit.io/) for our UI layer. Below please see an example of the RAG application.     \\n\\n![](/assets/rag_ui.png)\\n\\n\\n## Try the RAG chat application\\n\\n_COMING SOON to AI LAB_\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. 
To try it out, open `Recipes Catalog` -> `RAG Chatbot` and follow the instructions to start the application.\\n\\nIf you prefer building and running the application from terminal, please run the following commands from this directory.\\n\\nFirst, build application's meta data and run the generated Kubernetes YAML which will spin up a Pod along with a number of containers:\\n```\\nmake quadlet\\npodman kube play build/rag.yaml\\n```\\n\\nThe Pod is named `rag`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n```\\npodman pod list\\npodman ps\\n```\\n\\nTo stop and remove the Pod, run:\\n```\\npodman pod stop rag\\npodman pod rm rag\\n```\\n\\nOnce the Pod is running, please refer to the section below to [interact with the RAG chatbot application](#interact-with-the-ai-application).\\n\\n# Build the Application\\n\\nIn order to build this application we will need two models, a Vector Database, a Model Service and an AI Application.  \\n\\n* [Download models](#download-models)\\n* [Deploy the Vector Database](#deploy-the-vector-database)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n\\n### Download models\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF). 
This is a well\\nperformant mid-sized model with an apache-2.0 license that has been quantized and served into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md).\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/rag\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\nIn addition to the LLM, RAG applications also require an embedding model to convert documents between natural language and vector representations. For this demo we will use [`BAAI/bge-base-en-v1.5`](https://huggingface.co/BAAI/bge-base-en-v1.5); it is a fairly standard model for this use case and has an MIT license.    \\n\\nThe code snippet below can be used to pull a copy of the `BAAI/bge-base-en-v1.5` embedding model and store it in your `models/` directory. \\n\\n```python \\nfrom huggingface_hub import snapshot_download\\nsnapshot_download(repo_id=\\\"BAAI/bge-base-en-v1.5\\\",\\n                cache_dir=\\\"models/\\\",\\n                local_files_only=False)\\n```\\n\\n### Deploy the Vector Database \\n\\nTo deploy the Vector Database service locally, simply use the existing ChromaDB or Milvus image. The Vector Database is ephemeral and will need to be re-populated each time the container restarts. 
When implementing RAG in production, you will want a long running and backed up Vector Database.\\n\\n\\n#### ChromaDB\\n```bash\\npodman pull chromadb/chroma\\n```\\n```bash\\npodman run --rm -it -p 8000:8000 chroma\\n```\\n#### Milvus\\n```bash\\npodman pull milvusdb/milvus:master-20240426-bed6363f\\n```\\n```bash\\npodman run -it \\\\\\n        --name milvus-standalone \\\\\\n        --security-opt seccomp:unconfined \\\\\\n        -e ETCD_USE_EMBED=true \\\\\\n        -e ETCD_CONFIG_PATH=/milvus/configs/embedEtcd.yaml \\\\\\n        -e COMMON_STORAGETYPE=local \\\\\\n        -v $(pwd)/volumes/milvus:/var/lib/milvus \\\\\\n        -v $(pwd)/embedEtcd.yaml:/milvus/configs/embedEtcd.yaml \\\\\\n        -p 19530:19530 \\\\\\n        -p 9091:9091 \\\\\\n        -p 2379:2379 \\\\\\n        --health-cmd=\\\"curl -f http://localhost:9091/healthz\\\" \\\\\\n        --health-interval=30s \\\\\\n        --health-start-period=90s \\\\\\n        --health-timeout=20s \\\\\\n        --health-retries=3 \\\\\\n        milvusdb/milvus:master-20240426-bed6363f \\\\\\n        milvus run standalone  1> /dev/null\\n```\\nNote: For running the Milvus instance, make sure you have the `$(pwd)/volumes/milvus` directory and `$(pwd)/embedEtcd.yaml` file as shown in this repository. 
These are required by the database for its operations.\\n\\n\\n### Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the [the llamacpp_python model-service document](../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built with the following code snippet:\\n\\n```bash\\ncd model_servers/llamacpp_python\\npodman build -t llamacppserver -f ./base/Containerfile .\\n```\\n\\n\\n### Deploy the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the [the llamacpp_python model-service document](../model_servers/llamacpp_python/README.md).\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. You can start your local Model Service using the following Podman command:\\n```\\npodman run --rm -it \\\\\\n        -p 8001:8001 \\\\\\n        -v Local/path/to/locallm/models:/locallm/models \\\\\\n        -e MODEL_PATH=models/<model-filename> \\\\\\n        -e HOST=0.0.0.0 \\\\\\n        -e PORT=8001 \\\\\\n        llamacppserver\\n```\\n\\n### Build the AI Application\\n\\nNow that the Model Service is running we want to build and deploy our AI Application. Use the provided Containerfile to build the AI Application image in the `rag-langchain/` directory.\\n\\n```bash\\ncd rag\\nmake APP_IMAGE=rag build\\n```\\n\\n### Deploy the AI Application\\n\\nMake sure the Model Service and the Vector Database are up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. The same goes for the Vector Database. 
Make sure the `VECTORDB_HOST` is correctly set to `10.88.0.1` for communication within the Podman virtual machine.\\n\\nThere also needs to be a volume mount into the `models/` directory so that the application can access the embedding model as well as a volume mount into the `data/` directory where it can pull documents from to populate the Vector Database.  \\n\\nThe following Podman command can be used to run your AI Application:\\n\\n```bash\\npodman run --rm -it -p 8501:8501 \\\\\\n-e MODEL_ENDPOINT=http://10.88.0.1:8001 \\\\\\n-e VECTORDB_HOST=10.88.0.1 \\\\\\n-v Local/path/to/locallm/models/:/rag/models \\\\\\nrag   \\n```\\n\\n### Interact with the AI Application\\n\\nEverything should now be up an running with the rag application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled RAG applications.   \\n\\n### Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample RAG chatbot workload as a service that starts when a system is booted, cd into this folder\\nand run:\\n\\n\\n```\\nmake BOOTC_IMAGE=quay.io/your/rag-bootc:latest bootc\\n```\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 BOOTC_IMAGE=quay.io/your/rag-bootc:latest bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. 
If you do, and you'd like to update the operating system to the OS you just built\\nwith the RAG chatbot application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```\\nbootc switch quay.io/your/rag-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the RAG chatbot service is running on the system.\\n\\nCheck on the service with\\n\\n```\\nssh user@bootc-system-ip\\nsudo systemctl status rag\\n```\\n\\n#### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n##### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\\n### Makefile variables\\n\\nThere are several [Makefile variables](../../common/README.md) defined within each `recipe` Makefile which can be\\nused to override defaults for a variety of make targets.\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"langchain\", \"vectordb\"]\n    },\n    {\n      \"id\": \"rag-nodejs\",\n      \"description\": \"This application illustrates how to integrate RAG (Retrieval Augmented Generation) into LLM applications written in Node.js enabling to interact with your own documents.\",\n      \"name\": \"Node.js RAG Chatbot\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/rag-nodejs\",\n      \"readme\": \"# RAG (Retrieval Augmented Generation) Chat Application\\n\\nThis demo provides a simple 
recipe to help Node.js developers start to build out their own custom RAG (Retrieval Augmented Generation) applications. It consists of three main components; the Model Service, the Vector Database and the AI Application.\\n\\nThere are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\nIn order for the LLM to interact with our documents, we need them stored and available in such a manner that we can retrieve a small subset of them that are relevant to our query. To do this we employ a Vector Database alongside an embedding model. The embedding model converts our documents into numerical representations, vectors, such that similarity searches can be easily performed. The Vector Database stores these vectors for us and makes them available to the LLM. In this recipe we can use [chromaDB](https://docs.trychroma.com/) as our Vector Database.\\n\\nOur AI Application will connect to our Model Service via it's OpenAI compatible API. In this example we rely on [Langchain's](https://js.langchain.com/docs/introduction/) package to simplify communication with our Model Service and we use [React Chatbotify](https://react-chatbotify.com/) and [Next.js](https://nextjs.org/) for our UI layer. Below please see an example of the RAG application.     \\n\\n![](/assets/rag_nodejs.png)\\n\\n\\n## Try the RAG chat application\\n\\n_COMING SOON to AI LAB_\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. 
To try it out, open `Recipes Catalog` -> `RAG Node.js Chatbot` and follow the instructions to start the application.\\n\\nIf you prefer building and running the application from terminal, please run the following commands from this directory.\\n\\nFirst, build application's meta data and run the generated Kubernetes YAML which will spin up a Pod along with a number of containers:\\n```\\nmake quadlet\\npodman kube play build/rag-nodejs.yaml\\n```\\n\\nThe Pod is named `rag_nodejs`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n```\\npodman pod list\\npodman ps\\n```\\n\\nTo stop and remove the Pod, run:\\n```\\npodman pod stop rag_nodejs\\npodman pod rm rag_nodejs\\n```\\n\\nOnce the Pod is running, please refer to the section below to [interact with the RAG chatbot application](#interact-with-the-ai-application).\\n\\n# Build the Application\\n\\nIn order to build this application we will need two models, a Vector Database, a Model Service and an AI Application.  \\n\\n* [Download models](#download-models)\\n* [Deploy the Vector Database](#deploy-the-vector-database)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n\\n### Download models\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF). 
This is a well\\nperformant mid-sized model with an apache-2.0 license that has been quanitzed and served into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md).\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/rag_nodejs\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n### Deploy the Vector Database \\n\\nTo deploy the Vector Database service locally, simply use the existing ChromaDB. The Vector Database is ephemeral and will need to be re-populated each time the container restarts. When implementing RAG in production, you will want a long running and backed up Vector Database.\\n\\n\\n#### ChromaDB\\n```bash\\npodman pull chromadb/chroma\\n```\\n```bash\\npodman run --rm -it -p 8000:8000 chroma\\n```\\n\\n### Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the [the llamacpp_python model-service document](../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built with the following code snippet:\\n\\n```bash\\ncd model_servers/llamacpp_python\\npodman build -t llamacppserver -f ./base/Containerfile .\\n```\\n\\n\\n### Deploy the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the [the llamacpp_python model-service document](../model_servers/llamacpp_python/README.md).\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. 
You can start your local Model Service using the following Podman command:\\n```\\npodman run --rm -it \\\\\\n        -p 8001:8001 \\\\\\n        -v Local/path/to/locallm/models:/locallm/models \\\\\\n        -e MODEL_PATH=models/<model-filename> \\\\\\n        -e HOST=0.0.0.0 \\\\\\n        -e PORT=8001 \\\\\\n        llamacppserver\\n```\\n\\n### Build the AI Application\\n\\nNow that the Model Service is running we want to build and deploy our AI Application. Use the provided Containerfile to build the AI Application image in the `rag-nodejs/` directory.\\n\\n```bash\\ncd rag-nodejs\\nmake APP_IMAGE=rag-nodejs build\\n```\\n\\n### Deploy the AI Application\\n\\nMake sure the Model Service and the Vector Database are up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. The same goes for the Vector Database. Make sure the `VECTORDB_HOST` is correctly set to `10.88.0.1` for communication within the Podman virtual machine.\\n\\nThere also needs to be a volume mount into the `models/` directory so that the application can access the embedding model as well as a volume mount into the `data/` directory where it can pull documents from to populate the Vector Database.  \\n\\nThe following Podman command can be used to run your AI Application:\\n\\n```bash\\npodman run --rm -it -p 8501:8501 \\\\\\n-e MODEL_ENDPOINT=http://10.88.0.1:8001 \\\\\\n-e VECTORDB_HOST=10.88.0.1 \\\\\\n-v Local/path/to/locallm/models/:/rag/models \\\\\\nrag-nodejs   \\n```\\n\\n### Interact with the AI Application\\n\\nEverything should now be up an running with the rag application available at [`http://localhost:8501`](http://localhost:8501). 
By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled RAG applications.   \\n\\n### Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample RAG chatbot workload as a service that starts when a system is booted, cd into this folder\\nand run:\\n\\n\\n```\\nmake BOOTC_IMAGE=quay.io/your/rag-nodejs-bootc:latest bootc\\n```\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 BOOTC_IMAGE=quay.io/your/rag-nodejs-bootc:latest bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the RAG Node.js chatbot application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```\\nbootc switch quay.io/your/rag-nodejs-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the RAG Node.js chatbot service is running on the system.\\n\\nCheck on the service with\\n\\n```\\nssh user@bootc-system-ip\\nsudo systemctl status rag-nodejs\\n```\\n\\n#### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. 
Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n##### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\\n### Makefile variables\\n\\nThere are several [Makefile variables](../../common/README.md) defined within each `recipe` Makefile which can be\\nused to override defaults for a variety of make targets.\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"javascript\"],\n      \"frameworks\": [\"react\", \"langchain\", \"vectordb\"]\n    },\n    {\n      \"id\": \"chatbot-java-quarkus\",\n      \"description\": \"This is a Java Quarkus-based recipe demonstrating how to create an AI-powered chat applications.\",\n      \"name\": \"Java-based ChatBot (Quarkus)\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": 
\"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/chatbot-java-quarkus\",\n      \"readme\": \"# Java-based chatbot application\\n\\nThis application implements a simple chatbot backed by Quarkus and its\\nLangChain4j extension. The UI communicates with the backend application via\\nweb sockets and the backend uses the OpenAI API to talk to the model served\\nby Podman AI Lab.\\n\\nDocumentation for Quarkus+LangChain4j can be found at\\nhttps://docs.quarkiverse.io/quarkus-langchain4j/dev/.\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"java\"],\n      \"frameworks\": [\"quarkus\", \"langchain4j\"]\n    },\n    {\n      \"id\": \"chatbot-javascript-react\",\n      \"description\": \"This is a NodeJS based recipe demonstrating how to create an AI-powered chat applications.\",\n      \"name\": \"Node.js based ChatBot\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/chatbot-nodejs\",\n      \"readme\": \"# Chat Application\\n\\n  This recipe helps developers start building their own custom LLM enabled chat applications using Node.js and JavaScript. It consists of two main components: the Model Service and the AI Application.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. 
There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\n  The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's]( https://js.langchain.com/docs/introduction) JavaScript package to simplify communication with the Model Service and uses [react-chatbotify](https://react-chatbotify.com/) for the UI layer. You can find an example of the chat application below.\\n\\n![](/assets/chatbot_nodejs_ui.png) \\n\\n\\n## Try the Chat Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Node.js based Chatbot` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will\\ngo into greater detail on how each container in the Pod above is built, run, and \\nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n\\nThis application requires a model, a model service and an AI inferencing application.\\n\\n* [Quickstart](#quickstart)\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\\n\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\\nbuilds the application's metadata and generates Kubernetes YAML at `./build/chatbot-nodejs.yaml` to spin up a Pod that can then be launched locally.\\nTry it with:\\n\\n```\\nmake quadlet\\npodman kube play build/chatbot-nodejs.yaml\\n```\\n\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `nodejs chat app`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n\\n```\\npodman pod list\\npodman ps\\n```\\n\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. \\nPlease refer to the section below for more details about [interacting with the chatbot application](#interact-with-the-ai-application).\\n\\nTo stop and remove the Pod, run:\\n\\n```\\npodman pod stop chatbot-nodejs\\npodman pod rm chatbot-nodejs\\n```\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\\nperformant mid-sized model with an apache-2.0 license. 
In order to use it with our Model Service we need it converted\\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/chatbot-nodejs\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the\\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nThe AI Application can be built from the make command:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot-nodejs from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot-nodejs from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\n\\nEverything should now be up an running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled chatbot applications.   \\n\\n## Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample chatbot workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\n\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n\\n```\\nmake ARCH=x86_64 bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the chatbot application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```bash\\nbootc switch quay.io/ai-lab/chatbot-nodejs-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the chatbot service is running on the system. Check on the service with:\\n\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status chatbot-nodejs\\n```\\n\\n### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n        \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"javascript\"],\n      \"frameworks\": [\"react\", \"langchain\"]\n    },\n    {\n      \"id\": \"function-calling\",\n      \"description\": \"This recipe guides you through multiple function calling use cases, showing the ability to structure data and chain multiple tasks, using Streamlit.\",\n      \"name\": \"Function calling\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/function_calling\",\n      \"readme\": \"# Function Calling Application\\n\\n  This recipe helps developers start building their own custom function calling enabled chat applications. 
It consists of two main components: the Model Service and the AI Application.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\n  The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the chat application below.\\n\\n![](/assets/chatbot_ui.png) \\n\\n\\n## Try the Function Calling Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Function Calling` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will\\ngo into greater detail on how each container in the Pod above is built, run, and \\nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n\\nThis application requires a model, a model service and an AI inferencing application.\\n\\n* [Quickstart](#quickstart)\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\\n\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\\nbuilds the application's metadata and generates Kubernetes YAML at `./build/chatbot.yaml` to spin up a Pod that can then be launched locally.\\nTry it with:\\n\\n```\\nmake quadlet\\npodman kube play build/chatbot.yaml\\n```\\n\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `chatbot`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n\\n```\\npodman pod list\\npodman ps\\n```\\n\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. However, if you started the app via the podman desktop UI, a random port will be assigned instead of `8501`. Please use the AI App Details `Open AI App` button to access it instead. 
\\nPlease refer to the section below for more details about [interacting with the chatbot application](#interact-with-the-ai-application).\\n\\nTo stop and remove the Pod, run:\\n\\n```\\npodman pod stop chatbot\\npodman pod rm chatbot\\n```\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\\nperformant mid-sized model with an apache-2.0 license. In order to use it with our Model Service we need it converted\\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/chatbot\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the\\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. 
It also employs environment variables to dictate the model used and where it's served. You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nThe AI Application can be built from the make command:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\n\\nEverything should now be up and running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled chatbot applications.   \\n\\n## Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample chatbot workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\n\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n\\n```\\nmake ARCH=x86_64 bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the chatbot application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```bash\\nbootc switch quay.io/ai-lab/chatbot-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the chatbot service is running on the system. Check on the service with:\\n\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status chatbot\\n```\\n\\n### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"langchain\"]\n    },\n    {\n      \"id\": \"function-calling-nodejs\",\n      \"description\": \"This recipe guides you through multiple function calling use cases, showing the ability to structure data and chain multiple tasks, using Fastify.\",\n      \"name\": \"Node.js Function calling\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/function-calling-nodejs\",\n      \"readme\": \"# Function Calling Application\\n\\n  This recipe helps developers start building their own AI applications with function calling capabilities. 
It consists of two main components: the Model Service and the AI Application.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\n\\n  The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://js.langchain.com/v0.2/docs/introduction/) Typescript package to simplify communication with the Model Service and [langgraph.js](https://langchain-ai.github.io/langgraphjs/) to enable the LLM to call functions.  It uses [fastify](https://fastify.dev/) as the backend-server and chart.js to plot the weather data returned. You can find an example of the chat application below.\\n\\n![](/assets/function_calling_nodejs_ui.png)\\n\\n\\n## Try the Function Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `function-calling-nodejs` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will\\ngo into greater detail on how each container in the Pod above is built, run, and \\nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n\\nThis application requires a model, a model service and an AI inferencing application.\\n\\n* [Quickstart](#quickstart)\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\\n\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\\nbuilds the application's metadata and generates Kubernetes YAML at `./build/function-calling-nodejs.yaml` to spin up a Pod that can then be launched locally.\\nTry it with:\\n\\n```\\nmake quadlet\\npodman kube play build/function-calling-nodejs.yaml\\n```\\n\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `function-calling-nodejs`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n\\n```\\npodman pod list\\npodman ps\\n```\\n\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. However, if you started the app via the podman desktop UI, a random port will be assigned instead of `8501`. Please use the AI App Details `Open AI App` button to access it instead. 
\\nPlease refer to the section below for more details about [interacting with the function calling application](#interact-with-the-ai-application).\\n\\nTo stop and remove the Pod, run:\\n\\n```\\npodman pod stop function-calling-nodejs\\npodman pod rm function-calling-nodejs\\n```\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\\nperformant mid-sized model with an apache-2.0 license. In order to use it with our Model Service we need it converted\\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/function-calling-nodejs\\n```\\n\\n_A full list of supported open models is forthcoming._  \\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the\\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\n\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount 
to the localhost to access the model files. It also employs environment variables to dictate the model used and where it's served. You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nThe AI Application can be built from the make command:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\n\\nEverything should now be up and running with the function calling application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled function calling applications.\\n\\n## Embed the AI Application in a Bootable Container Image\\n\\nTo build a bootable container image that includes this sample chatbot workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\n\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\n\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n\\n```\\nmake ARCH=x86_64 bootc\\n```\\n\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the chatbot application, it's as simple as ssh-ing into the bootc system and running:\\n\\n```bash\\nbootc switch quay.io/ai-lab/function-calling-nodejs-bootc:latest\\n```\\n\\nUpon a reboot, you'll see that the chatbot service is running on the system. Check on the service with:\\n\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status function-calling-nodejs\\n```\\n\\n### What are bootable containers?\\n\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\n\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\\ntools. Might I suggest [podman](https://podman.io/)?\\n\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\n\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\n\\nYou can convert a bootc image to a bootable disk image using the\\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\n\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\n\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n\\n`make bootc-image-builder DISK_TYPE=ami`\\n\",\n      \"recommended\": [\n        \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n        \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n        \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\"\n      ],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"javascript\"],\n      \"frameworks\": [\"langchain.js\", \"langgraph\", \"fastify\"]\n    },\n    {\n      \"id\": \"graph-rag\",\n      \"description\": \"This demo provides a recipe to build out a custom Graph RAG (Graph Retrieval Augmented Generation) application using the repo LightRag which abstracts Microsoft's GraphRag implementation. 
It consists of two main components; the Model Service, and the AI Application with a built in Database.\",\n      \"name\": \"Graph RAG Chat Application\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/graph-rag\",\n      \"readme\": \"# Graph RAG (Retrieval Augmented Generation) Chat Application\\nThis demo provides a recipe to build out a custom Graph RAG (Graph Retrieval Augmented Generation) application using the repo LightRag which abstracts Microsoft's GraphRag implementation. It consists of two main components; the Model Service, and the AI Application with a built in Database.\\nThere are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\\nLightRag simplifies development by handling the Vectordb setup automatically, while also offering experienced developers the flexibility to choose from various Vectordb options based on their preferences for usability and scalability.\\nOur AI Application will connect to our Model Service via it's OpenAI compatible API. In this example we rely on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with our Model Service and we use [Streamlit](https://streamlit.io/) for our UI layer. Below please see an example of the RAG application. 
\\n\\n## Try the RAG chat application\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Graph Rag` and follow the instructions to start the application.\\n\\n## Models that work with this Recipe\\nNot all models work with this Recipe; try out mistral or llama models! \\n\\n# Build the Application\\nThe rest of this document will explain how to build and run the application from the terminal, and will go into greater detail on how each container in the Pod above is built, run, and what purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n## Quickstart\\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command builds the application's metadata and generates Kubernetes YAML at `./build/graph-rag.yaml` to spin up a Pod that can then be launched locally. Try it with:\\n```\\nmake quadlet\\npodman kube play build/graph-rag.yaml\\n```\\nThis will take a few minutes if the model and model-server container images need to be downloaded. \\nThe Pod is named `graph-rag`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\\n```\\npodman pod list\\npodman ps\\n```\\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. However, if you started the app via the podman desktop UI, a random port will be assigned instead of `8501`. Please use the AI App Details `Open AI App` button to access it instead. 
Please refer to the section below for more details about [interacting with the Graph Rag application](#interact-with-the-ai-application).\\nTo stop and remove the Pod, run:\\n```\\npodman pod stop graph-rag\\npodman pod rm graph-rag\\n```\\n\\n## Download a model\\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well performant mid-sized model with an apache-2.0 license. In order to use it with our Model Service we need it converted and quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of ways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from [huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\\nThe recommended model can be downloaded using the code snippet below:\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\\ncd ../recipes/natural_language_processing/graph-rag\\n```\\n_A full list of supported open models is forthcoming._  \\n\\n## Build the Model Service\\nThe complete instructions for building and deploying the Model Service can be found in the [llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\nThe local Model Service relies on a volume mount to the localhost to access the model files. 
It also employs environment variables to dictate the model used and where it's served. You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\\n```bash\\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\nThe AI Application can be built from the make command:\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/graph-rag from repo containers/ai-lab-recipes)\\nmake build\\n```\\n\\n## Deploy the AI Application\\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\\n```bash\\n# Run this from the current directory (path recipes/natural_language_processing/graph-rag from repo containers/ai-lab-recipes)\\nmake run \\n```\\n\\n## Interact with the AI Application\\nEverything should now be up and running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled graph-rag applications.   \\n\\n## Embed the AI Application in a Bootable Container Image\\nTo build a bootable container image that includes this sample graph-rag workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\\n```bash\\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\\n```\\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\\n```\\nmake ARCH=x86_64 bootc\\n```\\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\\nwith the graph-rag application, it's as simple as ssh-ing into the bootc system and running:\\n```bash\\nbootc switch quay.io/ai-lab/graph-rag-bootc:latest\\n```\\nUpon a reboot, you'll see that the graph-rag service is running on the system. Check on the service with:\\n```bash\\nssh user@bootc-system-ip\\nsudo systemctl status graph-rag\\n```\\n\\n### What are bootable containers?\\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than at _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system. Bootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization tools. Might I suggest [podman](https://podman.io/)?\\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI image registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think factories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\\n\\n#### Creating bootable disk images\\nYou can convert a bootc image to a bootable disk image using the [quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\\nDefault image types can be set via the DISK_TYPE Makefile variable.\\n`make bootc-image-builder DISK_TYPE=ami`\",\n      \"recommended\": [],\n      \"backend\": \"llama-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"lightrag\"]\n    },\n    {\n      \"id\": \"audio_to_text\",\n      \"description\": \"This application demonstrates how to use an LLM to transcribe audio into text.\",\n      \"name\": \"Audio to Text\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"generator\",\n      \"categories\": [\"audio\"],\n      \"basedir\": \"recipes/audio/audio_to_text\",\n      \"readme\": \"# Audio to Text Application\\n\\nThis recipe helps developers start building their own custom AI enabled audio transcription applications. It consists of two main components: the Model Service and the AI Application.\\n\\nThere are a few options today for local Model Serving, but this recipe will use [`whisper-cpp`](https://github.com/ggerganov/whisper.cpp.git) and its included Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/whispercpp/base/Containerfile`](/model_servers/whispercpp/base/Containerfile).\\n\\nThe AI Application will connect to the Model Service via an API. 
The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the audio to text application below.\\n\\n\\n![](/assets/whisper.png) \\n\\n## Try the Audio to Text Application:\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Audio to Text` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will go into greater detail on how each container in the application above is built, run, and  what purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\\n\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n    * [Input audio files](#input-audio-files)\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [ggerganov/whisper.cpp](https://huggingface.co/ggerganov/whisper.cpp).\\nThis is a well performant model with an MIT license.\\nIt's simple to download a pre-converted whisper model from [huggingface.co](https://huggingface.co)\\nhere: https://huggingface.co/ggerganov/whisper.cpp. 
There are a number of options, but we recommend to start with `ggml-small.bin`.\\n\\nThe recommended model can be downloaded using the code snippet below:\\n\\n```bash\\ncd ../../../models\\ncurl -sLO https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-small.bin\\ncd ../recipes/audio/audio_to_text\\n```\\n\\n_A full list of supported open models is forthcoming._\\n\\n\\n## Build the Model Service\\n\\nThe complete instructions for building and deploying the Model Service can be found in the [whispercpp model-service document](../../../model_servers/whispercpp/README.md).\\n\\n```bash\\n# from path model_servers/whispercpp from repo containers/ai-lab-recipes\\nmake build\\n```\\nCheckout the [Makefile](../../../model_servers/whispercpp/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. You can start your local Model Service using the following `make` command from `model_servers/whispercpp` set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/whispercpp from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\n## Build the AI Application\\n\\nNow that the Model Service is running we want to build and deploy our AI Application. 
Use the provided Containerfile to build the AI Application\\nimage from the [`audio-to-text/`](./) directory.\\n\\n```bash\\n# from path recipes/audio/audio_to_text from repo containers/ai-lab-recipes\\npodman build -t audio-to-text app\\n```\\n### Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image.\\nWhen starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`.\\nThis could be any appropriately hosted Model Service (running locally or in the cloud) using a compatible API.\\nThe following Podman command can be used to run your AI Application:\\n\\n```bash\\npodman run --rm -it -p 8501:8501 -e MODEL_ENDPOINT=http://10.88.0.1:8001/inference audio-to-text \\n```\\n\\n### Interact with the AI Application\\n\\nOnce the streamlit application is up and running, you should be able to access it at `http://localhost:8501`.\\nFrom here, you can upload audio files from your local machine and translate the audio files as shown below.\\n\\nBy using this recipe and getting this starting point established,\\nusers should now have an easier time customizing and building their own AI enabled applications.\\n\\n#### Input audio files\\n\\nWhisper.cpp requires as an input 16-bit WAV audio files.\\nTo convert your input audio files to 16-bit WAV format you can use `ffmpeg` like this:\\n\\n```bash\\nffmpeg -i <input.mp3> -ar 16000 -ac 1 -c:a pcm_s16le <output.wav>\\n```\\n\",\n      \"recommended\": [\"hf.ggerganov.whisper.cpp\"],\n      \"backend\": \"whisper-cpp\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\"]\n    },\n    {\n      \"id\": \"object_detection\",\n      \"description\": \"This recipe illustrates how to use LLM to interact with images and build object detection applications.\",\n      \"name\": \"Object Detection\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": 
\"generator\",\n      \"categories\": [\"computer-vision\"],\n      \"basedir\": \"recipes/computer_vision/object_detection\",\n      \"readme\": \"# Object Detection\\n\\nThis recipe helps developers start building their own custom AI enabled object detection applications. It consists of two main components: the Model Service and the AI Application.\\n\\nThere are a few options today for local Model Serving, but this recipe will use our FastAPI [`object_detection_python`](../../../model_servers/object_detection_python/src/object_detection_server.py) model server. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/object_detection_python/base/Containerfile`](/model_servers/object_detection_python/base/Containerfile).\\n\\nThe AI Application will connect to the Model Service via an API. The recipe relies on [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the object detection application below.\\n\\n![](/assets/object_detection.png) \\n\\n## Try the Object Detection Application:\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Object Detection` and follow the instructions to start the application.\\n\\n# Build the Application\\n\\nThe rest of this document will explain how to build and run the application from the terminal, and will go into greater detail on how each container in the application above is built, run, and  what purpose it serves in the overall application. All the Model Server elements of the recipe use a central Model Server [Makefile](../../../model_servers/common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Currently we do not have a Makefile for the Application elements of the Recipe, but this is coming soon, and will leverage the recipes common [Makefile](../../common/Makefile.common) to provide variable configuration and reasonable defaults to this Recipe's application.\\n\\n* [Download a model](#download-a-model)\\n* [Build the Model Service](#build-the-model-service)\\n* [Deploy the Model Service](#deploy-the-model-service)\\n* [Build the AI Application](#build-the-ai-application)\\n* [Deploy the AI Application](#deploy-the-ai-application)\\n* [Interact with the AI Application](#interact-with-the-ai-application)\\n\\n## Download a model\\n\\nIf you are just getting started, we recommend using [facebook/detr-resnet-101](https://huggingface.co/facebook/detr-resnet-101).\\nThis is a well performant model with an Apache-2.0 license.\\nIt's simple to download a copy of the model from [huggingface.co](https://huggingface.co)\\n\\nYou can use the `download-model-facebook-detr-resnet-101` make target in the `model_servers/object_detection_python` directory to download and move the model into the models directory for you:\\n\\n```bash\\n# from path model_servers/object_detection_python from repo containers/ai-lab-recipes\\n make download-model-facebook-detr-resnet-101\\n```\\n\\n## Build the Model Service\\n\\nYou can build the Model Service from the [object_detection_python model-service directory](../../../model_servers/object_detection_python).\\n\\n```bash\\n# from path model_servers/object_detection_python from repo containers/ai-lab-recipes\\nmake build\\n```\\n\\nCheckout the [Makefile](../../../model_servers/object_detection_python/Makefile) to get more details on different options for how to build.\\n\\n## Deploy the Model Service\\n\\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from the [`model_servers/object_detection_python`](../../../model_servers/object_detection_python) directory, which will be set with reasonable defaults:\\n\\n```bash\\n# from path model_servers/object_detection_python from repo containers/ai-lab-recipes\\nmake run\\n```\\n\\nAs stated above, by default the model service will use [`facebook/detr-resnet-101`](https://huggingface.co/facebook/detr-resnet-101). However you can use other compatible models. Simply pass the new `MODEL_NAME` and `MODEL_PATH` to the make command. Make sure the model is downloaded and exists in the [models directory](../../../models/):\\n\\n```bash\\n# from path model_servers/object_detection_python from repo containers/ai-lab-recipes\\nmake MODEL_NAME=facebook/detr-resnet-50 MODEL_PATH=/models/facebook/detr-resnet-50 run\\n```\\n\\n## Build the AI Application\\n\\nNow that the Model Service is running we want to build and deploy our AI Application. Use the provided Containerfile to build the AI Application\\nimage from the [`object_detection/`](./) recipe directory.\\n\\n```bash\\n# from path recipes/computer_vision/object_detection from repo containers/ai-lab-recipes\\npodman build -t object_detection_client .\\n```\\n\\n### Deploy the AI Application\\n\\nMake sure the Model Service is up and running before starting this container image.\\nWhen starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`.\\nThis could be any appropriately hosted Model Service (running locally or in the cloud) using a compatible API.\\nThe following Podman command can be used to run your AI Application:\\n\\n```bash\\npodman run -p 8501:8501 -e MODEL_ENDPOINT=http://10.88.0.1:8000/detection object_detection_client\\n```\\n\\n### Interact with the AI Application\\n\\nOnce the client is up and running, you should be able to access it at `http://localhost:8501`. 
From here you can upload images from your local machine and detect objects in the image as shown below. \\n\\nBy using this recipe and getting this starting point established,\\nusers should now have an easier time customizing and building their own AI enabled applications.\\n\",\n      \"recommended\": [\"hf.facebook.detr-resnet-101\"],\n      \"backend\": \"none\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\"]\n    },\n    {\n      \"id\": \"chatbot-llama-stack\",\n      \"description\": \"This recipe provides a blueprint for developers to create their own AI-powered chat applications using Streamlit and llama-stack.\",\n      \"name\": \"ChatBot using Llama Stack\",\n      \"repository\": \"https://github.com/containers/ai-lab-recipes\",\n      \"ref\": \"v1.8.0\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"recipes/natural_language_processing/chatbot-llama-stack\",\n      \"readme\": \"# Chat Application\\n\\n  This recipe helps developers start building their own custom LLM enabled chat applications.\\n\\n  There are a few options today for local Model Serving, but this recipe will use [`Llama Stack`](https://llama-stack.readthedocs.io/en/latest/).\\n\\n  The AI Application will connect to the Model Service via its API. The recipe relies on [Llama Stack Client Python SDK](https://github.com/meta-llama/llama-stack-client-python) to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. \\n\\n## Try the Chat Application\\n\\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. 
To try it out, open `Recipes Catalog` -> `Chatbot using Llama Stack` and follow the instructions to start the application.\\n\",\n      \"backend\": \"llama-stack\",\n      \"languages\": [\"python\"],\n      \"frameworks\": [\"streamlit\", \"llama-stack\"]\n    }\n  ],\n  \"models\": [\n    {\n      \"id\": \"hf.mistralai.mistral-small-3.2-24b-instruct-2506\",\n      \"name\": \"mistralai/Mistral-Small-3.2-24B-Instruct-2506\",\n      \"description\": \"Mistral-Small-3.2-24B-Instruct-2506 is a minor update of [Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Base-2503).\\r\\n\\r\\nSmall-3.2 improves in the following categories:\\r\\n- **Instruction following**: Small-3.2 is better at following precise instructions\\r\\n- **Repetition errors**: Small-3.2 produces less infinite generations or repetitive answers\\r\\n- **Function calling**: Small-3.2's function calling template is more robust (see [here](https://github.com/mistralai/mistral-common/blob/535b4d0a0fc94674ea17db6cf8dc2079b81cbcfa/src/mistral_common/tokens/tokenizers/instruct.py#L778) and [examples](#function-calling))\\r\\n\\r\\nIn all other categories Small-3.2 should match or slightly improve compared to [Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Base-2503).\\r\\n\\r\\n## Key Features\\r\\n- same as [Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Base-2503#key-features)\\r\\n\\r\\n## Benchmark Results\\r\\nWe compare Mistral-Small-3.2-24B to [Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Base-2503).\\r\\nFor more comparison against other models of similar size, please check [Mistral-Small-3.1's Benchmarks'](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Base-2503#benchmark-results)\\r\\n\\r\\n### Text \\r\\n#### Instruction Following / Chat / Tone\\r\\n| Model | Wildbench v2 | Arena Hard v2 | IF (Internal; 
accuracy) |\\r\\n|-------|---------------|---------------|------------------------|\\r\\n| Small 3.1 24B Instruct | 55.6% | 19.56% | 82.75% |\\r\\n| **Small 3.2 24B Instruct** | **65.33%** | **43.1%** | **84.78%** |\\r\\n\\r\\n#### Infinite Generations\\r\\nSmall 3.2 reduces infinite generations by 2x on challenging, long and repetitive prompts.\\r\\n| Model | Infinite Generations (Internal; Lower is better) |\\r\\n|-------|-------|\\r\\n| Small 3.1 24B Instruct | 2.11% |\\r\\n| **Small 3.2 24B Instruct** | **1.29%** |\\r\\n\\r\\n#### STEM\\r\\n| Model | MMLU | MMLU Pro (5-shot CoT) | MATH | GPQA Main (5-shot CoT) | GPQA Diamond (5-shot CoT) | MBPP Plus - Pass@5 | HumanEval Plus - Pass@5 | SimpleQA (TotalAcc) |\\r\\n|-------|------|---------------------|------|------------------------|---------------------------|-------------------|-------------------------|-------------------|\\r\\n| Small 3.1 24B Instruct | 80.62% | 66.76% | 69.30% | 44.42% | 45.96% | 74.63% | 88.99% | 10.43% |\\r\\n| **Small 3.2 24B Instruct** | 80.50% | **69.06%** | 69.42% | 44.22% | 46.13% | **78.33%** | **92.90%** | **12.10%** |\\r\\n\\r\\n### Vision\\r\\n| Model | MMMU | Mathvista | ChartQA | DocVQA | AI2D |\\r\\n|-------|------|-----------|---------|--------|------|\\r\\n| Small 3.1 24B Instruct | **64.00%** | **68.91%** | 86.24% | 94.08% | 93.72% |\\r\\n| **Small 3.2 24B Instruct** | 62.50% | 67.09% | **87.4%** | 94.86% | 92.91% |\\r\\n\\r\\n## Usage\\r\\nThe model can be used with the following frameworks:\\r\\n- [`vllm (recommended)`](https://github.com/vllm-project/vllm)\\r\\n- [`transformers`](https://github.com/huggingface/transformers)\\r\\n\\r\\n**Note 1**: We recommend using a relatively low temperature, such as `temperature=0.15`.\\r\\n**Note 2**: Add a system prompt from [SYSTEM_PROMPT.txt](https://huggingface.co/mistralai/Mistral-Small-3.2-24B-Instruct-2506/blob/main/SYSTEM_PROMPT.txt) for best results.\\r\\n\\r\\n### vLLM (recommended)\\r\\n#### Installation\\r\\n```\\r\\npip 
install vllm --upgrade\\r\\n```\\r\\nCheck installation:\\r\\n```\\r\\npython -c \\\"import mistral_common; print(mistral_common.__version__)\\\"\\r\\n```\\r\\n#### Serve\\r\\n```\\r\\nvllm serve mistralai/Mistral-Small-3.2-24B-Instruct-2506 --tokenizer_mode mistral --config_format mistral --load_format mistral --tool-call-parser mistral --enable-auto-tool-choice --limit_mm_per_prompt 'image=10' --tensor-parallel-size 2\\r\\n```\\r\\nRequires ~55 GB GPU RAM in bf16/fp16.\\r\\n\\r\\n#### Function Calling, Vision Reasoning & Instruction Following\\r\\nSupports multi-modal reasoning, function/tool calls, and precise instruction following using vLLM API or Transformers. See examples in original README.\\r\\n\\r\\n### Transformers\\r\\nInstall:\\r\\n```\\r\\npip install mistral-common --upgrade\\r\\n```\\r\\nUse `MistralTokenizer` and `Mistral3ForConditionalGeneration` with the system prompt and optional images for reasoning. Multi-modal inputs and outputs supported. Refer to Python snippets for examples of instruction following, vision reasoning, and function calls.\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/unsloth/Mistral-Small-3.2-24B-Instruct-2506-GGUF/resolve/main/Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf\",\n      \"memory\": 14300000000,\n      \"sha256\": \"a3cc56310807ed0d145eaf9f018ccda9ae7ad8edb41ec870aa2454b0d4700b3c\",\n      \"backend\": \"llama-cpp\",\n      \"properties\": {\n        \"jinja\": \"true\"\n      }\n    },\n    {\n      \"id\": \"hf.openai.gpt-oss-20b\",\n      \"name\": \"openai/gpt-oss-20b (Unsloth quantization)\",\n      \"description\": \"\\r\\n# Welcome to the gpt-oss series, [OpenAI’s open-weight models](https://openai.com/open-models) designed for powerful reasoning, agentic tasks, and versatile developer use cases.\\r\\n\\r\\nWe’re releasing two flavors of the open models:\\r\\n- `gpt-oss-120b` — for production, general purpose, high reasoning use cases that fits into a single H100 GPU 
(117B parameters with 5.1B active parameters)\\r\\n- `gpt-oss-20b` — for lower latency, and local or specialized use cases (21B parameters with 3.6B active parameters)\\r\\n\\r\\nBoth models were trained on our [harmony response format](https://github.com/openai/harmony) and should only be used with the harmony format as it will not work correctly otherwise.\\r\\n\\r\\n> [!NOTE]\\r\\n> This model card is dedicated to the smaller `gpt-oss-20b` model. Check out [`gpt-oss-120b`](https://huggingface.co/openai/gpt-oss-120b) for the larger model.\\r\\n\\r\\n# Highlights\\r\\n\\r\\n* **Permissive Apache 2.0 license:** Build freely without copyleft restrictions or patent risk—ideal for experimentation, customization, and commercial deployment.\\r\\n* **Configurable reasoning effort:** Easily adjust the reasoning effort (low, medium, high) based on your specific use case and latency needs.\\r\\n* **Full chain-of-thought:** Gain complete access to the model’s reasoning process, facilitating easier debugging and increased trust in outputs. It’s not intended to be shown to end users.\\r\\n* **Fine-tunable:** Fully customize models to your specific use case through parameter fine-tuning.\\r\\n* **Agentic capabilities:** Use the models’ native capabilities for function calling, [web browsing](https://github.com/openai/gpt-oss/tree/main?tab=readme-ov-file#browser), [Python code execution](https://github.com/openai/gpt-oss/tree/main?tab=readme-ov-file#python), and Structured Outputs.\\r\\n* **Native MXFP4 quantization:** The models are trained with native MXFP4 precision for the MoE layer, making `gpt-oss-120b` run on a single H100 GPU and the `gpt-oss-20b` model run within 16GB of memory.\\r\\n\\r\\n---\\r\\n\\r\\n# Inference examples\\r\\n\\r\\n## Transformers\\r\\nYou can use `gpt-oss-120b` and `gpt-oss-20b` with Transformers. If you use the Transformers chat template, it will automatically apply the [harmony response format](https://github.com/openai/harmony). 
If you use `model.generate` directly, you need to apply the harmony format manually using the chat template or use our [openai-harmony](https://github.com/openai/harmony) package.\\r\\n\\r\\nTo get started, install the necessary dependencies:\\r\\n```\\r\\npip install -U transformers kernels torch \\r\\n```\\r\\n\\r\\n```py\\r\\nfrom transformers import pipeline\\r\\nimport torch\\r\\n\\r\\nmodel_id = \\\"openai/gpt-oss-20b\\\"\\r\\n\\r\\npipe = pipeline(\\r\\n    \\\"text-generation\\\",\\r\\n    model=model_id,\\r\\n    torch_dtype=\\\"auto\\\",\\r\\n    device_map=\\\"auto\\\",\\r\\n)\\r\\n\\r\\nmessages = [\\r\\n    {\\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"Explain quantum mechanics clearly and concisely.\\\"},\\r\\n]\\r\\n\\r\\noutputs = pipe(\\r\\n    messages,\\r\\n    max_new_tokens=256,\\r\\n)\\r\\nprint(outputs[0][\\\"generated_text\\\"][-1])\\r\\n```\\r\\n\\r\\n## vLLM\\r\\nvLLM recommends using [uv](https://docs.astral.sh/uv/) for Python dependency management. You can spin up an OpenAI-compatible webserver:\\r\\n```\\r\\nuv pip install --pre vllm==0.10.1+gptoss \\\\\\r\\n    --extra-index-url https://wheels.vllm.ai/gpt-oss/ \\\\\\r\\n    --extra-index-url https://download.pytorch.org/whl/nightly/cu128 \\\\\\r\\n    --index-strategy unsafe-best-match\\r\\n\\r\\nvllm serve openai/gpt-oss-20b\\r\\n```\\r\\n\\r\\n## PyTorch / Triton\\r\\nSee [reference implementations](https://github.com/openai/gpt-oss?tab=readme-ov-file#reference-pytorch-implementation).\\r\\n\\r\\n## Ollama\\r\\n```bash\\r\\n# gpt-oss-20b\\r\\nollama pull gpt-oss:20b\\r\\nollama run gpt-oss:20b\\r\\n```\\r\\n\\r\\n## LM Studio\\r\\n```bash\\r\\n# gpt-oss-20b\\r\\nlms get openai/gpt-oss-20b\\r\\n```\\r\\n\\r\\n# Download the model\\r\\n```bash\\r\\n# gpt-oss-20b\\r\\nhuggingface-cli download openai/gpt-oss-20b --include \\\"original/*\\\" --local-dir gpt-oss-20b/\\npip install gpt-oss\\npython -m gpt_oss.chat model/\\r\\n```\\r\\n\\r\\n# Reasoning levels\\r\\n* **Low:** Fast 
responses for general dialogue.\\r\\n* **Medium:** Balanced speed and detail.\\r\\n* **High:** Deep and detailed analysis.\\r\\n\\r\\n# Tool use\\r\\n* Web browsing (built-in tools)\\r\\n* Function calling with schemas\\r\\n* Agentic operations\\r\\n\\r\\n# Fine-tuning\\r\\nThe smaller model `gpt-oss-20b` can be fine-tuned on consumer hardware, larger `gpt-oss-120b` can be fine-tuned on a single H100 node.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-Q4_K_M.gguf\",\n      \"memory\": 11600000000,\n      \"sha256\": \"c27536640e410032865dc68781d80a08b98f8db5e93575919af8ccc0568aeb4f\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.qwen.qwen3-4b-GGUF\",\n      \"name\": \"qwen/qwen3-4b-GGUF\",\n      \"description\": \"\\r\\n# Qwen3-4B-GGUF\\r\\n<a href=\\\"https:\\/\\/chat.qwen.ai\\/\\\" target=\\\"_blank\\\" style=\\\"margin: 2px;\\\">\\r\\n    <img alt=\\\"Chat\\\" src=\\\"https:\\/\\/img.shields.io\\/badge\\/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5\\\" style=\\\"display: inline-block; vertical-align: middle;\\\"\\/>\\r\\n<\\/a>\\r\\n\\r\\n## Qwen3 Highlights\\r\\n\\r\\nQwen3 is the latest generation of large language models in Qwen series, offering a comprehensive suite of dense and mixture-of-experts (MoE) models. 
Built upon extensive training, Qwen3 delivers groundbreaking advancements in reasoning, instruction-following, agent capabilities, and multilingual support, with the following key features:\\r\\n\\r\\n- **Uniquely support of seamless switching between thinking mode** (for complex logical reasoning, math, and coding) and **non-thinking mode** (for efficient, general-purpose dialogue) **within single model**, ensuring optimal performance across various scenarios.\\r\\n- **Significantly enhancement in its reasoning capabilities**, surpassing previous QwQ (in thinking mode) and Qwen2.5 instruct models (in non-thinking mode) on mathematics, code generation, and commonsense logical reasoning.\\r\\n- **Superior human preference alignment**, excelling in creative writing, role-playing, multi-turn dialogues, and instruction following, to deliver a more natural, engaging, and immersive conversational experience.\\r\\n- **Expertise in agent capabilities**, enabling precise integration with external tools in both thinking and unthinking modes and achieving leading performance among open-source models in complex agent-based tasks.\\r\\n- **Support of 100+ languages and dialects** with strong capabilities for **multilingual instruction following** and **translation**.\\r\\n\\r\\n\\r\\n## Model Overview\\r\\n\\r\\n**Qwen3-4B** has the following features:\\r\\n- Type: Causal Language Models\\r\\n- Training Stage: Pretraining & Post-training\\r\\n- Number of Parameters: 4.0B\\r\\n- Number of Paramaters (Non-Embedding): 3.6B\\r\\n- Number of Layers: 36\\r\\n- Number of Attention Heads (GQA): 32 for Q and 8 for KV\\r\\n- Context Length: 32,768 natively and [131,072 tokens with YaRN](#processing-long-texts).\\r\\n\\r\\n- Quantization: q4_K_M, q5_0, q5_K_M, q6_K, q8_0\\r\\n\\r\\nFor more details, including benchmark evaluation, hardware requirements, and inference performance, please refer to our [blog](https:\\/\\/qwenlm.github.io\\/blog\\/qwen3\\/), 
[GitHub](https:\\/\\/github.com\\/QwenLM\\/Qwen3), and [Documentation](https:\\/\\/qwen.readthedocs.io\\/en\\/latest\\/).\\r\\n\\r\\n## Quickstart\\r\\n\\r\\n### llama.cpp\\r\\n\\r\\nCheck out our [llama.cpp documentation](https:\\/\\/qwen.readthedocs.io\\/en\\/latest\\/run_locally\\/llama.cpp.html) for more usage guide.\\r\\n\\r\\nWe advise you to clone [`llama.cpp`](https:\\/\\/github.com\\/ggerganov\\/llama.cpp) and install it following the official guide. We follow the latest version of llama.cpp. \\r\\nIn the following demonstration, we assume that you are running commands under the repository `llama.cpp`.\\r\\n\\r\\n```shell\\r\\n.\\/llama-cli -hf Qwen\\/Qwen3-4B-GGUF:Q8_0 --jinja --color -ngl 99 -fa -sm row --temp 0.6 --top-k 20 --top-p 0.95 --min-p 0 --presence-penalty 1.5 -c 40960 -n 32768 --no-context-shift\\r\\n```\\r\\n\\r\\n### ollama\\r\\n\\r\\nCheck out our [ollama documentation](https:\\/\\/qwen.readthedocs.io\\/en\\/latest\\/run_locally\\/ollama.html) for more usage guide.\\r\\n\\r\\nYou can run Qwen3 with one command:\\r\\n\\r\\n```shell\\r\\nollama run hf.co\\/Qwen\\/Qwen3-4B-GGUF:Q8_0\\r\\n```\\r\\n\\r\\n## Switching Between Thinking and Non-Thinking Mode\\r\\n\\r\\nYou can add `\\/think` and `\\/no_think` to user prompts or system messages to switch the model's thinking mode from turn to turn. The model will follow the most recent instruction in multi-turn conversations.\\r\\n\\r\\nHere is an example of multi-turn conversation:\\r\\n\\r\\n```\\r\\n> Who are you \\/no_think\\r\\n\\r\\n<think>\\r\\n\\r\\n<\\/think>\\r\\n\\r\\nI am Qwen, a large-scale language model developed by Alibaba Cloud. [...]\\r\\n\\r\\n> How many 'r's are in 'strawberries'? \\/think\\r\\n\\r\\n<think>\\r\\nOkay, let's see. The user is asking how many times the letter 'r' appears in the word \\\"strawberries\\\". [...]\\r\\n<\\/think>\\r\\n\\r\\nThe word strawberries contains 3 instances of the letter r. 
[...]\\r\\n```\\r\\n\\r\\n\\r\\n## Processing Long Texts\\r\\n\\r\\nQwen3 natively supports context lengths of up to 32,768 tokens. For conversations where the total length (including both input and output) significantly exceeds this limit, we recommend using RoPE scaling techniques to handle long texts effectively. We have validated the model's performance on context lengths of up to 131,072 tokens using the [YaRN](https:\\/\\/arxiv.org\\/abs\\/2309.00071) method.\\r\\n\\r\\nTo enable YARN in ``llama.cpp``:\\r\\n\\r\\n```shell\\r\\n.\\/llama-cli ... -c 131072 --rope-scaling yarn --rope-scale 4 --yarn-orig-ctx 32768\\r\\n```\\r\\n\\r\\n> [!NOTE]\\r\\n> All the notable open-source frameworks implement static YaRN, which means the scaling factor remains constant regardless of input length, **potentially impacting performance on shorter texts.**\\r\\n> We advise adding the `rope_scaling` configuration only when processing long contexts is required. \\r\\n> It is also recommended to modify the `factor` as needed. For example, if the typical context length for your application is 65,536 tokens, it would be better to set `factor` as 2.0. \\r\\n\\r\\n> [!TIP]\\r\\n> The endpoint provided by Alibaba Model Studio supports dynamic YaRN by default and no extra configuration is needed.\\r\\n\\r\\n\\r\\n## Best Practices\\r\\n\\r\\nTo achieve optimal performance, we recommend the following settings:\\r\\n\\r\\n1. **Sampling Parameters**:\\r\\n   - For thinking mode (`enable_thinking=True`), use `Temperature=0.6`, `TopP=0.95`, `TopK=20`, `MinP=0`, and `PresencePenalty=1.5`. 
**DO NOT use greedy decoding**, as it can lead to performance degradation and endless repetitions.\\r\\n   - For non-thinking mode (`enable_thinking=False`), we suggest using `Temperature=0.7`, `TopP=0.8`, `TopK=20`, `MinP=0`, and `PresencePenalty=1.5`.\\r\\n   - **We recommend setting `presence_penalty` to 1.5 for quantized models to suppress repetitive outputs.** You can adjust the `presence_penalty` parameter between 0 and 2. A higher value may occasionally lead to language mixing and a slight reduction in model performance. \\r\\n\\r\\n2. **Adequate Output Length**: We recommend using an output length of 32,768 tokens for most queries. For benchmarking on highly complex problems, such as those found in math and programming competitions, we suggest setting the max output length to 38,912 tokens. This provides the model with sufficient space to generate detailed and comprehensive responses, thereby enhancing its overall performance.\\r\\n\\r\\n3. **Standardize Output Format**: We recommend using prompts to standardize model outputs when benchmarking.\\r\\n   - **Math Problems**: Include \\\"Please reason step by step, and put your final answer within \\\\boxed{}.\\\" in the prompt.\\r\\n   - **Multiple-Choice Questions**: Add the following JSON structure to the prompt to standardize responses: \\\"Please show your choice in the `answer` field with only the choice letter, e.g., `\\\"answer\\\": \\\"C\\\"`.\\\"\\r\\n\\r\\n4. **No Thinking Content in History**: In multi-turn conversations, the historical model output should only include the final output part and does not need to include the thinking content. It is implemented in the provided chat template in Jinja2. 
However, for frameworks that do not directly use the Jinja2 chat template, it is up to the developers to ensure that the best practice is followed.\\r\\n\\r\\n### Citation\\r\\n\\r\\nIf you find our work helpful, feel free to give us a cite.\\r\\n\\r\\n```\\r\\n@misc{qwen3technicalreport,\\r\\n      title={Qwen3 Technical Report}, \\r\\n      author={Qwen Team},\\r\\n      year={2025},\\r\\n      eprint={2505.09388},\\r\\n      archivePrefix={arXiv},\\r\\n      primaryClass={cs.CL},\\r\\n      url={https:\\/\\/arxiv.org\\/abs\\/2505.09388}, \\r\\n}\\r\\n```\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/Qwen/Qwen3-4B-GGUF/resolve/main/Qwen3-4B-Q4_K_M.gguf\",\n      \"sha256\": \"7485fe6f11af29433bc51cab58009521f205840f5b4ae3a32fa7f92e8534fdf5\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.unsloth.qwen3-4b-thinking-GGUF\",\n      \"name\": \"qwen/Qwen3-4B-Thinking-2507-GGUF (Unsloth quantization)\",\n      \"description\": \"---\\nlibrary_name: transformers\\nlicense: apache-2.0\\nlicense_link: https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507/blob/main/LICENSE\\nbase_model:\\n- Qwen/Qwen3-4B-Thinking-2507\\ntags:\\n- qwen\\n- qwen3\\n- unsloth\\n---\\n<div>\\n  <p style=\\\"margin-bottom: 0; margin-top: 0;\\\">\\n    <strong>See <a href=\\\"https://huggingface.co/collections/unsloth/qwen3-680edabfb790c8c34a242f95\\\">our collection</a> for all versions of Qwen3 including GGUF, 4-bit & 16-bit formats.</strong>\\n  </p>\\n  <p style=\\\"margin-bottom: 0;\\\">\\n    <em>Learn to run Qwen3-2507 correctly - <a href=\\\"https://docs.unsloth.ai/basics/qwen3-2507\\\">Read our Guide</a>.</em>\\n  </p>\\n<p style=\\\"margin-top: 0;margin-bottom: 0;\\\">\\n    <em><a href=\\\"https://docs.unsloth.ai/basics/unsloth-dynamic-v2.0-gguf\\\">Unsloth Dynamic 2.0</a> achieves superior accuracy & outperforms other leading quants.</em>\\n  </p>\\n  <div style=\\\"display: flex; gap: 5px; 
align-items: center; \\\">\\n    <a href=\\\"https://github.com/unslothai/unsloth/\\\">\\n      <img src=\\\"https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png\\\" width=\\\"133\\\">\\n    </a>\\n    <a href=\\\"https://discord.gg/unsloth\\\">\\n      <img src=\\\"https://github.com/unslothai/unsloth/raw/main/images/Discord%20button.png\\\" width=\\\"173\\\">\\n    </a>\\n    <a href=\\\"https://docs.unsloth.ai/basics/qwen3-2507\\\">\\n      <img src=\\\"https://raw.githubusercontent.com/unslothai/unsloth/refs/heads/main/images/documentation%20green%20button.png\\\" width=\\\"143\\\">\\n    </a>\\n  </div>\\n<h1 style=\\\"margin-top: 0rem;\\\">✨ Read our Qwen3-2507 Guide <a href=\\\"https://docs.unsloth.ai/basics/qwen3-2507\\\">here</a>!</h1>\\n</div>\\n\\n- Fine-tune Qwen3 (14B) for free using our Google [Colab notebook here](https://docs.unsloth.ai/get-started/unsloth-notebooks)!\\n- Read our Blog about Qwen3 support: [unsloth.ai/blog/qwen3](https://unsloth.ai/blog/qwen3)\\n- View the rest of our notebooks in our [docs here](https://docs.unsloth.ai/get-started/unsloth-notebooks).\\n- Run & export your fine-tuned model to Ollama, llama.cpp or HF.\\n\\n| Unsloth supports          |    Free Notebooks                                                                                           | Performance | Memory use |\\n|-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------|\\n| **Qwen3 (14B)**      | [▶\\uFE0F Start on Colab](https://docs.unsloth.ai/get-started/unsloth-notebooks)               | 3x faster | 70% less |\\n| **GRPO with Qwen3 (8B)**      | [▶\\uFE0F Start on Colab](https://docs.unsloth.ai/get-started/unsloth-notebooks)               | 3x faster | 80% less |\\n| **Llama-3.2 (3B)**      | [▶\\uFE0F Start on 
Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb)               | 2.4x faster | 58% less |\\n| **Llama-3.2 (11B vision)**      | [▶\\uFE0F Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb)               | 2x faster | 60% less |\\n| **Qwen2.5 (7B)**      | [▶\\uFE0F Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb)               | 2x faster | 60% less |\\n\\n# Qwen3-4B-Thinking-2507\\n<a href=\\\"https://chat.qwen.ai/\\\" target=\\\"_blank\\\" style=\\\"margin: 2px;\\\">\\n    <img alt=\\\"Chat\\\" src=\\\"https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5\\\" style=\\\"display: inline-block; vertical-align: middle;\\\"/>\\n</a>\\n\\n## Highlights\\n\\nOver the past three months, we have continued to scale the **thinking capability** of Qwen3-4B, improving both the **quality and depth** of reasoning. We are pleased to introduce **Qwen3-4B-Thinking-2507**, featuring the following key enhancements:\\n\\n- **Significantly improved performance** on reasoning tasks, including logical reasoning, mathematics, science, coding, and academic benchmarks that typically require human expertise.\\n- **Markedly better general capabilities**, such as instruction following, tool usage, text generation, and alignment with human preferences.\\n- **Enhanced 256K long-context understanding** capabilities.\\n\\n**NOTE**: This version has an increased thinking length. 
We strongly recommend its use in highly complex reasoning tasks.\\n\\n![image/jpeg](https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3-2507/Qwen3-4B-Instruct.001.jpeg)\\n\\n## Model Overview\\n\\n**Qwen3-4B-Thinking-2507** has the following features:\\n- Type: Causal Language Models\\n- Training Stage: Pretraining & Post-training\\n- Number of Parameters: 4.0B\\n- Number of Parameters (Non-Embedding): 3.6B\\n- Number of Layers: 36\\n- Number of Attention Heads (GQA): 32 for Q and 8 for KV\\n- Context Length: **262,144 natively**. \\n\\n**NOTE: This model supports only thinking mode. Meanwhile, specifying `enable_thinking=True` is no longer required.**\\n\\nAdditionally, to enforce model thinking, the default chat template automatically includes `<think>`. Therefore, it is normal for the model's output to contain only `</think>` without an explicit opening `<think>` tag.\\n\\nFor more details, including benchmark evaluation, hardware requirements, and inference performance, please refer to our [blog](https://qwenlm.github.io/blog/qwen3/), [GitHub](https://github.com/QwenLM/Qwen3), and [Documentation](https://qwen.readthedocs.io/en/latest/).\\n\\n\\n## Performance\\n\\n\\n|  | Qwen3-30B-A3B Thinking | Qwen3-4B Thinking | Qwen3-4B-Thinking-2507 |\\n|--- | --- | --- | --- |\\n| **Knowledge** | | |\\n| MMLU-Pro | **78.5** | 70.4 | 74.0 |\\n| MMLU-Redux | **89.5** | 83.7 | 86.1 |\\n| GPQA | **65.8** | 55.9 | **65.8** |\\n| SuperGPQA | **51.8** | 42.7 | 47.8 |\\n| **Reasoning** | | |\\n| AIME25 | 70.9 | 65.6 | **81.3** |\\n| HMMT25 | 49.8 | 42.1 | **55.5** |\\n| LiveBench 20241125 | **74.3** | 63.6 | 71.8 |\\n| **Coding** | | |\\n| LiveCodeBench v6 (25.02-25.05) | **57.4** | 48.4 | 55.2 |\\n| CFEval | **1940** | 1671 | 1852 |\\n| OJBench | **20.7** | 16.1 | 17.9 |\\n| **Alignment** | | |\\n| IFEval | 86.5 | 81.9 | **87.4** |\\n| Arena-Hard v2$ | **36.3** | 13.7 | 34.9 |\\n| Creative Writing v3 | **79.1** | 61.1 | 75.6 |\\n| WritingBench | 77.0 | 73.5 | **83.3** |\\n| 
**Agent** | | |\\n| BFCL-v3 | 69.1 | 65.9 | **71.2** |\\n| TAU1-Retail | 61.7 | 33.9 | **66.1** |\\n| TAU1-Airline | 32.0 | 32.0 | **48.0** |\\n| TAU2-Retail | 34.2 | 38.6 | **53.5** |\\n| TAU2-Airline | 36.0 | 28.0 | **58.0** |\\n| TAU2-Telecom | 22.8 | 17.5 | **27.2** |\\n| **Multilingualism** | | |\\n| MultiIF | 72.2 | 66.3 | **77.3** |\\n| MMLU-ProX | **73.1** | 61.0 | 64.2 |\\n| INCLUDE | **71.9** | 61.8 | 64.4 |\\n| PolyMATH | 46.1 | 40.0 | **46.2** |\\n\\n$ For reproducibility, we report the win rates evaluated by GPT-4.1.\\n\\n\\\\& For highly challenging tasks (including PolyMATH and all reasoning and coding tasks), we use an output length of 81,920 tokens. For all other tasks, we set the output length to 32,768.\\n\\n## Quickstart\\n\\nThe code of Qwen3 has been in the latest Hugging Face `transformers` and we advise you to use the latest version of `transformers`.\\n\\nWith `transformers<4.51.0`, you will encounter the following error:\\n```\\nKeyError: 'qwen3'\\n```\\n\\nThe following contains a code snippet illustrating how to use the model generate content based on given inputs. 
\\n```python\\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\\n\\nmodel_name = \\\"Qwen/Qwen3-4B-Thinking-2507\\\"\\n\\n# load the tokenizer and the model\\ntokenizer = AutoTokenizer.from_pretrained(model_name)\\nmodel = AutoModelForCausalLM.from_pretrained(\\n    model_name,\\n    torch_dtype=\\\"auto\\\",\\n    device_map=\\\"auto\\\"\\n)\\n\\n# prepare the model input\\nprompt = \\\"Give me a short introduction to large language model.\\\"\\nmessages = [\\n    {\\\"role\\\": \\\"user\\\", \\\"content\\\": prompt}\\n]\\ntext = tokenizer.apply_chat_template(\\n    messages,\\n    tokenize=False,\\n    add_generation_prompt=True,\\n)\\nmodel_inputs = tokenizer([text], return_tensors=\\\"pt\\\").to(model.device)\\n\\n# conduct text completion\\ngenerated_ids = model.generate(\\n    **model_inputs,\\n    max_new_tokens=32768\\n)\\noutput_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() \\n\\n# parsing thinking content\\ntry:\\n    # rindex finding 151668 (</think>)\\n    index = len(output_ids) - output_ids[::-1].index(151668)\\nexcept ValueError:\\n    index = 0\\n\\nthinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip(\\\"\\\\n\\\")\\ncontent = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip(\\\"\\\\n\\\")\\n\\nprint(\\\"thinking content:\\\", thinking_content) # no opening <think> tag\\nprint(\\\"content:\\\", content)\\n\\n```\\n\\nFor deployment, you can use `sglang>=0.4.6.post1` or `vllm>=0.8.5` or to create an OpenAI-compatible API endpoint:\\n- SGLang:\\n    ```shell\\n    python -m sglang.launch_server --model-path Qwen/Qwen3-4B-Thinking-2507 --context-length 262144  --reasoning-parser deepseek-r1\\n    ```\\n- vLLM:\\n    ```shell\\n    vllm serve Qwen/Qwen3-4B-Thinking-2507 --max-model-len 262144 --enable-reasoning --reasoning-parser deepseek_r1\\n    ```\\n\\n**Note: If you encounter out-of-memory (OOM) issues, you may consider reducing the context length to a smaller 
value. However, since the model may require longer token sequences for reasoning, we strongly recommend using a context length greater than 131,072 when possible.**\\n\\nFor local use, applications such as Ollama, LMStudio, MLX-LM, llama.cpp, and KTransformers have also supported Qwen3.\\n\\n## Agentic Use\\n\\nQwen3 excels in tool calling capabilities. We recommend using [Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) to make the best use of agentic ability of Qwen3. Qwen-Agent encapsulates tool-calling templates and tool-calling parsers internally, greatly reducing coding complexity.\\n\\nTo define the available tools, you can use the MCP configuration file, use the integrated tool of Qwen-Agent, or integrate other tools by yourself.\\n```python\\nfrom qwen_agent.agents import Assistant\\n\\n# Define LLM\\n# Using OpenAI-compatible API endpoint. It is recommended to disable the reasoning and the tool call parsing\\n# functionality of the deployment frameworks and let Qwen-Agent automate the related operations. 
For example, \\n# `VLLM_USE_MODELSCOPE=true vllm serve Qwen/Qwen3-4B-Thinking-2507 --served-model-name Qwen3-4B-Thinking-2507 --max-model-len 262144`.\\nllm_cfg = {\\n    'model': 'Qwen3-4B-Thinking-2507',\\n\\n    # Use a custom endpoint compatible with OpenAI API:\\n    'model_server': 'http://localhost:8000/v1',  # api_base without reasoning and tool call parsing\\n    'api_key': 'EMPTY',\\n    'generate_cfg': {\\n        'thought_in_content': True,\\n    },\\n}\\n\\n# Define Tools\\ntools = [\\n    {'mcpServers': {  # You can specify the MCP configuration file\\n            'time': {\\n                'command': 'uvx',\\n                'args': ['mcp-server-time', '--local-timezone=Asia/Shanghai']\\n            },\\n            \\\"fetch\\\": {\\n                \\\"command\\\": \\\"uvx\\\",\\n                \\\"args\\\": [\\\"mcp-server-fetch\\\"]\\n            }\\n        }\\n    },\\n  'code_interpreter',  # Built-in tools\\n]\\n\\n# Define Agent\\nbot = Assistant(llm=llm_cfg, function_list=tools)\\n\\n# Streaming generation\\nmessages = [{'role': 'user', 'content': 'https://qwenlm.github.io/blog/ Introduce the latest developments of Qwen'}]\\nfor responses in bot.run(messages=messages):\\n    pass\\nprint(responses)\\n```\\n\\n## Best Practices\\n\\nTo achieve optimal performance, we recommend the following settings:\\n\\n1. **Sampling Parameters**:\\n   - We suggest using `Temperature=0.6`, `TopP=0.95`, `TopK=20`, and `MinP=0`.\\n   - For supported frameworks, you can adjust the `presence_penalty` parameter between 0 and 2 to reduce endless repetitions. However, using a higher value may occasionally result in language mixing and a slight decrease in model performance.\\n\\n2. **Adequate Output Length**: We recommend using an output length of 32,768 tokens for most queries. For benchmarking on highly complex problems, such as those found in math and programming competitions, we suggest setting the max output length to 81,920 tokens. 
This provides the model with sufficient space to generate detailed and comprehensive responses, thereby enhancing its overall performance.\\n\\n3. **Standardize Output Format**: We recommend using prompts to standardize model outputs when benchmarking.\\n   - **Math Problems**: Include \\\"Please reason step by step, and put your final answer within \\\\boxed{}.\\\" in the prompt.\\n   - **Multiple-Choice Questions**: Add the following JSON structure to the prompt to standardize responses: \\\"Please show your choice in the `answer` field with only the choice letter, e.g., `\\\"answer\\\": \\\"C\\\"`.\\\"\\n\\n4. **No Thinking Content in History**: In multi-turn conversations, the historical model output should only include the final output part and does not need to include the thinking content. It is implemented in the provided chat template in Jinja2. However, for frameworks that do not directly use the Jinja2 chat template, it is up to the developers to ensure that the best practice is followed.\\n\\n\\n### Citation\\n\\nIf you find our work helpful, feel free to give us a cite.\\n\\n```\\n@misc{qwen3technicalreport,\\n      title={Qwen3 Technical Report}, \\n      author={Qwen Team},\\n      year={2025},\\n      eprint={2505.09388},\\n      archivePrefix={arXiv},\\n      primaryClass={cs.CL},\\n      url={https://arxiv.org/abs/2505.09388}, \\n}\\n```\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/unsloth/Qwen3-4B-Thinking-2507-GGUF/resolve/main/Qwen3-4B-Thinking-2507-Q4_K_M.gguf\",\n      \"backend\": \"llama-cpp\",\n      \"sha256\": \"ddd52e18200baab281c5c46f70d544ce4d4fe4846eab1608f2fff48a64554212\",\n      \"properties\": {\n        \"jinja\": \"true\"\n      }\n    },\n    {\n      \"id\": \"hf.ibm-granite.granite-4.0-tiny-GGUF\",\n      \"name\": \"ibm-granite/granite-4.0-tiny-GGUF\",\n      \"description\": \"# Granite-4.0-H-Tiny\\n\\n**Model Summary:**\\nGranite-4.0-H-Tiny is a 7B 
parameter long-context instruct model finetuned from *Granite-4.0-H-Tiny-Base* using a combination of open source instruction datasets with permissive license and internally collected synthetic datasets. This model is developed using a diverse set of techniques with a structured chat format, including supervised finetuning, model alignment using reinforcement learning, and model merging. Granite 4.0 instruct models feature improved *instruction following (IF)* and *tool-calling* capabilities, making them more effective in enterprise applications.\\n\\n- **Developers:** Granite Team, IBM\\n- **HF Collection:** [Granite 4.0 Language Models HF Collection](https://huggingface.co/collections/ibm-granite/granite-40-language-models-6811a18b820ef362d9e5a82c)\\n- **GitHub Repository:** [ibm-granite/granite-4.0-language-models](https://github.com/ibm-granite/granite-4.0-language-models)\\n- **Website**: [Granite Docs](https://www.ibm.com/granite/docs/) \\n- **Release Date**: October 2nd, 2025\\n- **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\\n\\n**Supported Languages:** \\nEnglish, German, Spanish, French, Japanese, Portuguese, Arabic, Czech, Italian, Korean, Dutch, and Chinese. Users may finetune Granite 4.0 models for languages beyond these languages.\\n\\n**Intended use:** \\nThe model is designed to respond to general instructions and can be used to build AI assistants for multiple domains, including business applications.\\n\\n*Capabilities*\\n* Summarization\\n* Text classification\\n* Text extraction\\n* Question-answering\\n* Retrieval Augmented Generation (RAG)\\n* Code related tasks\\n* Function-calling tasks\\n* Multilingual dialog use cases\\n* Fill-In-the-Middle (FIM) code completions\\n\\n<!-- <todo>Need to test the examples. 
(especially the tool calling and RAG ones)</todo>\\n -->\\n \\n**Generation:** \\nThis is a simple example of how to use Granite-4.0-H-Tiny model.\\n\\nInstall the following libraries:\\n\\n```shell\\npip install torch torchvision torchaudio\\npip install accelerate\\npip install transformers\\n```\\nThen, copy the snippet from the section that is relevant for your use case.\\n\\n```python\\nimport torch\\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\\n\\ndevice = \\\"cuda\\\"\\nmodel_path = \\\"ibm-granite/granite-4.0-h-tiny\\\"\\ntokenizer = AutoTokenizer.from_pretrained(model_path)\\n# drop device_map if running on CPU\\nmodel = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)\\nmodel.eval()\\n# change input text as desired\\nchat = [\\n    { \\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"Please list one IBM Research laboratory located in the United States. You should only output its name and location.\\\" },\\n]\\nchat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)\\n# tokenize the text\\ninput_tokens = tokenizer(chat, return_tensors=\\\"pt\\\").to(device)\\n# generate output tokens\\noutput = model.generate(**input_tokens, \\n                        max_new_tokens=100)\\n# decode output tokens into text\\noutput = tokenizer.batch_decode(output)\\n# print output\\nprint(output[0])\\n```\\n\\nExpected output:\\n```shell\\n<|start_of_role|>user<|end_of_role|>Please list one IBM Research laboratory located in the United States. You should only output its name and location.<|end_of_text|>\\n<|start_of_role|>assistant<|end_of_role|>Almaden Research Center, San Jose, California<|end_of_text|>\\n```\\n\\n**Tool-calling:** \\nGranite-4.0-H-Tiny comes with enhanced tool calling capabilities, enabling seamless integration with external functions and APIs. 
To define a list of  tools please follow OpenAI's function [definition schema](https://platform.openai.com/docs/guides/function-calling?api-mode=responses#defining-functions). \\n\\nThis is an example of how to use Granite-4.0-H-Tiny model tool-calling ability:\\n\\n```python\\ntools = [\\n    {\\n        \\\"type\\\": \\\"function\\\",\\n        \\\"function\\\": {\\n            \\\"name\\\": \\\"get_current_weather\\\",\\n            \\\"description\\\": \\\"Get the current weather for a specified city.\\\",\\n            \\\"parameters\\\": {\\n                \\\"type\\\": \\\"object\\\",\\n                \\\"properties\\\": {\\n                    \\\"city\\\": {\\n                        \\\"type\\\": \\\"string\\\",\\n                        \\\"description\\\": \\\"Name of the city\\\"\\n                    }\\n                },\\n                \\\"required\\\": [\\\"city\\\"]\\n            }\\n        }\\n    }\\n]\\n\\n# change input text as desired\\nchat = [\\n    { \\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"What's the weather like in Boston right now?\\\" },\\n]\\nchat = tokenizer.apply_chat_template(chat, \\\\\\n                                     tokenize=False, \\\\\\n                                     tools=tools, \\\\\\n                                     add_generation_prompt=True)\\n# tokenize the text\\ninput_tokens = tokenizer(chat, return_tensors=\\\"pt\\\").to(device)\\n# generate output tokens\\noutput = model.generate(**input_tokens, \\n                        max_new_tokens=100)\\n# decode output tokens into text\\noutput = tokenizer.batch_decode(output)\\n# print output\\nprint(output[0])\\n```\\n\\nExpected output:\\n```shell\\n<|start_of_role|>system<|end_of_role|>You are a helpful assistant with access to the following tools. 
You may call one or more tools to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\\n{\\\"type\\\": \\\"function\\\", \\\"function\\\": {\\\"name\\\": \\\"get_current_weather\\\", \\\"description\\\": \\\"Get the current weather for a specified city.\\\", \\\"parameters\\\": {\\\"type\\\": \\\"object\\\", \\\"properties\\\": {\\\"city\\\": {\\\"type\\\": \\\"string\\\", \\\"description\\\": \\\"Name of the city\\\"}}, \\\"required\\\": [\\\"city\\\"]}}}\\n</tools>\\n\\nFor each tool call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call>. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.<|end_of_text|>\\n<|start_of_role|>user<|end_of_role|>What's the weather like in Boston right now?<|end_of_text|>\\n<|start_of_role|>assistant<|end_of_role|><tool_call>\\n{\\\"name\\\": \\\"get_current_weather\\\", \\\"arguments\\\": {\\\"city\\\": \\\"Boston\\\"}}\\n</tool_call><|end_of_text|>\\n```\\n\\n<!-- **Retrieval Augmented Generation:** \\n*Coming soon* -->\\n\\n**Evaluation Results:** \\n\\n<table>\\n<!--   <caption><b> All Results</b></caption> -->\\n<thead>\\n  <tr>\\n    <th style=\\\"text-align:left; background-color: #001d6c; color: white;\\\">Benchmarks</th>\\n    <th style=\\\"text-align:left; background-color: #001d6c; color: white;\\\">Metric</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">Micro Dense</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">H Micro Dense</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">H Tiny MoE</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">H Small MoE</th>\\n  </tr>\\n</thead>\\n  
<tbody>\\n<tr>\\n  <td colspan=\\\"6\\\" style=\\\"text-align:center; background-color:  #FFFFFF; color: #2D2D2D; font-style:italic;\\\">\\n    General Tasks\\n  </td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MMLU</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">5-shot</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">65.98</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">67.43</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">68.65</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">78.44</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MMLU-Pro</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">5-shot, CoT</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">44.5</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">43.48</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">44.94</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">55.47</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">BBH</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">3-shot, CoT</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">72.48</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">69.36</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">66.34</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">81.62</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; 
background-color: #FFFFFF; color: #2D2D2D;\\\">AGI EVAL</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">0-shot, CoT</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">64.29</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">59</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">62.15</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">70.63</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">GPQA</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">0-shot, CoT</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">30.14</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">32.15</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">32.59</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">40.63</td>\\n</tr>\\n<tr>\\n  <td colspan=\\\"6\\\" style=\\\"text-align:center; background-color:  #FFFFFF; color: #2D2D2D; font-style:italic;\\\">\\n    Alignment Tasks\\n  </td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">AlpacaEval 2.0</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\"></td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">29.49</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">31.49</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">30.61</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">42.48</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: 
#2D2D2D;\\\">IFEval</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Instruct, Strict</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">85.5</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">86.94</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">84.78</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">89.87</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">IFEval</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Prompt, Strict</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">79.12</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">81.71</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">78.1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">85.22</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">IFEval</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Average</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">82.31</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">84.32</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">81.44</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">87.55</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">ArenaHard</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\"></td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: 
#2D2D2D;\\\">25.84</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">36.15</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">35.75</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">46.48</td>\\n</tr>\\n<tr>\\n  <td colspan=\\\"6\\\" style=\\\"text-align:center; background-color:  #FFFFFF; color: #2D2D2D; font-style:italic;\\\">\\n    Math Tasks\\n  </td>\\n</tr>      \\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">GSM8K</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">8-shot</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">85.45</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">81.35</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">84.69</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">87.27</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">GSM8K Symbolic</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">8-shot</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">79.82</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">77.5</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">81.1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">87.38</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Minerva Math</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">0-shot, CoT</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">62.06</td>\\n    <td 
style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">66.44</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">69.64</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">74</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">DeepMind Math</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">0-shot, CoT</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">44.56</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">43.83</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">49.92</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">59.33</td>\\n</tr>\\n<tr>\\n  <td colspan=\\\"6\\\" style=\\\"text-align:center; background-color:  #FFFFFF; color: #2D2D2D; font-style:italic;\\\">\\n    Code Tasks\\n  </td>\\n</tr> \\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">HumanEval</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">pass@1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">80</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">81</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">83</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">88</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">HumanEval+</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">pass@1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">72</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; 
75</td>">
color: #2D2D2D;\\\">75</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">76</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">83</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MBPP</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">pass@1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">72</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">73</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">80</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">84</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MBPP+</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">pass@1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">64</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">64</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">69</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">71</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">CRUXEval-O</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">pass@1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">41.5</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">41.25</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">39.63</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">50.25</td>\\n</tr>\\n<tr>\\n    <td 
style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">BigCodeBench</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">pass@1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">39.21</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">37.9</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">41.06</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">46.23</td>\\n</tr>\\n<tr>\\n  <td colspan=\\\"6\\\" style=\\\"text-align:center; background-color:  #FFFFFF; color: #2D2D2D; font-style:italic;\\\">\\n    Tool Calling Tasks\\n  </td>\\n</tr> \\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">BFCL v3</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\"></td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">59.98</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">57.56</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">57.65</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">64.69</td>\\n</tr>\\n<tr>\\n  <td colspan=\\\"6\\\" style=\\\"text-align:center; background-color:  #FFFFFF; color: #2D2D2D; font-style:italic;\\\">\\n    Multilingual Tasks\\n  </td>\\n</tr> \\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MULTIPLE</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">pass@1</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">49.21</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">49.46</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: 
#2D2D2D;\\\">55.83</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">57.37</td>\\n</tr> \\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MMMLU</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">5-shot</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">55.14</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">55.19</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">61.87</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">69.69</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">INCLUDE</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">5-shot</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">51.62</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">50.51</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">53.12</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">63.97</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MGSM</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">8-shot</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">28.56</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">44.48</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">45.36</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">38.72</td>\\n</tr>\\n<tr>\\n  <td colspan=\\\"6\\\" style=\\\"text-align:center; background-color:  #FFFFFF; color: 
#2D2D2D; font-style:italic;\\\">\\n    Safety\\n  </td>\\n</tr> \\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">SALAD-Bench</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\"></td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">97.06</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">96.28</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">97.77</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">97.3</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">AttaQ</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\"></td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">86.05</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">84.44</td>\\n    <td style=\\\"text-align:right; background-color: #DAE8FF; color: #2D2D2D;\\\">86.61</td>\\n    <td style=\\\"text-align:right; background-color: #FFFFFF; color: #2D2D2D;\\\">86.64</td>\\n</tr>\\n</tbody></table>\\n\\n\\n<table>\\n  <caption><b>Multilingual Benchmarks and the included languages:</b></caption>\\n<thead>\\n  <tr>\\n    <th style=\\\"text-align:left; background-color: #001d6c; color: white;\\\">Benchmarks</th>\\n    <th style=\\\"text-align:left; background-color: #001d6c; color: white;\\\"># Langs</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">Languages</th>\\n  </tr>\\n</thead>\\n<tbody>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MMMLU</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">11</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">ar, de, en, es, fr, 
ja, ko, pt, zh, bn, hi</td>\\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">INCLUDE</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">14</td>\\n<!--     <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">hindi, bengali, tamil, telugu, arabic, german, spanish, french, italian, japanese, korean, dutch, portuguese, chinese</td> -->\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">hi, bn, ta, te, ar, de, es, fr, it, ja, ko, nl, pt, zh</td>\\n    \\n</tr>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">MGSM</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">5</td>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">en, es, fr, ja, zh</td>\\n</tr>\\n</tbody>\\n</table>\\n\\n**Model Architecture:** \\nGranite-4.0-H-Tiny baseline is built on a decoder-only MoE transformer architecture. 
Core components of this architecture are: GQA, Mamba2, MoEs with shared experts, SwiGLU activation, RMSNorm, and shared input/output embeddings.\\n\\n<table>\\n<thead>\\n  <tr>\\n    <th style=\\\"text-align:left; background-color: #001d6c; color: white;\\\">Model</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">Micro Dense</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">H Micro Dense</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">H Tiny MoE</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">H Small MoE</th>\\n  </tr></thead>\\n<tbody>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Embedding size</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">2560</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">2048</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">1536</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">4096</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Number of layers</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">40 attention</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">4 attention / 36 Mamba2</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">4 attention / 36 Mamba2</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">4 attention / 36 Mamba2</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Attention head size</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">64</td>\\n    <td 
style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">64</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">128</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">128</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Number of attention heads</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">40</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">32</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">12</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">32</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Number of KV heads</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">8</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">8</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">4</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">8</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Mamba2 state size</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">128</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">128</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">128</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Number of Mamba2 heads</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td 
style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">64</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">48</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">128</td>\\n  </tr>\\n\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">MLP / Shared expert hidden size</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">8192</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">8192</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">1024</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">1536</td>\\n  </tr>\\n    \\n\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Num. Experts</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">64</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">72</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Num. 
active Experts</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">6</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">10</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Expert hidden size</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">-</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">512</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">768</td>\\n  </tr>\\n\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">MLP activation</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">SwiGLU</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">SwiGLU</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">SwiGLU</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">SwiGLU</td>\\n  </tr>\\n\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Sequence length</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">128K</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">128K</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">128K</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">128K</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\">Position 
embedding</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">RoPE</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">NoPE</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">NoPE</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">NoPE</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\"># Parameters</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">3B</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">3B</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">7B</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">32B</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: black;\\\"># Active parameters</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">3B</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">3B</td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">1B</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: black;\\\">9B</td>\\n  </tr>\\n</tbody></table>\\n\\n**Training Data:** \\nOverall, our SFT data is largely comprised of three key sources: (1) publicly available datasets with permissive license, (2) internal synthetic data targeting specific capabilities, and (3) a select set of human-curated data.\\n\\n**Infrastructure:**\\nWe trained the Granite 4.0 Language Models utilizing an NVIDIA GB200 NVL72 cluster hosted in CoreWeave. Intra-rack communication occurs via the 72-GPU NVLink domain, and a non-blocking, full Fat-Tree NDR 400 Gb/s InfiniBand network provides inter-rack communication. 
This cluster provides a scalable and efficient infrastructure for training our models over thousands of GPUs.\\n\\n**Ethical Considerations and Limitations:** \\nGranite 4.0 Instruction Models are primarily finetuned using instruction-response pairs mostly in English, but also multilingual data covering multiple languages. Although this model can handle multilingual dialog use cases, its performance might not be similar to English tasks. In such case, introducing a small number of examples (few-shot) can help the model in generating more accurate outputs. While this model has been aligned by keeping safety in consideration, the model may in some cases produce inaccurate, biased, or unsafe responses to user prompts. So we urge the community to use this model with proper safety testing and tuning tailored for their specific tasks.\\n\\n**Resources**\\n- ⭐\\uFE0F Learn about the latest updates with Granite: https://www.ibm.com/granite\\n- \\uD83D\\uDCC4 Get started with tutorials, best practices, and prompt engineering advice: https://www.ibm.com/granite/docs/\\n- \\uD83D\\uDCA1 Learn about the latest Granite learning resources: https://ibm.biz/granite-learning-resources\\n\\n<!-- ## Citation\\n```\\n@misc{granite-models,\\n  author = {author 1, author2, ...},\\n  title = {},\\n  journal = {},\\n  volume = {},\\n  year = {2024},\\n  url = {https://arxiv.org/abs/0000.00000},\\n}\\n``` -->\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/ibm-granite/granite-4.0-h-tiny-GGUF/resolve/3971ea11968c34d4e4dbee55cfb55b9cba134b21/granite-4.0-h-tiny-Q4_K_M.gguf\",\n      \"memory\": 4224733676,\n      \"properties\": {\n        \"jinja\": \"true\"\n      },\n      \"sha256\": \"491ba81786c46a345a5da9a60cdb9f9a3056960c8411dd857153c194b1f91313\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.ibm-granite.granite-4.0-micro-GGUF\",\n      \"name\": \"ibm-granite/granite-4.0-micro-GGUF\",\n     
 \"description\": \"# Granite-4.0-Micro\\n\\n**Model Summary:**\\nGranite-4.0-Micro is a compact language model from the Granite 4.0 family designed for efficient deployment with strong performance. This Q4_K_M quantized GGUF version provides a good balance between model size and quality, making it suitable for resource-constrained environments while maintaining the core capabilities of the Granite 4.0 series.\\n\\n- **Developers:** Granite Team, IBM\\n- **HF Collection:** [Granite 4.0 Language Models HF Collection](https://huggingface.co/collections/ibm-granite/granite-40-language-models-6811a18b820ef362d9e5a82c)\\n- **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\\n\\n**Capabilities:**\\n* General instruction following\\n* Question-answering\\n* Text generation\\n* Conversational AI\\n* Multilingual dialog use cases\\n\\n**Intended Use:**\\nThe model is designed to respond to general instructions and can be used to build AI assistants for multiple domains, particularly in scenarios where model size and inference speed are important considerations.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/ibm-granite/granite-4.0-micro-GGUF/resolve/397e2dcbd97dcdfa016934bffed65cf5df3ca55f/granite-4.0-micro-Q4_K_M.gguf\",\n      \"memory\": 2100000000,\n      \"properties\": {\n        \"jinja\": \"true\"\n      },\n      \"sha256\": \"6c02683809a8dc4eb05c78d44bc63bcd707703b078998fa58829c858ab337bb0\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.ibm-granite.granite-3.3-8b-instruct-GGUF\",\n      \"name\": \"ibm-granite/granite-3.3-8b-instruct-GGUF\",\n      \"description\": \"# Granite-3.3-8B-Instruct\\n\\n**Model Summary:**\\nGranite-3.3-8B-Instruct is a 8-billion parameter 128K context length language model fine-tuned for improved reasoning and instruction-following capabilities. 
Built on top of Granite-3.3-8B-Base, the model delivers significant gains on benchmarks for measuring generic performance including AlpacaEval-2.0 and Arena-Hard, and improvements in mathematics, coding, and instruction following. It supports structured reasoning through \\\\<think\\\\>\\\\<\\\\/think\\\\> and \\\\<response\\\\>\\\\<\\\\/response\\\\> tags, providing clear separation between internal thoughts and final outputs. The model has been trained on a carefully balanced combination of permissively licensed data and curated synthetic tasks.\\n\\n- **Developers:** Granite Team, IBM\\n- **Website**: [Granite Docs](https://www.ibm.com/granite/docs/)\\n- **Release Date**: April 16th, 2025\\n- **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\\n\\n**Supported Languages:** \\nEnglish, German, Spanish, French, Japanese, Portuguese, Arabic, Czech, Italian, Korean, Dutch, and Chinese. However, users may finetune this Granite model for languages beyond these 12 languages.\\n\\n**Intended Use:** \\nThis model is designed to handle general instruction-following tasks and can be integrated into AI assistants across various domains, including business applications.\\n\\n**Capabilities**\\n* Thinking\\n* Summarization\\n* Text classification\\n* Text extraction\\n* Question-answering\\n* Retrieval Augmented Generation (RAG)\\n* Code related tasks\\n* Function-calling tasks\\n* Multilingual dialog use cases\\n* Fill-in-the-middle\\n* Long-context tasks including long document/meeting summarization, long document QA, etc.\\n\\n\\n**Generation:** \\nThis is a simple example of how to use Granite-3.3-8B-Instruct model.\\n\\nInstall the following libraries:\\n\\n```shell\\npip install torch torchvision torchaudio\\npip install accelerate\\npip install transformers\\n```\\nThen, copy the snippet from the section that is relevant for your use case.\\n\\n```python\\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, set_seed\\nimport 
torch\\n\\nmodel_path=\\\"ibm-granite/granite-3.3-8b-instruct\\\"\\ndevice=\\\"cuda\\\"\\nmodel = AutoModelForCausalLM.from_pretrained(\\n        model_path,\\n        device_map=device,\\n        torch_dtype=torch.bfloat16,\\n    )\\ntokenizer = AutoTokenizer.from_pretrained(\\n        model_path\\n)\\n\\nconv = [{\\\"role\\\": \\\"user\\\", \\\"content\\\":\\\"Redesign a common household item to make it more sustainable and user-friendly. Explain the changes and their benefits.\\\"}]\\n\\ninput_ids = tokenizer.apply_chat_template(conv, return_tensors=\\\"pt\\\", thinking=True, return_dict=True, add_generation_prompt=True).to(device)\\n\\nset_seed(42)\\noutput = model.generate(\\n    **input_ids,\\n    max_new_tokens=8192,\\n)\\n\\nprediction = tokenizer.decode(output[0, input_ids[\\\"input_ids\\\"].shape[1]:], skip_special_tokens=True)\\nprint(prediction)\\n```\\n\\n**Example Outputs**\\n- thinking=True\\n```md\\n<think>To address this prompt, I'll focus on redesigning a common household item: the plastic water bottle. The goal is to make it more sustainable and user-friendly. Here are the proposed changes and their benefits:\\n\\n1. **Material**: Replace plastic with biodegradable or recyclable materials like glass or plant-based plastics (PLA).\\n2. **Design**: Introduce a modular design for easy refilling and cleaning.\\n3. **Functionality**: Incorporate a built-in water filter for purification.\\n4. **Durability**: Enhance the bottle's lifespan through robust construction.\\n5. **User-friendliness**: Simplify the cap mechanism for easy opening and closing.\\n\\nThese changes would significantly reduce plastic waste, promote recycling, ensure access to clean drinking water, and enhance the overall user experience.\\n\\nNow, I will detail each change and its benefits in a structured format.</think>\\n\\n<response>### Redesign of the Plastic Water Bottle for Sustainability and User-Friendliness\\n\\n#### 1. 
Material: Biodegradable or Recyclable Alternatives\\n\\n**Change**: Transition from traditional PET plastic to glass or plant-based plastics (Polylactic Acid - PLA).\\n\\n**Benefits**:\\n- **Sustainability**: Glass and PLA are biodegradable or highly recyclable, drastically reducing plastic pollution.\\n- **Health**: Unlike PET, these materials do not leach chemicals into the water, ensuring safer consumption.\\n\\n#### 2. Modular Design for Easy Refilling and Cleaning\\n\\n**Change**: Implement a detachable, interchangeable inner liner within a reusable outer casing.\\n\\n**Benefits**:\\n- **User-Friendliness**: Users can easily remove the inner liner for thorough cleaning, reducing the risk of bacterial growth.\\n- **Sustainability**: The outer casing remains intact, encouraging reuse, while the inner liner can be replaced or recycled when worn out.\\n\\n#### 3. Built-in Water Filter\\n\\n**Change**: Integrate a removable, replaceable water filter into the bottle cap.\\n\\n**Benefits**:\\n- **Accessibility**: Provides clean drinking water directly from any source (tap, river, etc.), eliminating the need for additional filtration devices.\\n- **Health**: Removes contaminants, bacteria, and impurities, ensuring safe hydration.\\n- **Sustainability**: Reduces the use of single-use plastic water bottles by offering a purification solution within a reusable container.\\n\\n#### 4. Enhanced Durability\\n\\n**Change**: Use high-quality, durable materials and robust construction techniques for both the outer casing and inner liner.\\n\\n**Benefits**:\\n- **Longevity**: A more robust design increases the bottle's lifespan, reducing the frequency of replacements.\\n- **Cost-effectiveness**: Although the initial cost might be higher, the long-term savings from reduced replacements make it economical.\\n- **Sustainability**: Less frequent disposal means lower environmental impact.\\n\\n#### 5. 
Simplified Cap Mechanism\\n\\n**Change**: Adopt a one-touch, easy-to-open and close cap design, possibly inspired by sports bottle caps.\\n\\n**Benefits**:\\n- **User-Friendliness**: Especially beneficial for children, elderly, or those with limited hand dexterity.\\n- **Convenience**: Quick access to water encourages more frequent hydration, promoting healthier habits.\\n\\n### Conclusion\\n\\nThis redesigned water bottle addresses key sustainability concerns by minimizing plastic use and waste, while also enhancing user-friendliness through improved functionality, durability, and accessibility. By integrating biodegradable materials, a modular design, a built-in filter, robust construction, and a simple cap mechanism, the bottle not only reduces environmental footprint but also offers a superior user experience.</response>\\n```\\n- thinking=False\\n```md\\nItem for Redesign: Plastic Bottle (e.g., water or beverage bottle)\\n\\nRedesign Concept: Eco-Friendly, Modular, and Multi-Functional Reusable Bottle\\n\\nChanges:\\n\\n1. Material: Replace the conventional plastic with food-grade, BPA-free, and recyclable materials such as stainless steel or glass. These materials are durable, non-toxic, and have a longer lifespan, reducing waste.\\n\\n2. Modular Design: Introduce a modular design that allows users to customize their bottle according to their needs. This could include interchangeable caps, mouthpiece sizes, and carrying loops. Users can purchase additional modules as needed, promoting a more sustainable approach by reducing the need to buy an entirely new bottle for different purposes.\\n\\n3. Integrated Filter: Incorporate a built-in, washable, and reusable filter that can remove impurities and improve the taste of water. This eliminates the need for single-use disposable filters or bottled water, further reducing plastic waste.\\n\\n4. Smart Cap: Develop a smart cap with a built-in digital display and temperature sensor. 
This feature allows users to track their daily water intake, set hydration goals, and monitor the temperature of their beverage. The smart cap can be synced with a mobile app for additional functionality, such as reminders and progress tracking.\\n\\n5. Easy-to-Clean Design: Ensure the bottle has a wide mouth and smooth interior surfaces for easy cleaning. Include a brush for hard-to-reach areas, making maintenance simple and encouraging regular use.\\n\\n6. Collapsible Structure: Implement a collapsible design that reduces the bottle's volume when not in use, making it more portable and convenient for storage.\\n\\nBenefits:\\n\\n1. Sustainability: By using recyclable materials and reducing plastic waste, this redesigned bottle significantly contributes to a more sustainable lifestyle. The modular design and reusable filter also minimize single-use plastic consumption.\\n\\n2. User-Friendly: The smart cap, easy-to-clean design, and collapsible structure make the bottle convenient and user-friendly. Users can customize their bottle to suit their needs, ensuring a better overall experience.\\n\\n3. Healthier Option: Using food-grade, BPA-free materials and an integrated filter ensures that the beverages consumed are free from harmful chemicals and impurities, promoting a healthier lifestyle.\\n\\n4. Cost-Effective: Although the initial investment might be higher, the long-term savings from reduced purchases of single-use plastic bottles and disposable filters make this reusable bottle a cost-effective choice.\\n\\n5. 
Encourages Hydration: The smart cap's features, such as hydration tracking and temperature monitoring, can motivate users to stay hydrated and develop healthier habits.\\n\\nBy redesigning a common household item like the plastic bottle, we can create a more sustainable, user-friendly, and health-conscious alternative that benefits both individuals and the environment.\\n```\\n\\n**Evaluation Results:**\\n<table>\\n<thead>\\n    <caption style=\\\"text-align:center\\\"><b>Comparison with different models over various benchmarks<sup id=\\\"fnref1\\\"><a href=\\\"#fn1\\\">1</a></sup>. Scores of AlpacaEval-2.0 and Arena-Hard are calculated with thinking=True</b></caption>\\n  <tr>\\n    <th style=\\\"text-align:left; background-color: #001d6c; color: white;\\\">Models</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">Arena-Hard</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">AlpacaEval-2.0</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">MMLU</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">PopQA</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">TruthfulQA</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">BigBenchHard<sup id=\\\"fnref2\\\"><a href=\\\"#fn2\\\">2</a></sup></th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">DROP<sup id=\\\"fnref3\\\"><a href=\\\"#fn3\\\">3</a></sup></th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">GSM8K</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">HumanEval</th>\\n   <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">HumanEval+</th>\\n  <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">IFEval</th>\\n  <th 
style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">AttaQ</th>\\n  </tr></thead>\\n  <tbody>\\n<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.1-2B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">23.3</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">27.17</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">57.11</td> \\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">20.55</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">59.79</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">61.82</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">20.99</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">67.55</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">79.45</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">75.26</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">63.59</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">84.7</td>\\n  </tr>\\n  <tr>\\n      <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.2-2B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">24.86</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">34.51</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">57.18</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">20.56</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: 
#2D2D2D;\\\">59.8</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">61.39</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">23.84</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">67.02</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">80.13</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">73.39</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">61.55</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">83.23</td>\\n  </tr>\\n  <tr>\\n      <td style=\\\"text-align:left; background-color: #DAE8FF; color: black;\\\"><b>Granite-3.3-2B-Instruct</b></td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 28.86 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 43.45 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 55.88 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 18.4 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 58.97 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 63.91 </td>\\n      <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 44.33 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 72.48 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 80.51 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 75.68 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 65.8 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: 
black;\\\">87.47</td>\\n      </tr>\\n      \\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Llama-3.1-8B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">36.43</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">27.22</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">69.15</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">28.79</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">52.79</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">73.43</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">71.23</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">83.24</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">85.32</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">80.15</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">79.10</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">83.43</td>\\n  </tr>\\n           \\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">DeepSeek-R1-Distill-Llama-8B</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">17.17</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">21.85</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">45.80</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">13.25</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">47.43</td>\\n    <td 
style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">67.39</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">49.73</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">72.18</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">67.54</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">62.91</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">66.50</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">42.87</td>\\n  </tr>\\n      \\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Qwen-2.5-7B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">25.44</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">30.34</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">74.30</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">18.12</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">63.06</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">69.19</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">64.06</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">84.46</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">93.35</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">89.91</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">74.90</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">81.90</td>\\n  </tr>\\n      \\n  
<tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">DeepSeek-R1-Distill-Qwen-7B</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">10.36</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">15.35</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">50.72</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">9.94</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">47.14</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">67.38</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">51.78</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">78.47</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">79.89</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">78.43</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">59.10</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">42.45</td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.1-8B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">37.58</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">30.34</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">66.77</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">28.7</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">65.84</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: 
#2D2D2D;\\\">69.87</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">58.57</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">79.15</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">89.63</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">85.79</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">73.20</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">85.73</td>\\n  </tr>\\n            \\n<tr>\\n      <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.2-8B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">55.25</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">61.19</td>\\n   <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">66.79</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">28.04</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">66.92</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">71.86</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">58.29</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">81.65</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">89.35</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">85.72</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">74.31</td>\\n     <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\">84.7</td>\\n  </tr>\\n  <tr>\\n      <td style=\\\"text-align:left; background-color: 
#DAE8FF; color: black;\\\"><b>Granite-3.3-8B-Instruct</b></td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 57.56 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 62.68 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 65.54 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 26.17 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 66.86 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 69.13 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 59.36 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 80.89 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 89.73 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 86.09 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 74.82 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\">88.5</td>\\n      </tr>                 \\n</tbody></table>\\n\\n<table>\\n <caption style=\\\"text-align:center\\\"><b>Math Benchmarks</b></caption>\\n<thead>\\n  <tr>\\n    <th style=\\\"text-align:left; background-color: #001d6c; color: white;\\\">Models</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">AIME24</th>\\n    <th style=\\\"text-align:center; background-color: #001d6c; color: white;\\\">MATH-500</th>\\n  </tr></thead>\\n  <tbody>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.1-2B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\"> 0.89 </td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: 
#2D2D2D;\\\"> 35.07 </td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.2-2B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\"> 0.89 </td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\"> 35.54 </td>\\n  </tr>\\n  <tr>\\n      <td style=\\\"text-align:left; background-color: #DAE8FF; color: black;\\\"><b>Granite-3.3-2B-Instruct</b></td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 3.28 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 58.09 </td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.1-8B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\"> 1.97 </td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\"> 48.73 </td>\\n  </tr>\\n  <tr>\\n    <td style=\\\"text-align:left; background-color: #FFFFFF; color: #2D2D2D;\\\">Granite-3.2-8B-Instruct</td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\"> 2.43 </td>\\n    <td style=\\\"text-align:center; background-color: #FFFFFF; color: #2D2D2D;\\\"> 52.8 </td>\\n  </tr>\\n  <tr>\\n      <td style=\\\"text-align:left; background-color: #DAE8FF; color: black;\\\"><b>Granite-3.3-8B-Instruct</b></td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 8.12 </td>\\n    <td style=\\\"text-align:center; background-color: #DAE8FF; color: black;\\\"> 69.02 </td>\\n  </tr>\\n    </tbody></table>\\n    \\n**Training Data:** \\nOverall, our training data is largely comprised of two key sources: (1) publicly available datasets with permissive license, (2) internal synthetically generated data targeted to enhance reasoning capabilites. 
\\n<!-- A detailed attribution of datasets can be found in [Granite 3.2 Technical Report (coming soon)](#), and [Accompanying Author List](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/author-ack.pdf). -->\\n\\n**Infrastructure:**\\nWe train Granite-3.3-8B-Instruct using IBM's super computing cluster, Blue Vela, which is outfitted with NVIDIA H100 GPUs. This cluster provides a scalable and efficient infrastructure for training our models over thousands of GPUs.\\n\\n**Ethical Considerations and Limitations:** \\nGranite-3.3-8B-Instruct builds upon Granite-3.3-8B-Base, leveraging both permissively licensed open-source and select proprietary data for enhanced performance. Since it inherits its foundation from the previous model, all ethical considerations and limitations applicable to [Granite-3.3-8B-Base](https://huggingface.co/ibm-granite/granite-3.3-8b-base) remain relevant.\\n\\n\\n**Resources**\\n- ⭐\\uFE0F Learn about the latest updates with Granite: https://www.ibm.com/granite\\n- \\uD83D\\uDCC4 Get started with tutorials, best practices, and prompt engineering advice: https://www.ibm.com/granite/docs/\\n- \\uD83D\\uDCA1 Learn about the latest Granite learning resources: https://ibm.biz/granite-learning-resources\\n\\n<p><a href=\\\"#fnref1\\\" title=\\\"Jump back to reference\\\">[1]</a> Evaluated using <a href=\\\"https://github.com/allenai/olmes\\\">OLMES</a> (except AttaQ and Arena-Hard scores)</p>\\n<p><a href=\\\"#fnref2\\\" title=\\\"Jump back to reference\\\">[2]</a> Added regex for more efficient answer extraction.</p>\\n<p><a href=\\\"#fnref3\\\" title=\\\"Jump back to reference\\\">[3]</a> Modified the implementation to handle some of the issues mentioned <a href=\\\"https://huggingface.co/blog/open-llm-leaderboard-drop\\\">here</a></p>\\n<!-- ## Citation\\n```\\n@misc{granite-models,\\n  author = {author 1, author2, ...},\\n  title = {},\\n  journal = {},\\n  volume = {},\\n  year = {2024},\\n  url = 
{https://arxiv.org/abs/0000.00000},\\n}\\n``` -->\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\",\n      \"memory\": 4939212390,\n      \"properties\": {\n        \"jinja\": \"true\"\n      },\n      \"sha256\": \"77bcee066a76dcdd10d0d123c87e32c8ec2c74e31b6ffd87ebee49c9ac215dca\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.ibm-research.granite-3.2-8b-instruct-GGUF\",\n      \"name\": \"ibm-research/granite-3.2-8b-instruct-GGUF\",\n      \"description\": \"# Granite-3.2-8B-Instruct-GGUF\\n\\n**Model Summary:**\\nGranite-3.2-8B-Instruct is an 8-billion-parameter, long-context AI model fine-tuned for thinking capabilities. Built on top of [Granite-3.1-8B-Instruct](https://huggingface.co/ibm-granite/granite-3.1-8b-instruct), it has been trained using a mix of permissively licensed open-source datasets and internally generated synthetic data designed for reasoning tasks. The model allows controllability of its thinking capability, ensuring it is applied only when required.\\n\\n- **Developers:** Granite Team, IBM\\n- **Website**: [Granite Docs](https://www.ibm.com/granite/docs/)\\n- **Release Date**: February 26th, 2025\\n- **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\\n\\n**Supported Languages:** \\nEnglish, German, Spanish, French, Japanese, Portuguese, Arabic, Czech, Italian, Korean, Dutch, and Chinese. 
However, users may finetune this Granite model for languages beyond these 12 languages.\\n\\n**Intended Use:** \\nThis model is designed to handle general instruction-following tasks and can be integrated into AI assistants across various domains, including business applications.\\n\\n**Capabilities**\\n* **Thinking**\\n* Summarization\\n* Text classification\\n* Text extraction\\n* Question-answering\\n* Retrieval Augmented Generation (RAG)\\n* Code related tasks\\n* Function-calling tasks\\n* Multilingual dialog use cases\\n* Long-context tasks including long document/meeting summarization, long document QA, etc.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/ibm-research/granite-3.2-8b-instruct-GGUF/resolve/main/granite-3.2-8b-instruct-Q4_K_M.gguf\",\n      \"memory\": 4939212390,\n      \"properties\": {\n        \"chatFormat\": \"openchat\"\n      },\n      \"sha256\": \"363f0bbc3200b9c9b0ab87efe237d77b1e05bb929d5d7e4b57c1447c911223e8\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.ibm-granite.granite-8b-code-instruct\",\n      \"name\": \"ibm-granite/granite-8b-code-instruct-GGUF\",\n      \"description\": \"![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/1hzxoPwqkBJXshKVVe6_9.png)\\n\\n# ibm-granite/granite-8b-code-instruct-GGUF\\nThis is the Q4_K_M converted version of the original [`ibm-granite/granite-8b-code-instruct`](https://huggingface.co/ibm-granite/granite-8b-code-instruct).\\nRefer to the [original model card](https://huggingface.co/ibm-granite/granite-8b-code-instruct) for more details.\\n\\n## Use with llama.cpp\\n```shell\\ngit clone https://github.com/ggerganov/llama.cpp\\ncd llama.cpp\\n\\n# install\\nmake\\n\\n# run generation\\n./main -m granite-8b-code-instruct-GGUF/granite-8b-code-instruct.Q4_K_M.gguf -n 128 -p \\\"def generate_random(x: int):\\\" --color\\n```\",\n      \"registry\": \"Hugging 
Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/ibm-granite/granite-8b-code-instruct-GGUF/resolve/main/granite-8b-code-instruct.Q4_K_M.gguf\",\n      \"memory\": 5347234284,\n      \"properties\": {\n        \"chatFormat\": \"openchat\"\n      },\n      \"sha256\": \"bc8804cb43c4e1e82e2188658569b147587f83a89640600a64d5f7d7de2565b4\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.ggerganov.whisper.cpp\",\n      \"name\": \"ggerganov/whisper.cpp\",\n      \"description\": \"# OpenAI's Whisper models converted to ggml format\\n\\n[Available models](https://huggingface.co/ggerganov/whisper.cpp/tree/main)\\n\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-small.bin\",\n      \"memory\": 487010000,\n      \"sha256\": \"1be3a9b2063867b937e64e2ec7483364a79917e157fa98c5d94b5c1fffea987b\",\n      \"backend\": \"whisper-cpp\"\n    },\n    {\n      \"id\": \"hf.facebook.detr-resnet-101\",\n      \"name\": \"facebook/detr-resnet-101\",\n      \"description\": \"# DETR (End-to-End Object Detection) model with ResNet-101 backbone\\n\\nDEtection TRansformer (DETR) model trained end-to-end on COCO 2017 object detection (118k annotated images). It was introduced in the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Carion et al. and first released in [this repository](https://github.com/facebookresearch/detr). \\n\\nDisclaimer: The team releasing DETR did not write a model card for this model so this model card has been written by the Hugging Face team.\\n\\n## Model description\\n\\nThe DETR model is an encoder-decoder transformer with a convolutional backbone. Two heads are added on top of the decoder outputs in order to perform object detection: a linear layer for the class labels and a MLP (multi-layer perceptron) for the bounding boxes. 
The model uses so-called object queries to detect objects in an image. Each object query looks for a particular object in the image. For COCO, the number of object queries is set to 100. \\n\\nThe model is trained using a \\\"bipartite matching loss\\\": one compares the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a \\\"no object\\\" as class and \\\"no bounding box\\\" as bounding box). The Hungarian matching algorithm is used to create an optimal one-to-one mapping between each of the N queries and each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and generalized IoU loss (for the bounding boxes) are used to optimize the parameters of the model.\\n\\n![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/detr_architecture.png)\\n\\n## Intended uses & limitations\\n\\nYou can use the raw model for object detection. 
See the [model hub](https://huggingface.co/models?search=facebook/detr) to look for all available DETR models.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"Apache-2.0\",\n      \"url\": \"https://huggingface.co/facebook/detr-resnet-101/resolve/no_timm/pytorch_model.bin\",\n      \"memory\": 242980000,\n      \"properties\": {\n        \"name\": \"facebook/detr-resnet-101\"\n      },\n      \"sha256\": \"893ae2442b36b2e8e1134ccbf8c0d9bd670648d0964509202ab30c9cbb3d2114\",\n      \"backend\": \"none\"\n    },\n    {\n      \"id\": \"hf.microsoft.Phi-4-mini-reasoning\",\n      \"name\": \"microsoft/Phi-4-mini-reasoning (Unsloth quantization)\",\n      \"description\": \"## Model Summary\\n \\nPhi-4-mini-reasoning is a lightweight open model built upon synthetic data with a focus on high-quality, reasoning dense data further finetuned for more advanced math reasoning capabilities. \\nThe model belongs to the Phi-4 model family and supports 128K token context length. \\n \\n\\uD83D\\uDCF0 [Phi-4-mini-reasoning Blog](https://aka.ms/phi4-mini-reasoning/blog), and [Developer Article](https://techcommunity.microsoft.com/blog/azuredevcommunityblog/make-phi-4-mini-reasoning-more-powerful-with-industry-reasoning-on-edge-devices/4409764)<br>\\n\\uD83D\\uDCD6 [Phi-4-mini-reasoning Technical Report](https://aka.ms/phi4-mini-reasoning/techreport) | [HF paper](https://huggingface.co/papers/2504.21233) <br>\\n\\uD83D\\uDC69\\u200D\\uD83C\\uDF73 [Phi Cookbook](https://github.com/microsoft/PhiCookBook) <br>\\n\\uD83C\\uDFE1 [Phi Portal](https://azure.microsoft.com/en-us/products/phi) <br>\\n\\uD83D\\uDDA5\\uFE0F Try It [Azure](https://aka.ms/phi4-mini-reasoning/azure) <br>\\n \\n \\n\\uD83C\\uDF89**Phi-4 models**: [[Phi-4-reasoning](https://huggingface.co/microsoft/Phi-4-reasoning)] | [[multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) | [onnx](https://huggingface.co/microsoft/Phi-4-multimodal-instruct-onnx)]; 
\\n[[mini-instruct](https://huggingface.co/microsoft/Phi-4-mini-instruct) | [onnx](https://huggingface.co/microsoft/Phi-4-mini-instruct-onnx)]\\n\\n## Intended Uses\\n \\n### Primary Use Cases\\n\\nPhi-4-mini-reasoning is designed for multi-step, logic-intensive mathematical problem-solving tasks under memory/compute constrained environments and latency bound scenarios.\\nSome of the use cases include formal proof generation, symbolic computation, advanced word problems, and a wide range of mathematical reasoning scenarios. \\nThese models excel at maintaining context across steps, applying structured logic, and delivering accurate, reliable solutions in domains that require deep analytical thinking.\\n\\n### Use Case Considerations\\n \\nThis model is designed and tested for math reasoning only. It is not specifically designed or evaluated for all downstream purposes. \\nDevelopers should consider common limitations of language models, as well as performance difference across languages, as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high-risk scenarios. \\nDevelopers should be aware of and adhere to applicable laws or regulations (including but not limited to privacy, trade compliance laws, etc.) that are relevant to their use case. \\n \\n***Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.***\\n \\n## Release Notes\\n \\nThis release of Phi-4-mini-reasoning addresses user feedback and market demand for a compact reasoning model. 
\\nIt is a compact transformer-based language model optimized for mathematical reasoning, built to deliver high-quality, step-by-step problem solving in environments where computing or latency is constrained.\\nThe model is fine-tuned with synthetic math data from a more capable model (much larger, smarter, more accurate, and better at following instructions), which has resulted in enhanced reasoning performance. \\nPhi-4-mini-reasoning balances reasoning ability with efficiency, making it potentially suitable for educational applications, embedded tutoring, and lightweight deployment on edge or mobile systems.\\nIf a critical issue is identified with Phi-4-mini-reasoning, it should be promptly reported through the MSRC Researcher Portal or secure@microsoft.com  \\n \\n### Model Quality\\n \\nTo understand the capabilities, the 3.8B parameters Phi-4-mini-reasoning model was compared with a set of models over a variety of reasoning benchmarks. \\nA high-level overview of the model quality is as follows:\\n\\n| Model                              | AIME  | MATH-500 | GPQA Diamond |\\n|------------------------------------|-------|----------|--------------|\\n| o1-mini*                           | 63.6  | 90.0     | 60.0         |\\n| DeepSeek-R1-Distill-Qwen-7B        | 53.3  | 91.4     | 49.5         |\\n| DeepSeek-R1-Distill-Llama-8B       | 43.3  | 86.9     | 47.3         |\\n| Bespoke-Stratos-7B*                | 20.0  | 82.0     | 37.8         |\\n| OpenThinker-7B*                    | 31.3  | 83.0     | 42.4         |\\n| Llama-3.2-3B-Instruct              | 6.7   | 44.4     | 25.3         |\\n| Phi-4-Mini (base model, 3.8B)      | 10.0  | 71.8     | 36.9         |\\n|**Phi-4-mini-reasoning (3.8B)**     | **57.5** | **94.6** | **52.0**  |\\n \\nOverall, the model with only 3.8B-param achieves a similar level of multilingual language understanding and reasoning ability as much larger models.\\nHowever, it is still fundamentally limited by its size for certain 
tasks. The model simply does not have the capacity to store too much factual knowledge, therefore, users may experience factual incorrectness. However, it may be possible to resolve such weakness by augmenting Phi-4 with a search engine, particularly when using the model under RAG settings.\\n \\n## Usage\\n \\n### Tokenizer\\n \\nPhi-4-mini-reasoning supports a vocabulary size of up to `200064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-4-mini-reasoning/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size.\\n \\n### Input Formats\\n \\nGiven the nature of the training data, the Phi-4-mini-instruct\\nmodel is best suited for prompts using specific formats.\\nBelow are the two primary formats:\\n \\n#### Chat format\\n \\nThis format is used for general conversation and instructions:\\n \\n```yaml\\n<|system|>Your name is Phi, an AI math expert developed by Microsoft.<|end|><|user|>How to solve 3*x^2+4*x+5=1?<|end|><|assistant|>\\n```\\n### Inference with transformers\\n\\nPhi-4-mini-reasoning has been integrated in the `4.51.3` version of `transformers`. The current `transformers` version can be verified with: `pip list | grep transformers`.\\nPython 3.8 and 3.10 will work best. 
\\nList of required packages:\\n\\n```\\nflash_attn==2.7.4.post1\\ntorch==2.5.1\\ntransformers==4.51.3\\naccelerate==1.3.0\\n```\\n \\nPhi-4-mini-reasoning is also available in [Azure AI Studio](https://aka.ms/phi-4-mini-reasoning/azure)\\n\\n#### Example\\n \\nAfter obtaining the Phi-4-mini-instruct model checkpoints, users can use this sample code for inference.\\n \\n```python\\nimport torch\\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\\ntorch.random.manual_seed(0)\\n\\nmodel_id = \\\"microsoft/Phi-4-mini-reasoning\\\"\\nmodel = AutoModelForCausalLM.from_pretrained(\\n    model_id,\\n    device_map=\\\"cuda\\\",\\n    torch_dtype=\\\"auto\\\",\\n    trust_remote_code=True,\\n)\\ntokenizer = AutoTokenizer.from_pretrained(model_id)\\n\\nmessages = [{\\n    \\\"role\\\": \\\"user\\\",\\n    \\\"content\\\": \\\"How to solve 3*x^2+4*x+5=1?\\\"\\n}]   \\ninputs = tokenizer.apply_chat_template(\\n    messages,\\n    add_generation_prompt=True,\\n    return_dict=True,\\n    return_tensors=\\\"pt\\\",\\n)\\n\\noutputs = model.generate(\\n    **inputs.to(model.device),\\n    max_new_tokens=32768,\\n    temperature=0.8,\\n    top_p=0.95,\\n    do_sample=True,\\n)\\noutputs = tokenizer.batch_decode(outputs[:, inputs[\\\"input_ids\\\"].shape[-1]:])\\n\\nprint(outputs[0])\\n```\\n \\n## Training\\n \\n### Model\\n \\n+ **Architecture:** Phi-4-mini-reasoning shares the same architecture as Phi-4-Mini, which has 3.8B parameters and is a dense decoder-only Transformer model. When compared with Phi-3.5-Mini, the major changes with Phi-4-Mini are 200K vocabulary, grouped-query attention, and shared input and output embedding.<br>\\n+ **Inputs:** Text. 
It is best suited for prompts using the chat format.<br>\\n+ **Context length:** 128K tokens<br>\\n+ **GPUs:** 128 H100-80G<br>\\n+ **Training time:** 2 days<br>\\n+ **Training data:** 150B tokens<br>\\n+ **Outputs:** Generated text<br>\\n+ **Dates:** Trained in February 2024<br>\\n+ **Status:** This is a static model trained on offline datasets with the cutoff date of February 2025 for publicly available data.<br>\\n+ **Supported languages:** English<br>\\n+ **Release date:** April 2025<br>\\n \\n### Training Datasets\\n \\nThe training data for Phi-4-mini-reasoning consists exclusively of synthetic mathematical content generated by a stronger and more advanced reasoning model, Deepseek-R1. \\nThe objective is to distill knowledge from this model. This synthetic dataset comprises over one million diverse math problems spanning multiple levels of difficulty (from middle school to Ph.D. level).\\nFor each problem in the synthetic dataset, eight distinct solutions (rollouts) were sampled, and only those verified as correct were retained, resulting in approximately 30 billion tokens of math content.\\nThe dataset  integrates three primary components: \\n1) a curated selection of high-quality, publicly available math questions and a part of the SFT(Supervised Fine-Tuning) data that was used to train the base Phi-4-Mini model;\\n2) an extensive collection of synthetic math data generated by the Deepseek-R1 model, designed specifically for high-quality supervised fine-tuning and model distillation; and\\n3) a balanced set of correct and incorrect answers used to construct preference data aimed at enhancing Phi-4-mini-reasoning's reasoning capabilities by learning more effective reasoning trajectories\\n\\n## Software\\n* [PyTorch](https://github.com/pytorch/pytorch)\\n* [Transformers](https://github.com/huggingface/transformers)\\n* [Flash-Attention](https://github.com/HazyResearch/flash-attention)\\n \\n## Hardware\\nNote that by default, the Phi-4-mini-reasoning model 
uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:\\n* NVIDIA A100\\n* NVIDIA H100\\n \\nIf you want to run the model on:\\n* NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation=\\\"eager\\\"\\n\\n## Safety Evaluation and Red-Teaming\\n \\nThe Phi-4 family of models has adopted a robust safety post-training approach. This approach leverages a variety of both open-source and in-house generated datasets. The overall technique employed to do the safety alignment is a combination of SFT, DPO (Direct Preference Optimization), and RLHF (Reinforcement Learning from Human Feedback) approaches  by utilizing human-labeled and synthetic English-language datasets, including publicly available datasets focusing on helpfulness and harmlessness, as well as various questions and answers targeted to multiple safety categories. \\n\\nPhi-4-Mini-Reasoning was developed in accordance with Microsoft's responsible AI principles. Potential safety risks in the model’s responses were assessed using the Azure AI Foundry’s Risk and Safety Evaluation framework, focusing on harmful content, direct jailbreak, and model groundedness. The Phi-4-Mini-Reasoning Model Card contains additional information about our approach to safety and responsible AI considerations that developers should be aware of when using this model.\\n\\n## Responsible AI Considerations\\n \\nLike other language models, the Phi family of models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:\\n \\n+ Quality of Service: The Phi models are trained primarily on English text and some additional multilingual text. Languages other than English will experience worse performance as well as performance disparities across non-English. 
English language varieties with less representation in the training data might experience worse performance than standard American English.  \\n+ Multilingual performance and safety gaps: We believe it is important to make language models more widely available across different languages, but the Phi 4 models still exhibit challenges common across multilingual releases. As with any deployment of LLMs, developers will be better positioned to test for performance or safety gaps for their linguistic and cultural context and customize the model with additional fine-tuning and appropriate safeguards.\\n+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups, cultural contexts, or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.\\n+ Inappropriate or Offensive Content: These models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the case.\\n+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.  \\n+ Election Information Reliability: The model has an elevated defect rate when responding to election-critical queries, which may result in incorrect or unauthoritative election critical information being presented. We are working to improve the model's performance in this area. 
Users should verify information related to elections with the election authority in their region.\\n+ Limited Scope for Code: The majority of Phi 4 training data is based in Python and uses common packages such as \\\"typing, math, random, collections, datetime, itertools\\\". If the model generates Python scripts that utilize other packages or scripts in other languages, it is  strongly recommended that users manually verify all API uses.\\n+ Long Conversation: Phi 4 models, like other models, can in some cases generate responses that are repetitive, unhelpful, or inconsistent in very long chat sessions in both English and non-English languages. Developers are encouraged to place appropriate mitigations, like limiting conversation turns to account for the possible conversational drift.\\n \\nDevelopers should apply responsible AI best practices, including mapping, measuring, and mitigating risks associated with their specific use case and cultural, linguistic context. Phi 4 family of models are general purpose models. As developers plan to deploy these models for specific use cases, they are encouraged to fine-tune the models for their use case and leverage the models as part of broader AI systems with language-specific safeguards in place. Important areas for consideration include:  \\n \\n+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.\\n+ High-Risk Scenarios: Developers should assess the suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). 
Additional safeguards should be implemented at the application level according to the deployment context.\\n+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).  \\n+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.\\n+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.\\n \\n## License\\nThe model is licensed under the [MIT license](./LICENSE).\\n \\n## Trademarks\\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.\\n \\n \\n## Appendix A: Benchmark Methodology\\n \\nWe include a brief word on methodology here - and in particular, how we think about optimizing prompts. In an ideal world, we would never change any prompts in our benchmarks to ensure it is always an apples-to-apples comparison when comparing different models. Indeed, this is our default approach, and is the case in the vast majority of models we have run to date. 
For all benchmarks, we consider using the same generation configuration such as max sequence length (32768), the same temperature for the fair comparison.\\nBenchmark datasets\\nWe evaluate the model with three of the most popular math benchmarks where the strongest reasoning models are competing together. Specifically:\\n-\\tMath-500: This benchmark consists of 500 challenging math problems designed to test the model's ability to perform complex mathematical reasoning and problem-solving.\\n-\\tAIME 2024: The American Invitational Mathematics Examination (AIME) is a highly regarded math competition that features a series of difficult problems aimed at assessing advanced mathematical skills and logical reasoning.\\n-\\tGPQA Diamond: The Graduate-Level Google-Proof Q&A (GPQA) Diamond benchmark focuses on evaluating the model's ability to understand and solve a wide range of mathematical questions, including both straightforward calculations and more intricate problem-solving tasks.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"MIT\",\n      \"url\": \"https://huggingface.co/unsloth/Phi-4-mini-reasoning-GGUF/resolve/main/Phi-4-mini-reasoning-Q4_K_M.gguf\",\n      \"properties\": {\n        \"jinja\": \"true\"\n      },\n      \"memory\": 2480343613,\n      \"sha256\": \"81878401a2f8160473649af89560a7fc0932f3623e4f6e58143d5dcbf71d6480\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.microsoft.Phi-4-reasoning-plus\",\n      \"name\": \"microsoft/Phi-4-reasoning-plus (Unsloth quantization)\",\n      \"description\": \"## Model Summary\\n\\n|                         |                                                                               |     \\n|-------------------------|-------------------------------------------------------------------------------|\\n| **Developers**          | Microsoft Research                                                            |\\n| **Description**         | Phi-4-reasoning-plus is a 
state-of-the-art open-weight reasoning model finetuned from Phi-4 using supervised fine-tuning on a dataset of chain-of-thought traces and reinforcement learning. The supervised fine-tuning dataset includes a blend of synthetic prompts and high-quality filtered data from public domain websites, focused on math, science, and coding skills as well as alignment data for safety and Responsible AI. The goal of this approach was to ensure that small capable models were trained with data focused on high quality and advanced reasoning. Phi-4-reasoning-plus has been trained additionally with Reinforcement Learning, hence, it has higher accuracy but generates on average 50% more tokens, thus having higher latency.                                                                  |\\n| **Architecture**        | Base model same as previously released Phi-4, 14B parameters, dense decoder-only Transformer model                                                                                                     |\\n| **Inputs**              | Text, best suited for prompts in the chat format                              |\\n| **Context length**      | 32k tokens                                                                    |\\n| **GPUs**                | 32 H100-80G                                                                   |\\n| **Training time**       | 2.5 days                                                                      |\\n| **Training data**       | 16B tokens, ~8.3B unique tokens                                               |\\n| **Outputs**             | Generated text in response to the input. 
Model responses have two sections, namely, a reasoning chain-of-thought block followed by a summarization block                                                                         |\\n| **Dates**               | January 2025 – April 2025                                                     |\\n| **Status**              | Static model trained on an offline dataset with cutoff dates of March 2025 and earlier for publicly available data                                                                                                      |\\n| **Release date**        | April 30, 2025                                                                |\\n| **License**             | MIT                                                                           |\\n\\n## Intended Use\\n\\n|                               |                                                                         |\\n|-------------------------------|-------------------------------------------------------------------------|\\n| **Primary Use Cases**         | Our model is designed to accelerate research on language models, for use as a building block for generative AI powered features. It provides uses for general purpose AI systems and applications (primarily in English) which require:<br><br>1. Memory/compute constrained environments.<br>2. Latency bound scenarios.<br>3. Reasoning and logic.                                                                                                    |\\n| **Out-of-Scope Use Cases**    | This model is designed and tested for math reasoning only. Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high-risk scenarios. 
Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case, including the model’s focus on English. Review the Responsible AI Considerations section below for further guidance when choosing a use case. Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.                                                                                                     |\\n\\n## Usage\\n\\n> \\n > To fully take advantage of the model's capabilities, inference must use `temperature=0.8`, `top_k=50`, `top_p=0.95`, and `do_sample=True`. For more complex queries, set `max_new_tokens=32768` to allow for longer chain-of-thought (CoT).\\n *Phi-4-reasoning-plus has shown strong performance on reasoning-intensive tasks. In our experiments, we extended its maximum number of tokens to 64k, and it handled longer sequences with promising results, maintaining coherence and logical consistency over extended inputs. This makes it a compelling option to explore for tasks that require deep, multi-step reasoning or extensive context.*\\n### Input Formats\\nGiven the nature of the training data, **always use** ChatML template with the **following system prompt** for inference:\\n```bash\\n<|im_start|>system<|im_sep|>\\nYou are Phi, a language model trained by Microsoft to help users. Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: <think> {Thought section} </think> {Solution section}. 
In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines:<|im_end|>\\n<|im_start|>user<|im_sep|>\\nWhat is the derivative of x^2?<|im_end|>\\n<|im_start|>assistant<|im_sep|>\\n```\\n### With `transformers`\\n```python\\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\\ntokenizer = AutoTokenizer.from_pretrained(\\\"microsoft/Phi-4-reasoning-plus\\\")\\nmodel = AutoModelForCausalLM.from_pretrained(\\\"microsoft/Phi-4-reasoning-plus\\\", device_map=\\\"auto\\\", torch_dtype=\\\"auto\\\")\\nmessages = [\\n    {\\\"role\\\": \\\"system\\\", \\\"content\\\": \\\"You are Phi, a language model trained by Microsoft to help users. Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: <think> {Thought section} </think> {Solution section}. In the Thought section, detail your reasoning process in steps. 
Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines:\\\"},\\n    {\\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"What is the derivative of x^2?\\\"},\\n]\\ninputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors=\\\"pt\\\")\\n\\noutputs = model.generate(\\n    inputs.to(model.device),\\n    max_new_tokens=4096,\\n    temperature=0.8,\\n    top_k=50,\\n    top_p=0.95,\\n    do_sample=True,\\n)\\nprint(tokenizer.decode(outputs[0]))\\n```\\n### With `vllm`\\n\\n```bash\\nvllm serve microsoft/Phi-4-reasoning-plus --enable-reasoning --reasoning-parser deepseek_r1\\n```\\n\\n*Phi-4-reasoning-plus is also supported out-of-the-box by Ollama, llama.cpp, and any Phi-4 compatible framework.*\\n\\n## Data Overview\\n\\n### Training Datasets\\n\\nOur training data is a mixture of Q&A, chat format data in math, science, and coding. The chat prompts are sourced from filtered high-quality web data and optionally rewritten and processed through a synthetic data generation pipeline. We further include data to improve truthfulness and safety.\\n\\n### Benchmark Datasets\\n\\nWe evaluated Phi-4-reasoning-plus using the open-source [Eureka](https://github.com/microsoft/eureka-ml-insights) evaluation suite and our own internal benchmarks to understand the model's capabilities. 
More specifically, we evaluate our model on:\\n\\nReasoning tasks:\\n\\n* **AIME 2025, 2024, 2023, and 2022:** Math olympiad questions.\\n\\n* **GPQA-Diamond:** Complex, graduate-level science questions.\\n\\n* **OmniMath:** Collection of over 4000 olympiad-level math problems with human annotation.\\n\\n* **LiveCodeBench:** Code generation benchmark gathered from competitive coding contests.\\n\\n* **3SAT (3-literal Satisfiability Problem) and TSP (Traveling Salesman Problem):** Algorithmic problem solving.\\n\\n* **BA Calendar:** Planning.\\n\\n* **Maze and SpatialMap:** Spatial understanding.\\n\\nGeneral-purpose benchmarks:\\n\\n* **Kitab:** Information retrieval.\\n\\n* **IFEval and ArenaHard:** Instruction following.\\n\\n* **PhiBench:** Internal benchmark.\\n\\n* **FlenQA:** Impact of prompt length on model performance.\\n\\n* **HumanEvalPlus:** Functional code generation.\\n\\n* **MMLU-Pro:** Popular aggregated dataset for multitask language understanding.\\n\\n## Safety\\n\\n### Approach\\n\\nPhi-4-reasoning-plus has adopted a robust safety post-training approach via supervised fine-tuning (SFT). This approach leverages a variety of both open-source and in-house generated synthetic prompts, with LLM-generated responses that adhere to rigorous Microsoft safety guidelines, e.g., User Understanding and Clarity, Security and Ethical Guidelines, Limitations, Disclaimers and Knowledge Scope, Handling Complex and Sensitive Topics, Safety and Respectful Engagement, Confidentiality of Guidelines and Confidentiality of Chain-of-Thoughts. \\n\\n### Safety Evaluation and Red-Teaming\\n\\nPrior to release, Phi-4-reasoning-plus followed a multi-faceted evaluation approach. Quantitative evaluation was conducted with multiple open-source safety benchmarks and in-house tools utilizing adversarial conversation simulation. 
For qualitative safety evaluation, we collaborated with the independent AI Red Team (AIRT) at Microsoft to assess safety risks posed by Phi-4-reasoning-plus in both average and adversarial user scenarios. In the average user scenario, AIRT emulated typical single-turn and multi-turn interactions to identify potentially risky behaviors. The adversarial user scenario tested a wide range of techniques aimed at intentionally subverting the model's safety training including grounded-ness, jailbreaks, harmful content like hate and unfairness, violence, sexual content, or self-harm, and copyright violations for protected material. We further evaluate models on Toxigen, a benchmark designed to measure bias and toxicity targeted towards minority groups. \\n\\nPlease refer to the technical report for more details on safety alignment. \\n\\n## Model Quality\\n\\nAt the high-level overview of the model quality on representative benchmarks. For the tables below, higher numbers indicate better performance:\\n\\n|                             | AIME 24     | AIME 25     | OmniMath    | GPQA-D     | LiveCodeBench (8/1/24–2/1/25) |\\n|-----------------------------|-------------|-------------|-------------|------------|-------------------------------|\\n| Phi-4-reasoning             | 75.3        | 62.9        | 76.6        | 65.8       | 53.8                          |\\n| Phi-4-reasoning-plus            | 81.3        | 78.0        | 81.9        | 68.9       | 53.1                          |\\n| OpenThinker2-32B            | 58.0        | 58.0        | —           | 64.1       | —                             |\\n| QwQ 32B                     | 79.5        | 65.8        | —           | 59.5       | 63.4                          |\\n| EXAONE-Deep-32B             | 72.1        | 65.8        | —           | 66.1       | 59.5                          |\\n| DeepSeek-R1-Distill-70B     | 69.3        | 51.5        | 63.4        | 66.2       | 57.5                          |\\n| DeepSeek-R1  
               | 78.7        | 70.4        | 85.0        | 73.0       | 62.8                          |\\n| o1-mini                     | 63.6        | 54.8        | —           | 60.0       | 53.8                          |\\n| o1                          | 74.6        | 75.3        | 67.5        | 76.7       | 71.0                          |\\n| o3-mini                     | 88.0        | 78.0        | 74.6        | 77.7       | 69.5                          |\\n| Claude-3.7-Sonnet           | 55.3        | 58.7        | 54.6        | 76.8       | —                             |\\n| Gemini-2.5-Pro              | 92.0        | 86.7        | 61.1        | 84.0       | 69.2                          |\\n\\n|                                        | Phi-4 | Phi-4-reasoning  | Phi-4-reasoning-plus  | o3-mini | GPT-4o |\\n|----------------------------------------|-------|------------------|-------------------|---------|--------|\\n| FlenQA [3K-token subset]               | 82.0  | 97.7             | 97.9          | 96.8    | 90.8   |\\n| IFEval Strict                          | 62.3  | 83.4             | 84.9              | 91.5    | 81.8   |\\n| ArenaHard                              | 68.1 | 73.3            | 79.0             | 81.9    | 75.6 |\\n| HumanEvalPlus                          | 83.5  | 92.9         | 92.3              | 94.0| 88.0   |\\n| MMLUPro                                | 71.5  | 74.3             | 76.0              | 79.4    | 73.0   |\\n| Kitab<br><small>No Context - Precision<br>With Context - Precision<br>No Context - Recall<br>With Context - Recall</small>                                  | <br>19.3<br>88.5<br>8.2<br>68.1       | <br>23.2<br>91.5<br>4.9<br>74.8                  | <br>27.6<br>93.6<br>6.3<br>75.4                   | <br>37.9<br>94.0<br>4.2<br>76.1        | <br>53.7<br>84.7<br>20.3<br>69.2       |\\n| Toxigen Discriminative<br><small>Toxic category<br>Neutral category</small>                | <br>72.6<br>90.0       | 
<br>86.7<br>84.7                 | <br>77.3<br>90.5                   | <br>85.4<br>88.7         | <br>87.6<br>85.1        |\\n| PhiBench 2.21                          | 58.2  | 70.6             | 74.2              | 78.0| 72.4   |\\n\\nOverall, Phi-4-reasoning and Phi-4-reasoning-plus, with only 14B parameters, perform well across a wide range of reasoning tasks, outperforming significantly larger open-weight models such as DeepSeek-R1 distilled 70B model and approaching the performance levels of full DeepSeek R1 model. We also test the models on multiple new reasoning benchmarks for algorithmic problem solving and planning, including 3SAT, TSP, and BA-Calendar. These new tasks are nominally out-of-domain for the models as the training process did not intentionally target these skills, but the models still show strong generalization to these tasks. Furthermore, when evaluating performance against standard general abilities benchmarks such as instruction following or non-reasoning tasks, we find that our new models improve significantly from Phi-4, despite the post-training being focused on reasoning skills in specific domains. \\n\\n## Responsible AI Considerations\\n\\nLike other language models, Phi-4-reasoning-plus can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:   \\n\\n* **Quality of Service:** The model is trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. Phi-4-reasoning-plus is not intended to support multilingual use. \\n\\n* **Representation of Harms & Perpetuation of Stereotypes:** These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. 
Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.  \\n\\n* **Inappropriate or Offensive Content:** These models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case.  \\n\\n* **Information Reliability:** Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.\\n\\n* **Election Information Reliability:** The model has an elevated defect rate when responding to election-critical queries, which may result in incorrect or unauthoritative election critical information being presented. We are working to improve the model's performance in this area. Users should verify information related to elections with the election authority in their region. \\n\\n* **Limited Scope for Code:** Majority of Phi-4-reasoning-plus training data is based in Python and uses common packages such as `typing`, `math`, `random`, `collections`, `datetime`, `itertools`. If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses.  \\n\\nDevelopers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Using safety services like [Azure AI Content Safety](https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety) that have advanced guardrails is highly recommended. 
Important areas for consideration include:\\n\\n* **Allocation:** Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. \\n\\n* **High-Risk Scenarios:** Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context.  \\n\\n* **Misinformation:** Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).    \\n\\n* **Generation of Harmful Content:** Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.  
\\n\\n* **Misuse:** Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.\\n\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"mit\",\n      \"url\": \"https://huggingface.co/unsloth/Phi-4-reasoning-plus-GGUF/resolve/main/Phi-4-reasoning-plus-Q4_K_M.gguf\",\n      \"memory\": 9715463520,\n      \"properties\": {\n        \"jinja\": \"true\"\n      },\n      \"sha256\": \"faf720745e20df40f52ee218be14c72b33070f7aacc508b3fbc61d47f32b4ffe\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"hf.google.gemma-3n-E4B\",\n      \"name\": \"google/gemma-3n-E4B (Unsloth quantization)\",\n      \"description\": \"# Gemma 3n model card\\n\\n**Model Page**: [Gemma 3n](https://ai.google.dev/gemma/docs/gemma-3n)\\n\\n**Resources and Technical Documentation**:\\n\\n-   [Responsible Generative AI Toolkit](https://ai.google.dev/responsible)\\n-   [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma-3n)\\n-   [Gemma on HuggingFace](https://huggingface.co/collections/google/gemma-3n-685065323f5984ef315c93f4)\\n-   [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/gemma3n)\\n\\n**Terms of Use**: [Terms](https://ai.google.dev/gemma/terms)\\\\\\n**Authors**: Google DeepMind\\n\\n## Model Information\\n\\nSummary description and brief definition of inputs and outputs.\\n\\n### Description\\n\\nGemma is a family of lightweight, state-of-the-art open models from Google,\\nbuilt from the same research and technology used to create the Gemini models.\\nGemma 3n models are designed for efficient execution on low-resource devices.\\nThey are capable of multimodal input, handling text, image, video, and audio\\ninput, and generating text outputs, with open weights for pre-trained and\\ninstruction-tuned variants. 
These models were trained with data in over 140\\nspoken languages.\\n\\nGemma 3n models use selective parameter activation technology to reduce resource\\nrequirements. This technique allows the models to operate at an effective size\\nof 2B and 4B parameters, which is lower than the total number of parameters they\\ncontain. For more information on Gemma 3n's efficient parameter management\\ntechnology, see the\\n[Gemma 3n](https://ai.google.dev/gemma/docs/gemma-3n#parameters)\\npage.\\n\\n### Inputs and outputs\\n\\n-   **Input:**\\n    -   Text string, such as a question, a prompt, or a document to be\\n        summarized\\n    -   Images, normalized to 256x256, 512x512, or 768x768 resolution\\n        and encoded to 256 tokens each\\n    -   Audio data encoded to 6.25 tokens per second from a single channel\\n    -   Total input context of 32K tokens\\n-   **Output:**\\n    -   Generated text in response to the input, such as an answer to a\\n        question, analysis of image content, or a summary of a document\\n    -   Total output length up to 32K tokens, subtracting the request\\n        input tokens\\n\\n### Usage\\n\\nBelow, there are some code snippets on how to get quickly started with running\\nthe model. First, install the Transformers library. Gemma 3n is supported\\nstarting from transformers 4.53.0.\\n\\n```sh\\n$ pip install -U transformers\\n```\\n\\nThen, copy the snippet from the section that is relevant for your use case.\\n\\n#### Running with the `pipeline` API\\n\\nYou can initialize the model and processor for inference with `pipeline` as\\nfollows.\\n\\n```python\\nfrom transformers import pipeline\\nimport torch\\n\\npipe = pipeline(\\n    \\\"image-text-to-text\\\",\\n    model=\\\"google/gemma-3n-e4b-it\\\",\\n    device=\\\"cuda\\\",\\n    torch_dtype=torch.bfloat16,\\n)\\n```\\n\\nWith instruction-tuned models, you need to use chat templates to process our\\ninputs first. 
Then, you can pass it to the pipeline.\\n\\n```python\\nmessages = [\\n    {\\n        \\\"role\\\": \\\"system\\\",\\n        \\\"content\\\": [{\\\"type\\\": \\\"text\\\", \\\"text\\\": \\\"You are a helpful assistant.\\\"}]\\n    },\\n    {\\n        \\\"role\\\": \\\"user\\\",\\n        \\\"content\\\": [\\n            {\\\"type\\\": \\\"image\\\", \\\"url\\\": \\\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\\\"},\\n            {\\\"type\\\": \\\"text\\\", \\\"text\\\": \\\"What animal is on the candy?\\\"}\\n        ]\\n    }\\n]\\n\\noutput = pipe(text=messages, max_new_tokens=200)\\nprint(output[0][\\\"generated_text\\\"][-1][\\\"content\\\"])\\n# Okay, let's take a look!\\n# Based on the image, the animal on the candy is a **turtle**.\\n# You can see the shell shape and the head and legs.\\n```\\n\\n#### Running the model on a single GPU\\n\\n```python\\nfrom transformers import AutoProcessor, Gemma3nForConditionalGeneration\\nfrom PIL import Image\\nimport requests\\nimport torch\\n\\nmodel_id = \\\"google/gemma-3n-e4b-it\\\"\\n\\nmodel = Gemma3nForConditionalGeneration.from_pretrained(model_id, device_map=\\\"auto\\\", torch_dtype=torch.bfloat16,).eval()\\n\\nprocessor = AutoProcessor.from_pretrained(model_id)\\n\\nmessages = [\\n    {\\n        \\\"role\\\": \\\"system\\\",\\n        \\\"content\\\": [{\\\"type\\\": \\\"text\\\", \\\"text\\\": \\\"You are a helpful assistant.\\\"}]\\n    },\\n    {\\n        \\\"role\\\": \\\"user\\\",\\n        \\\"content\\\": [\\n            {\\\"type\\\": \\\"image\\\", \\\"image\\\": \\\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg\\\"},\\n            {\\\"type\\\": \\\"text\\\", \\\"text\\\": \\\"Describe this image in detail.\\\"}\\n        ]\\n    }\\n]\\n\\ninputs = processor.apply_chat_template(\\n    messages,\\n    add_generation_prompt=True,\\n    tokenize=True,\\n    return_dict=True,\\n    
return_tensors=\\\"pt\\\",\\n).to(model.device)\\n\\ninput_len = inputs[\\\"input_ids\\\"].shape[-1]\\n\\nwith torch.inference_mode():\\n    generation = model.generate(**inputs, max_new_tokens=100, do_sample=False)\\n    generation = generation[0][input_len:]\\n\\ndecoded = processor.decode(generation, skip_special_tokens=True)\\nprint(decoded)\\n\\n# **Overall Impression:** The image is a close-up shot of a vibrant garden scene,\\n# focusing on a cluster of pink cosmos flowers and a busy bumblebee.\\n# It has a slightly soft, natural feel, likely captured in daylight.\\n```\\n\\n### Citation\\n\\n```\\n@article{gemma_3n_2025,\\n    title={Gemma 3n},\\n    url={https://ai.google.dev/gemma/docs/gemma-3n},\\n    publisher={Google DeepMind},\\n    author={Gemma Team},\\n    year={2025}\\n}\\n```\\n\\n## Model Data\\n\\nData used for model training and how the data was processed.\\n\\n### Training Dataset\\n\\nThese models were trained on a dataset that includes a wide variety of sources\\ntotalling approximately 11 trillion tokens. The knowledge cutoff date for the\\ntraining data was June 2024. 
Here are the key components:\\n\\n-   **Web Documents**: A diverse collection of web text ensures the model\\n    is exposed to a broad range of linguistic styles, topics, and vocabulary.\\n    The training dataset includes content in over 140 languages.\\n-   **Code**: Exposing the model to code helps it to learn the syntax and\\n    patterns of programming languages, which improves its ability to generate\\n    code and understand code-related questions.\\n-   **Mathematics**: Training on mathematical text helps the model learn\\n    logical reasoning, symbolic representation, and to address mathematical queries.\\n-   **Images**: A wide range of images enables the model to perform image\\n    analysis and visual data extraction tasks.\\n-   Audio: A diverse set of sound samples enables the model to recognize\\n    speech, transcribe text from recordings, and identify information in audio data.\\n\\nThe combination of these diverse data sources is crucial for training a\\npowerful multimodal model that can handle a wide variety of different tasks and\\ndata formats.\\n\\n### Data Preprocessing\\n\\nHere are the key data cleaning and filtering methods applied to the training\\ndata:\\n\\n-   **CSAM Filtering**: Rigorous CSAM (Child Sexual Abuse Material)\\n    filtering was applied at multiple stages in the data preparation process to\\n    ensure the exclusion of harmful and illegal content.\\n-   **Sensitive Data Filtering**: As part of making Gemma pre-trained models\\n    safe and reliable, automated techniques were used to filter out certain\\n    personal information and other sensitive data from training sets.\\n-   **Additional methods**: Filtering based on content quality and safety in\\n    line with\\n    [our policies](https://ai.google/static/documents/ai-responsibility-update-published-february-2025.pdf).\\n\\n## Implementation Information\\n\\nDetails about the model internals.\\n\\n### Hardware\\n\\nGemma was trained using [Tensor Processing 
Unit\\n(TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv4p, TPUv5p\\nand TPUv5e). Training generative models requires significant computational\\npower. TPUs, designed specifically for matrix operations common in machine\\nlearning, offer several advantages in this domain:\\n\\n-   **Performance**: TPUs are specifically designed to handle the massive\\n    computations involved in training generative models. They can speed up\\n    training considerably compared to CPUs.\\n-   **Memory**: TPUs often come with large amounts of high-bandwidth memory,\\n    allowing for the handling of large models and batch sizes during training.\\n    This can lead to better model quality.\\n-   **Scalability**: TPU Pods (large clusters of TPUs) provide a scalable\\n    solution for handling the growing complexity of large foundation models.\\n    You can distribute training across multiple TPU devices for faster and more\\n    efficient processing.\\n-   **Cost-effectiveness**: In many scenarios, TPUs can provide a more\\n    cost-effective solution for training large models compared to CPU-based\\n    infrastructure, especially when considering the time and resources saved\\n    due to faster training.\\n\\nThese advantages are aligned with\\n[Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/).\\n\\n### Software\\n\\nTraining was done using [JAX](https://github.com/jax-ml/jax) and\\n[ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/).\\nJAX allows researchers to take advantage of the latest generation of hardware,\\nincluding TPUs, for faster and more efficient training of large models. ML\\nPathways is Google's latest effort to build artificially intelligent systems\\ncapable of generalizing across multiple tasks. 
This is specially suitable for\\nfoundation models, including large language models like these ones.\\n\\nTogether, JAX and ML Pathways are used as described in the\\n[paper about the Gemini family of models](https://goo.gle/gemma2report):\\n*\\\"the 'single controller' programming model of Jax and Pathways allows a single\\nPython process to orchestrate the entire training run, dramatically simplifying\\nthe development workflow.\\\"*\\n\\n## Evaluation\\n\\nModel evaluation metrics and results.\\n\\n### Benchmark Results\\n\\nThese models were evaluated at full precision (float32) against a large\\ncollection of different datasets and metrics to cover different aspects of\\ncontent generation. Evaluation results marked with **IT** are for\\ninstruction-tuned models. Evaluation results marked with **PT** are for\\npre-trained models.\\n\\n#### Reasoning and factuality\\n\\n| Benchmark                      | Metric         | n-shot   |  E2B PT  |  E4B PT  |\\n| ------------------------------ |----------------|----------|:--------:|:--------:|\\n| [HellaSwag][hellaswag]         | Accuracy       | 10-shot  |   72.2   |   78.6   |\\n| [BoolQ][boolq]                 | Accuracy       | 0-shot   |   76.4   |   81.6   |\\n| [PIQA][piqa]                   | Accuracy       | 0-shot   |   78.9   |   81.0   |\\n| [SocialIQA][socialiqa]         | Accuracy       | 0-shot   |   48.8   |   50.0   |\\n| [TriviaQA][triviaqa]           | Accuracy       | 5-shot   |   60.8   |   70.2   |\\n| [Natural Questions][naturalq]  | Accuracy       | 5-shot   |   15.5   |   20.9   |\\n| [ARC-c][arc]                   | Accuracy       | 25-shot  |   51.7   |   61.6   |\\n| [ARC-e][arc]                   | Accuracy       | 0-shot   |   75.8   |   81.6   |\\n| [WinoGrande][winogrande]       | Accuracy       | 5-shot   |   66.8   |   71.7   |\\n| [BIG-Bench Hard][bbh]          | Accuracy       | few-shot |   44.3   |   52.9   |\\n| [DROP][drop]                   | Token F1 score | 1-shot   |   
53.9   |   60.8   |\\n\\n[hellaswag]: https://arxiv.org/abs/1905.07830\\n[boolq]: https://arxiv.org/abs/1905.10044\\n[piqa]: https://arxiv.org/abs/1911.11641\\n[socialiqa]: https://arxiv.org/abs/1904.09728\\n[triviaqa]: https://arxiv.org/abs/1705.03551\\n[naturalq]: https://github.com/google-research-datasets/natural-questions\\n[arc]: https://arxiv.org/abs/1911.01547\\n[winogrande]: https://arxiv.org/abs/1907.10641\\n[bbh]: https://paperswithcode.com/dataset/bbh\\n[drop]: https://arxiv.org/abs/1903.00161\\n\\n#### Multilingual\\n\\n| Benchmark                           | Metric                  | n-shot   |  E2B IT  |  E4B IT  |\\n| ------------------------------------|-------------------------|----------|:--------:|:--------:|\\n| [MGSM][mgsm]                        | Accuracy                |  0-shot  |   53.1   |   60.7   |\\n| [WMT24++][wmt24pp] (ChrF)           | Character-level F-score |  0-shot  |   42.7   |   50.1   |\\n| [Include][include]                  | Accuracy                |  0-shot  |   38.6   |   57.2   |\\n| [MMLU][mmlu] (ProX)                 | Accuracy                |  0-shot  |    8.1   |   19.9   |\\n| [OpenAI MMLU][openai-mmlu]          | Accuracy                |  0-shot  |   22.3   |   35.6   |\\n| [Global-MMLU][global-mmlu]          | Accuracy                |  0-shot  |   55.1   |   60.3   |\\n| [ECLeKTic][eclektic]                | ECLeKTic score          |  0-shot  |    2.5   |    1.9   |\\n\\n[mgsm]: https://arxiv.org/abs/2210.03057\\n[wmt24pp]: https://arxiv.org/abs/2502.12404v1\\n[include]:https://arxiv.org/abs/2411.19799\\n[mmlu]: https://arxiv.org/abs/2009.03300\\n[openai-mmlu]: https://huggingface.co/datasets/openai/MMMLU\\n[global-mmlu]: https://huggingface.co/datasets/CohereLabs/Global-MMLU\\n[eclektic]: https://arxiv.org/abs/2502.21228\\n\\n#### STEM and code\\n\\n| Benchmark                           | Metric                   | n-shot   |  E2B IT  |  E4B IT  |\\n| 
------------------------------------|--------------------------|----------|:--------:|:--------:|\\n| [GPQA][gpqa] Diamond                | RelaxedAccuracy/accuracy |  0-shot  |   24.8   |   23.7   |\\n| [LiveCodeBench][lcb] v5             | pass@1                   |  0-shot  |   18.6   |   25.7   |\\n| Codegolf v2.2                       | pass@1                   |  0-shot  |   11.0   |   16.8   |\\n| [AIME 2025][aime-2025]              | Accuracy                 |  0-shot  |    6.7   |   11.6   |\\n\\n[gpqa]: https://arxiv.org/abs/2311.12022\\n[lcb]: https://arxiv.org/abs/2403.07974\\n[aime-2025]: https://www.vals.ai/benchmarks/aime-2025-05-09\\n\\n#### Additional benchmarks\\n\\n| Benchmark                            | Metric     | n-shot   |  E2B IT  |  E4B IT  |\\n| ------------------------------------ |------------|----------|:--------:|:--------:|\\n| [MMLU][mmlu]                         |  Accuracy  |  0-shot  |   60.1   |   64.9   |\\n| [MBPP][mbpp]                         |  pass@1    |  3-shot  |   56.6   |   63.6   |\\n| [HumanEval][humaneval]               |  pass@1    |  0-shot  |   66.5   |   75.0   |\\n| [LiveCodeBench][lcb]                 |  pass@1    |  0-shot  |   13.2   |   13.2   |\\n| HiddenMath                           |  Accuracy  |  0-shot  |   27.7   |   37.7   |\\n| [Global-MMLU-Lite][global-mmlu-lite] |  Accuracy  |  0-shot  |   59.0   |   64.5   |\\n| [MMLU][mmlu] (Pro)                   |  Accuracy  |  0-shot  |   40.5   |   50.6   |\\n\\n[gpqa]: https://arxiv.org/abs/2311.12022\\n[mbpp]: https://arxiv.org/abs/2108.07732\\n[humaneval]: https://arxiv.org/abs/2107.03374\\n[lcb]: https://arxiv.org/abs/2403.07974\\n[global-mmlu-lite]: https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite\\n\\n## Ethics and Safety\\n\\nEthics and safety evaluation approach and results.\\n\\n### Evaluation Approach\\n\\nOur evaluation methods include structured evaluations and internal red-teaming\\ntesting of relevant content policies. 
Red-teaming was conducted by a number of\\ndifferent teams, each with different goals and human evaluation metrics. These\\nmodels were evaluated against a number of different categories relevant to\\nethics and safety, including:\\n\\n-   **Child Safety**: Evaluation of text-to-text and image to text prompts\\n    covering child safety policies, including child sexual abuse and\\n    exploitation.\\n-   **Content Safety:** Evaluation of text-to-text and image to text prompts\\n    covering safety policies including, harassment, violence and gore, and hate\\n    speech.\\n-   **Representational Harms**: Evaluation of text-to-text and image to text\\n    prompts covering safety policies including bias, stereotyping, and harmful\\n    associations or inaccuracies.\\n\\nIn addition to development level evaluations, we conduct \\\"assurance\\nevaluations\\\" which are our 'arms-length' internal evaluations for responsibility\\ngovernance decision making. They are conducted separately from the model\\ndevelopment team, to inform decision making about release. High level findings\\nare fed back to the model team, but prompt sets are held-out to prevent\\noverfitting and preserve the results' ability to inform decision making. Notable\\nassurance evaluation results are reported to our Responsibility & Safety Council\\nas part of release review.\\n\\n### Evaluation Results\\n\\nFor all areas of safety testing, we saw safe levels of performance across the\\ncategories of child safety, content safety, and representational harms relative\\nto previous Gemma models. All testing was conducted without safety filters to\\nevaluate the model capabilities and behaviors. For text-to-text,  image-to-text,\\nand audio-to-text, and across all model sizes, the model produced minimal policy\\nviolations, and showed significant improvements over previous Gemma models'\\nperformance with respect to high severity violations. 
A limitation of our\\nevaluations was they included primarily English language prompts.\\n\\n## Usage and Limitations\\n\\nThese models have certain limitations that users should be aware of.\\n\\n### Intended Usage\\n\\nOpen generative models have a wide range of applications across various\\nindustries and domains. The following list of potential uses is not\\ncomprehensive. The purpose of this list is to provide contextual information\\nabout the possible use-cases that the model creators considered as part of model\\ntraining and development.\\n\\n-   Content Creation and Communication\\n    -   **Text Generation**: Generate creative text formats such as\\n        poems, scripts, code, marketing copy, and email drafts.\\n    -   **Chatbots and Conversational AI**: Power conversational\\n        interfaces for customer service, virtual assistants, or interactive\\n        applications.\\n    -   **Text Summarization**: Generate concise summaries of a text\\n        corpus, research papers, or reports.\\n    -   **Image Data Extraction**: Extract, interpret, and summarize\\n        visual data for text communications.\\n    -   **Audio Data Extraction**: Transcribe spoken language, translate speech\\n        to text in other languages, and analyze sound-based data.\\n-   Research and Education\\n    -   **Natural Language Processing (NLP) and generative model\\n        Research**: These models can serve as a foundation for researchers to\\n        experiment with generative models and NLP techniques, develop\\n        algorithms, and contribute to the advancement of the field.\\n    -   **Language Learning Tools**: Support interactive language\\n        learning experiences, aiding in grammar correction or providing writing\\n        practice.\\n    -   **Knowledge Exploration**: Assist researchers in exploring large\\n        bodies of data by generating summaries or answering questions about\\n        specific topics.\\n\\n### Limitations\\n\\n-   Training 
Data\\n    -   The quality and diversity of the training data significantly\\n        influence the model's capabilities. Biases or gaps in the training data\\n        can lead to limitations in the model's responses.\\n    -   The scope of the training dataset determines the subject areas\\n        the model can handle effectively.\\n-   Context and Task Complexity\\n    -   Models are better at tasks that can be framed with clear\\n        prompts and instructions. Open-ended or highly complex tasks might be\\n        challenging.\\n    -   A model's performance can be influenced by the amount of context\\n        provided (longer context generally leads to better outputs, up to a\\n        certain point).\\n-   Language Ambiguity and Nuance\\n    -   Natural language is inherently complex. Models might struggle\\n        to grasp subtle nuances, sarcasm, or figurative language.\\n-   Factual Accuracy\\n    -   Models generate responses based on information they learned\\n        from their training datasets, but they are not knowledge bases. They\\n        may generate incorrect or outdated factual statements.\\n-   Common Sense\\n    -   Models rely on statistical patterns in language. They might\\n        lack the ability to apply common sense reasoning in certain situations.\\n\\n### Ethical Considerations and Risks\\n\\nThe development of generative models raises several ethical concerns. 
In\\ncreating an open model, we have carefully considered the following:\\n\\n-   Bias and Fairness\\n    -   Generative models trained on large-scale, real-world text and image data\\n        can reflect socio-cultural biases embedded in the training material.\\n        These models underwent careful scrutiny, input data pre-processing\\n        described and posterior evaluations reported in this card.\\n-   Misinformation and Misuse\\n    -   Generative models can be misused to generate text that is\\n        false, misleading, or harmful.\\n    -   Guidelines are provided for responsible use with the model, see the\\n        [Responsible Generative AI Toolkit](https://ai.google.dev/responsible).\\n-   Transparency and Accountability:\\n    -   This model card summarizes details on the models' architecture,\\n        capabilities, limitations, and evaluation processes.\\n    -   A responsibly developed open model offers the opportunity to\\n        share innovation by making generative model technology accessible to\\n        developers and researchers across the AI ecosystem.\\n\\nRisks identified and mitigations:\\n\\n-   **Perpetuation of biases**: It's encouraged to perform continuous monitoring\\n    (using evaluation metrics, human review) and the exploration of de-biasing\\n    techniques during model training, fine-tuning, and other use cases.\\n-   **Generation of harmful content**: Mechanisms and guidelines for content\\n    safety are essential. Developers are encouraged to exercise caution and\\n    implement appropriate content safety safeguards based on their specific\\n    product policies and application use cases.\\n-   **Misuse for malicious purposes**: Technical limitations and developer\\n    and end-user education can help mitigate against malicious applications of\\n    generative models. Educational resources and reporting mechanisms for users\\n    to flag misuse are provided. 
Prohibited uses of Gemma models are outlined\\n    in the\\n    [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy).\\n-   **Privacy violations**: Models were trained on data filtered for removal of\\n    certain personal information and other sensitive data. Developers are\\n    encouraged to adhere to privacy regulations with privacy-preserving\\n    techniques.\\n\\n### Benefits\\n\\nAt the time of release, this family of models provides high-performance open\\ngenerative model implementations designed from the ground up for responsible AI\\ndevelopment compared to similarly sized models.\\n\\nUsing the benchmark evaluation metrics described in this document, these models\\nhave shown to provide superior performance to other, comparably-sized open model\\nalternatives.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"gemma\",\n      \"url\": \"https://huggingface.co/unsloth/gemma-3n-E4B-it-GGUF/resolve/main/gemma-3n-E4B-it-Q4_K_M.gguf\",\n      \"memory\": 4425974000,\n      \"properties\": {\n        \"jinja\": \"true\"\n      },\n      \"sha256\": \"43b489bb77a81bda85180e7c490d40ad7f1d5c2ce654c9b05e15e104bd3c777e\",\n      \"backend\": \"llama-cpp\"\n    },\n    {\n      \"id\": \"OpenVINO/mistral-7B-instruct-v0.2-int4-ov\",\n      \"name\": \"OpenVINO/mistral-7B-instruct-v0.2-int4-ov\",\n      \"description\": \"# Mistral-7B-Instruct-v0.2-int4-ov\\n* Model creator: [Mistral AI](https://huggingface.co/mistralai)\\n * Original model: [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)\\n\\n## Description\\n\\nThis is [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) model converted to the [OpenVINO™ IR](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html) (Intermediate Representation) format.\\n\\n## Compatibility\\n\\nThe provided OpenVINO™ IR model is compatible with:\\n\\n* OpenVINO version 2024.2.0 and higher\\n* Optimum Intel 
1.19.0 and higher\\n\\n## Running Model Inference with [Optimum Intel](https://huggingface.co/docs/optimum/intel/index)\\n\\n\\n1. Install packages required for using [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) integration with the OpenVINO backend:\\n\\n```\\npip install optimum[openvino]\\n```\\n\\n2. Run model inference:\\n\\n```\\nfrom transformers import AutoTokenizer\\nfrom optimum.intel.openvino import OVModelForCausalLM\\n\\nmodel_id = \\\"OpenVINO/<model_name>\\\"\\ntokenizer = AutoTokenizer.from_pretrained(model_id)\\nmodel = OVModelForCausalLM.from_pretrained(model_id)\\n\\ninputs = tokenizer(\\\"What is OpenVINO?\\\", return_tensors=\\\"pt\\\")\\n\\noutputs = model.generate(**inputs, max_length=200)\\ntext = tokenizer.batch_decode(outputs)[0]\\nprint(text)\\n```\\n\\nFor more examples and possible optimizations, refer to the [OpenVINO Large Language Model Inference Guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html).\\n\\n## Running Model Inference with [OpenVINO GenAI](https://github.com/openvinotoolkit/openvino.genai)\\n\\n1. Install packages required for using OpenVINO GenAI.\\n```\\npip install openvino-genai huggingface_hub\\n```\\n\\n2. Download model from HuggingFace Hub\\n   \\n```\\nimport huggingface_hub as hf_hub\\n\\nmodel_id = \\\"OpenVINO/Mistral-7B-Instruct-v0.2-int4-ov\\\"\\nmodel_path = \\\"Mistral-7B-Instruct-v0.2-int4-ov\\\"\\n\\nhf_hub.snapshot_download(model_id, local_dir=model_path)\\n\\n```\\n\\n3. 
Run model inference:\\n\\n```\\nimport openvino_genai as ov_genai\\n\\ndevice = \\\"CPU\\\"\\npipe = ov_genai.LLMPipeline(model_path, device)\\nprint(pipe.generate(\\\"What is OpenVINO?\\\", max_length=200))\\n```\\n\\nMore GenAI usage examples can be found in OpenVINO GenAI library [docs](https://github.com/openvinotoolkit/openvino.genai/blob/master/src/README.md) and [samples](https://github.com/openvinotoolkit/openvino.genai?tab=readme-ov-file#openvino-genai-samples)\\n\\n## Limitations\\n\\nCheck the original model card for [limitations](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2#limitations).\\n\\n## Legal information\\n\\nThe original model is distributed under [apache-2.0](https://choosealicense.com/licenses/apache-2.0/) license. More details can be found in [original model card](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2).\\n\\n## Disclaimer\\n\\nIntel is committed to respecting human rights and avoiding causing or contributing to adverse impacts on human rights. See [Intel’s Global Human Rights Principles](https://www.intel.com/content/dam/www/central-libraries/us/en/documents/policy-human-rights.pdf). 
Intel’s products and software are intended only to be used in applications that do not cause or contribute to adverse impacts on human rights.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"apache-2.0\",\n      \"url\": \"huggingface:/OpenVINO/mistral-7B-instruct-v0.2-int4-ov\",\n      \"backend\": \"openvino\"\n    }\n  ],\n  \"categories\": [\n    {\n      \"id\": \"natural-language-processing\",\n      \"name\": \"Natural Language Processing\",\n      \"description\": \"Models that work with text: classify, summarize, translate, or generate text.\"\n    },\n    {\n      \"id\": \"computer-vision\",\n      \"description\": \"Process images, from classification to object detection and segmentation.\",\n      \"name\": \"Computer Vision\"\n    },\n    {\n      \"id\": \"audio\",\n      \"description\": \"Recognize speech or classify audio with audio models.\",\n      \"name\": \"Audio\"\n    },\n    {\n      \"id\": \"multimodal\",\n      \"description\": \"Models that combine multiple modalities, such as text, images, and audio.\",\n      \"name\": \"Multimodal\"\n    }\n  ]\n}\n"
  },
  {
    "path": "packages/backend/src/assets/inference-images.json",
    "content": "{\n  \"whispercpp\": {\n    \"default\": \"quay.io/ramalama/ramalama-whisper-server@sha256:2ce4e2751672e3baf76d6f220100160da86ff5a98001b76392aeae9da2d90b18\"\n  },\n  \"llamacpp\": {\n    \"default\": \"quay.io/ramalama/ramalama-llama-server@sha256:293f66f2dfea8e21393dc03e898616b2a71f0a72a0f3bc5f936439130ada2648\",\n    \"cuda\": \"quay.io/ramalama/cuda-llama-server@sha256:b9ced640539c72edee2f946b69618a6d30b68700ac9342d1b9483831988d40ef\",\n    \"intel\": \"quay.io/ramalama/intel-gpu-llama-server@sha256:ea2aa37c0a4af544de80da9d8aa53a0641c91ccfdca3a329a251685a96210551\"\n  },\n  \"openvino\": {\n    \"default\": \"quay.io/ramalama/openvino@sha256:e026ecbdf6ae222a193badad5b0dd2253362e366e22c8b402f5a492803b10fd5\"\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/assets/instructlab-images.json",
    "content": "{\n  \"default\": \"docker.io/redhat/instructlab@sha256:c6b2ecb4547b1f43b5539ee99bdbf5c9ae40599fabe1c740622295d9721b91c4\"\n}\n"
  },
  {
    "path": "packages/backend/src/assets/llama-stack-images.json",
    "content": "{\n  \"default\": \"ghcr.io/containers/podman-ai-lab-stack:a06f399ebf7cb2645af126da0e84395db9bb0d1a\"\n}\n"
  },
  {
    "path": "packages/backend/src/assets/llama-stack-playground-images.json",
    "content": "{\n  \"default\": \"quay.io/podman-ai-lab/llama-stack-playground@sha256:2ee73137c0b2b401c2703b5881dd84c07f0baa385408e7c02f076a2804c689c2\"\n}\n"
  },
  {
    "path": "packages/backend/src/assets/openai.json",
    "content": "{\n  \"openapi\": \"3.1.0\",\n  \"info\": {\n    \"title\": \"OpenAI API\",\n    \"version\": \"0.3.2\"\n  },\n  \"servers\": [\n    {\n      \"url\": \"\",\n      \"description\": \"description\"\n    }\n  ],\n  \"paths\": {\n    \"/v1/completions\": {\n      \"post\": {\n        \"tags\": [\"OpenAI V1\"],\n        \"summary\": \"Completion\",\n        \"operationId\": \"create_completion_v1_completions_post\",\n        \"requestBody\": {\n          \"content\": {\n            \"application/json\": {\n              \"schema\": {\n                \"$ref\": \"#/components/schemas/CreateCompletionRequest\"\n              }\n            }\n          },\n          \"required\": true\n        },\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Successful Response\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"anyOf\": [\n                    {\n                      \"$ref\": \"#/components/schemas/CreateCompletionResponse\"\n                    },\n                    {\n                      \"type\": \"string\"\n                    }\n                  ],\n                  \"title\": \"Completion response, when stream=False\"\n                }\n              },\n              \"text/event-stream\": {\n                \"schema\": {\n                  \"type\": \"string\",\n                  \"title\": \"Server Side Streaming response, when stream=True. See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format\",\n                  \"example\": \"data: {... see CreateCompletionResponse ...} \\\\n\\\\n data: ... \\\\n\\\\n ... 
data: [DONE]\"\n                }\n              }\n            }\n          },\n          \"422\": {\n            \"description\": \"Validation Error\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/HTTPValidationError\"\n                }\n              }\n            }\n          }\n        },\n        \"security\": [\n          {\n            \"HTTPBearer\": []\n          }\n        ]\n      }\n    },\n    \"/v1/embeddings\": {\n      \"post\": {\n        \"tags\": [\"OpenAI V1\"],\n        \"summary\": \"Embedding\",\n        \"operationId\": \"create_embedding_v1_embeddings_post\",\n        \"requestBody\": {\n          \"content\": {\n            \"application/json\": {\n              \"schema\": {\n                \"$ref\": \"#/components/schemas/CreateEmbeddingRequest\"\n              }\n            }\n          },\n          \"required\": true\n        },\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Successful Response\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {}\n              }\n            }\n          },\n          \"422\": {\n            \"description\": \"Validation Error\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/HTTPValidationError\"\n                }\n              }\n            }\n          }\n        },\n        \"security\": [\n          {\n            \"HTTPBearer\": []\n          }\n        ]\n      }\n    },\n    \"/v1/chat/completions\": {\n      \"post\": {\n        \"tags\": [\"OpenAI V1\"],\n        \"summary\": \"Chat\",\n        \"operationId\": \"create_chat_completion_v1_chat_completions_post\",\n        \"requestBody\": {\n          \"content\": {\n            \"application/json\": {\n              \"schema\": {\n                
\"$ref\": \"#/components/schemas/CreateChatCompletionRequest\"\n              },\n              \"examples\": {\n                \"normal\": {\n                  \"summary\": \"Chat Completion\",\n                  \"value\": {\n                    \"model\": \"gpt-3.5-turbo\",\n                    \"messages\": [\n                      {\n                        \"role\": \"system\",\n                        \"content\": \"You are a helpful assistant.\"\n                      },\n                      {\n                        \"role\": \"user\",\n                        \"content\": \"What is the capital of France?\"\n                      }\n                    ]\n                  }\n                },\n                \"json_mode\": {\n                  \"summary\": \"JSON Mode\",\n                  \"value\": {\n                    \"model\": \"gpt-3.5-turbo\",\n                    \"messages\": [\n                      {\n                        \"role\": \"system\",\n                        \"content\": \"You are a helpful assistant.\"\n                      },\n                      {\n                        \"role\": \"user\",\n                        \"content\": \"Who won the world series in 2020\"\n                      }\n                    ],\n                    \"response_format\": {\n                      \"type\": \"json_object\"\n                    }\n                  }\n                },\n                \"tool_calling\": {\n                  \"summary\": \"Tool Calling\",\n                  \"value\": {\n                    \"model\": \"gpt-3.5-turbo\",\n                    \"messages\": [\n                      {\n                        \"role\": \"system\",\n                        \"content\": \"You are a helpful assistant.\"\n                      },\n                      {\n                        \"role\": \"user\",\n                        \"content\": \"Extract Jason is 30 years old.\"\n                      }\n                 
   ],\n                    \"tools\": [\n                      {\n                        \"type\": \"function\",\n                        \"function\": {\n                          \"name\": \"User\",\n                          \"description\": \"User record\",\n                          \"parameters\": {\n                            \"type\": \"object\",\n                            \"properties\": {\n                              \"name\": {\n                                \"type\": \"string\"\n                              },\n                              \"age\": {\n                                \"type\": \"number\"\n                              }\n                            },\n                            \"required\": [\"name\", \"age\"]\n                          }\n                        }\n                      }\n                    ],\n                    \"tool_choice\": {\n                      \"type\": \"function\",\n                      \"function\": {\n                        \"name\": \"User\"\n                      }\n                    }\n                  }\n                },\n                \"logprobs\": {\n                  \"summary\": \"Logprobs\",\n                  \"value\": {\n                    \"model\": \"gpt-3.5-turbo\",\n                    \"messages\": [\n                      {\n                        \"role\": \"system\",\n                        \"content\": \"You are a helpful assistant.\"\n                      },\n                      {\n                        \"role\": \"user\",\n                        \"content\": \"What is the capital of France?\"\n                      }\n                    ],\n                    \"logprobs\": true,\n                    \"top_logprobs\": 10\n                  }\n                }\n              }\n            }\n          },\n          \"required\": true\n        },\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Successful 
Response\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"anyOf\": [\n                    {\n                      \"$ref\": \"#/components/schemas/CreateChatCompletionResponse\"\n                    },\n                    {\n                      \"type\": \"string\"\n                    }\n                  ],\n                  \"title\": \"Completion response, when stream=False\"\n                }\n              },\n              \"text/event-stream\": {\n                \"schema\": {\n                  \"type\": \"string\",\n                  \"title\": \"Server Side Streaming response, when stream=True. See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format\",\n                  \"example\": \"data: {... see CreateChatCompletionResponse ...} \\\\n\\\\n data: ... \\\\n\\\\n ... 
data: [DONE]\"\n                }\n              }\n            }\n          },\n          \"422\": {\n            \"description\": \"Validation Error\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/HTTPValidationError\"\n                }\n              }\n            }\n          }\n        },\n        \"security\": [\n          {\n            \"HTTPBearer\": []\n          }\n        ]\n      }\n    },\n    \"/v1/models\": {\n      \"get\": {\n        \"tags\": [\"OpenAI V1\"],\n        \"summary\": \"Models\",\n        \"operationId\": \"get_models_v1_models_get\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Successful Response\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/ModelList\"\n                }\n              }\n            }\n          }\n        },\n        \"security\": [\n          {\n            \"HTTPBearer\": []\n          }\n        ]\n      }\n    },\n    \"/extras/tokenize\": {\n      \"post\": {\n        \"tags\": [\"Extras\"],\n        \"summary\": \"Tokenize\",\n        \"operationId\": \"tokenize_extras_tokenize_post\",\n        \"requestBody\": {\n          \"content\": {\n            \"application/json\": {\n              \"schema\": {\n                \"$ref\": \"#/components/schemas/TokenizeInputRequest\"\n              }\n            }\n          },\n          \"required\": true\n        },\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Successful Response\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/TokenizeInputResponse\"\n                }\n              }\n            }\n          },\n          \"422\": {\n            \"description\": \"Validation 
Error\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/HTTPValidationError\"\n                }\n              }\n            }\n          }\n        },\n        \"security\": [\n          {\n            \"HTTPBearer\": []\n          }\n        ]\n      }\n    },\n    \"/extras/tokenize/count\": {\n      \"post\": {\n        \"tags\": [\"Extras\"],\n        \"summary\": \"Tokenize Count\",\n        \"operationId\": \"count_query_tokens_extras_tokenize_count_post\",\n        \"requestBody\": {\n          \"content\": {\n            \"application/json\": {\n              \"schema\": {\n                \"$ref\": \"#/components/schemas/TokenizeInputRequest\"\n              }\n            }\n          },\n          \"required\": true\n        },\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Successful Response\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/TokenizeInputCountResponse\"\n                }\n              }\n            }\n          },\n          \"422\": {\n            \"description\": \"Validation Error\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/HTTPValidationError\"\n                }\n              }\n            }\n          }\n        },\n        \"security\": [\n          {\n            \"HTTPBearer\": []\n          }\n        ]\n      }\n    },\n    \"/extras/detokenize\": {\n      \"post\": {\n        \"tags\": [\"Extras\"],\n        \"summary\": \"Detokenize\",\n        \"operationId\": \"detokenize_extras_detokenize_post\",\n        \"requestBody\": {\n          \"content\": {\n            \"application/json\": {\n              \"schema\": {\n                \"$ref\": 
\"#/components/schemas/DetokenizeInputRequest\"\n              }\n            }\n          },\n          \"required\": true\n        },\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Successful Response\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/DetokenizeInputResponse\"\n                }\n              }\n            }\n          },\n          \"422\": {\n            \"description\": \"Validation Error\",\n            \"content\": {\n              \"application/json\": {\n                \"schema\": {\n                  \"$ref\": \"#/components/schemas/HTTPValidationError\"\n                }\n              }\n            }\n          }\n        },\n        \"security\": [\n          {\n            \"HTTPBearer\": []\n          }\n        ]\n      }\n    }\n  },\n  \"components\": {\n    \"schemas\": {\n      \"ChatCompletionFunction\": {\n        \"properties\": {\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          },\n          \"description\": {\n            \"type\": \"string\",\n            \"title\": \"Description\"\n          },\n          \"parameters\": {\n            \"additionalProperties\": {\n              \"anyOf\": [\n                {\n                  \"type\": \"integer\"\n                },\n                {\n                  \"type\": \"string\"\n                },\n                {\n                  \"type\": \"boolean\"\n                },\n                {\n                  \"items\": {},\n                  \"type\": \"array\"\n                },\n                {\n                  \"type\": \"object\"\n                },\n                {\n                  \"type\": \"null\"\n                }\n              ]\n            },\n            \"type\": \"object\",\n            \"title\": \"Parameters\"\n          }\n        },\n        
\"type\": \"object\",\n        \"required\": [\"name\", \"parameters\"],\n        \"title\": \"ChatCompletionFunction\"\n      },\n      \"ChatCompletionMessageToolCall\": {\n        \"properties\": {\n          \"id\": {\n            \"type\": \"string\",\n            \"title\": \"Id\"\n          },\n          \"type\": {\n            \"type\": \"string\",\n            \"const\": \"function\",\n            \"title\": \"Type\"\n          },\n          \"function\": {\n            \"$ref\": \"#/components/schemas/ChatCompletionMessageToolCallFunction\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"id\", \"type\", \"function\"],\n        \"title\": \"ChatCompletionMessageToolCall\"\n      },\n      \"ChatCompletionMessageToolCallFunction\": {\n        \"properties\": {\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          },\n          \"arguments\": {\n            \"type\": \"string\",\n            \"title\": \"Arguments\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"name\", \"arguments\"],\n        \"title\": \"ChatCompletionMessageToolCallFunction\"\n      },\n      \"ChatCompletionNamedToolChoice\": {\n        \"properties\": {\n          \"type\": {\n            \"type\": \"string\",\n            \"const\": \"function\",\n            \"title\": \"Type\"\n          },\n          \"function\": {\n            \"$ref\": \"#/components/schemas/ChatCompletionNamedToolChoiceFunction\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"type\", \"function\"],\n        \"title\": \"ChatCompletionNamedToolChoice\"\n      },\n      \"ChatCompletionNamedToolChoiceFunction\": {\n        \"properties\": {\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"name\"],\n        \"title\": 
\"ChatCompletionNamedToolChoiceFunction\"\n      },\n      \"ChatCompletionRequestAssistantMessage\": {\n        \"properties\": {\n          \"role\": {\n            \"type\": \"string\",\n            \"const\": \"assistant\",\n            \"title\": \"Role\"\n          },\n          \"content\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Content\"\n          },\n          \"tool_calls\": {\n            \"items\": {\n              \"$ref\": \"#/components/schemas/ChatCompletionMessageToolCall\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Tool Calls\"\n          },\n          \"function_call\": {\n            \"$ref\": \"#/components/schemas/ChatCompletionRequestAssistantMessageFunctionCall\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"role\", \"content\"],\n        \"title\": \"ChatCompletionRequestAssistantMessage\"\n      },\n      \"ChatCompletionRequestAssistantMessageFunctionCall\": {\n        \"properties\": {\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          },\n          \"arguments\": {\n            \"type\": \"string\",\n            \"title\": \"Arguments\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"name\", \"arguments\"],\n        \"title\": \"ChatCompletionRequestAssistantMessageFunctionCall\"\n      },\n      \"ChatCompletionRequestFunctionCallOption\": {\n        \"properties\": {\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"name\"],\n        \"title\": \"ChatCompletionRequestFunctionCallOption\"\n      },\n      \"ChatCompletionRequestFunctionMessage\": {\n        \"properties\": {\n          
\"role\": {\n            \"type\": \"string\",\n            \"const\": \"function\",\n            \"title\": \"Role\"\n          },\n          \"content\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Content\"\n          },\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"role\", \"content\", \"name\"],\n        \"title\": \"ChatCompletionRequestFunctionMessage\"\n      },\n      \"ChatCompletionRequestMessageContentPartImage\": {\n        \"properties\": {\n          \"type\": {\n            \"type\": \"string\",\n            \"const\": \"image_url\",\n            \"title\": \"Type\"\n          },\n          \"image_url\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"$ref\": \"#/components/schemas/ChatCompletionRequestMessageContentPartImageImageUrl\"\n              }\n            ],\n            \"title\": \"Image Url\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"type\", \"image_url\"],\n        \"title\": \"ChatCompletionRequestMessageContentPartImage\"\n      },\n      \"ChatCompletionRequestMessageContentPartImageImageUrl\": {\n        \"properties\": {\n          \"url\": {\n            \"type\": \"string\",\n            \"title\": \"Url\"\n          },\n          \"detail\": {\n            \"type\": \"string\",\n            \"enum\": [\"auto\", \"low\", \"high\"],\n            \"title\": \"Detail\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"url\"],\n        \"title\": \"ChatCompletionRequestMessageContentPartImageImageUrl\"\n      },\n      \"ChatCompletionRequestMessageContentPartText\": {\n        
\"properties\": {\n          \"type\": {\n            \"type\": \"string\",\n            \"const\": \"text\",\n            \"title\": \"Type\"\n          },\n          \"text\": {\n            \"type\": \"string\",\n            \"title\": \"Text\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"type\", \"text\"],\n        \"title\": \"ChatCompletionRequestMessageContentPartText\"\n      },\n      \"ChatCompletionRequestResponseFormat\": {\n        \"properties\": {\n          \"type\": {\n            \"type\": \"string\",\n            \"enum\": [\"text\", \"json_object\"],\n            \"title\": \"Type\"\n          },\n          \"schema\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\"\n              },\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"boolean\"\n              },\n              {\n                \"items\": {},\n                \"type\": \"array\"\n              },\n              {\n                \"type\": \"object\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Schema\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"type\"],\n        \"title\": \"ChatCompletionRequestResponseFormat\"\n      },\n      \"ChatCompletionRequestSystemMessage\": {\n        \"properties\": {\n          \"role\": {\n            \"type\": \"string\",\n            \"const\": \"system\",\n            \"title\": \"Role\"\n          },\n          \"content\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Content\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"role\", \"content\"],\n        \"title\": 
\"ChatCompletionRequestSystemMessage\"\n      },\n      \"ChatCompletionRequestToolMessage\": {\n        \"properties\": {\n          \"role\": {\n            \"type\": \"string\",\n            \"const\": \"tool\",\n            \"title\": \"Role\"\n          },\n          \"content\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Content\"\n          },\n          \"tool_call_id\": {\n            \"type\": \"string\",\n            \"title\": \"Tool Call Id\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"role\", \"content\", \"tool_call_id\"],\n        \"title\": \"ChatCompletionRequestToolMessage\"\n      },\n      \"ChatCompletionRequestUserMessage\": {\n        \"properties\": {\n          \"role\": {\n            \"type\": \"string\",\n            \"const\": \"user\",\n            \"title\": \"Role\"\n          },\n          \"content\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"items\": {\n                  \"anyOf\": [\n                    {\n                      \"$ref\": \"#/components/schemas/ChatCompletionRequestMessageContentPartText\"\n                    },\n                    {\n                      \"$ref\": \"#/components/schemas/ChatCompletionRequestMessageContentPartImage\"\n                    }\n                  ]\n                },\n                \"type\": \"array\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Content\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"role\", \"content\"],\n        \"title\": \"ChatCompletionRequestUserMessage\"\n      },\n      \"ChatCompletionResponseChoice\": {\n        \"properties\": {\n      
    \"index\": {\n            \"type\": \"integer\",\n            \"title\": \"Index\"\n          },\n          \"message\": {\n            \"$ref\": \"#/components/schemas/ChatCompletionResponseMessage\"\n          },\n          \"logprobs\": {\n            \"anyOf\": [\n              {\n                \"$ref\": \"#/components/schemas/CompletionLogprobs\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ]\n          },\n          \"finish_reason\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Finish Reason\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"index\", \"message\", \"logprobs\", \"finish_reason\"],\n        \"title\": \"ChatCompletionResponseChoice\"\n      },\n      \"ChatCompletionResponseFunctionCall\": {\n        \"properties\": {\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          },\n          \"arguments\": {\n            \"type\": \"string\",\n            \"title\": \"Arguments\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"name\", \"arguments\"],\n        \"title\": \"ChatCompletionResponseFunctionCall\"\n      },\n      \"ChatCompletionResponseMessage\": {\n        \"properties\": {\n          \"content\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Content\"\n          },\n          \"tool_calls\": {\n            \"items\": {\n              \"$ref\": \"#/components/schemas/ChatCompletionMessageToolCall\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Tool Calls\"\n          },\n          \"role\": {\n            
\"type\": \"string\",\n            \"enum\": [\"assistant\", \"function\"],\n            \"title\": \"Role\"\n          },\n          \"function_call\": {\n            \"$ref\": \"#/components/schemas/ChatCompletionResponseFunctionCall\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"content\", \"role\"],\n        \"title\": \"ChatCompletionResponseMessage\"\n      },\n      \"ChatCompletionTool\": {\n        \"properties\": {\n          \"type\": {\n            \"type\": \"string\",\n            \"const\": \"function\",\n            \"title\": \"Type\"\n          },\n          \"function\": {\n            \"$ref\": \"#/components/schemas/ChatCompletionToolFunction\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"type\", \"function\"],\n        \"title\": \"ChatCompletionTool\"\n      },\n      \"ChatCompletionToolFunction\": {\n        \"properties\": {\n          \"name\": {\n            \"type\": \"string\",\n            \"title\": \"Name\"\n          },\n          \"description\": {\n            \"type\": \"string\",\n            \"title\": \"Description\"\n          },\n          \"parameters\": {\n            \"additionalProperties\": {\n              \"anyOf\": [\n                {\n                  \"type\": \"integer\"\n                },\n                {\n                  \"type\": \"string\"\n                },\n                {\n                  \"type\": \"boolean\"\n                },\n                {\n                  \"items\": {},\n                  \"type\": \"array\"\n                },\n                {\n                  \"type\": \"object\"\n                },\n                {\n                  \"type\": \"null\"\n                }\n              ]\n            },\n            \"type\": \"object\",\n            \"title\": \"Parameters\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"name\", \"parameters\"],\n        \"title\": 
\"ChatCompletionToolFunction\"\n      },\n      \"CompletionChoice\": {\n        \"properties\": {\n          \"text\": {\n            \"type\": \"string\",\n            \"title\": \"Text\"\n          },\n          \"index\": {\n            \"type\": \"integer\",\n            \"title\": \"Index\"\n          },\n          \"logprobs\": {\n            \"anyOf\": [\n              {\n                \"$ref\": \"#/components/schemas/CompletionLogprobs\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ]\n          },\n          \"finish_reason\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\",\n                \"enum\": [\"stop\", \"length\"]\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Finish Reason\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"text\", \"index\", \"logprobs\", \"finish_reason\"],\n        \"title\": \"CompletionChoice\"\n      },\n      \"CompletionLogprobs\": {\n        \"properties\": {\n          \"text_offset\": {\n            \"items\": {\n              \"type\": \"integer\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Text Offset\"\n          },\n          \"token_logprobs\": {\n            \"items\": {\n              \"anyOf\": [\n                {\n                  \"type\": \"number\"\n                },\n                {\n                  \"type\": \"null\"\n                }\n              ]\n            },\n            \"type\": \"array\",\n            \"title\": \"Token Logprobs\"\n          },\n          \"tokens\": {\n            \"items\": {\n              \"type\": \"string\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Tokens\"\n          },\n          \"top_logprobs\": {\n            \"items\": {\n              \"anyOf\": [\n                {\n                  
\"additionalProperties\": {\n                    \"type\": \"number\"\n                  },\n                  \"type\": \"object\"\n                },\n                {\n                  \"type\": \"null\"\n                }\n              ]\n            },\n            \"type\": \"array\",\n            \"title\": \"Top Logprobs\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"text_offset\", \"token_logprobs\", \"tokens\", \"top_logprobs\"],\n        \"title\": \"CompletionLogprobs\"\n      },\n      \"CompletionUsage\": {\n        \"properties\": {\n          \"prompt_tokens\": {\n            \"type\": \"integer\",\n            \"title\": \"Prompt Tokens\"\n          },\n          \"completion_tokens\": {\n            \"type\": \"integer\",\n            \"title\": \"Completion Tokens\"\n          },\n          \"total_tokens\": {\n            \"type\": \"integer\",\n            \"title\": \"Total Tokens\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"prompt_tokens\", \"completion_tokens\", \"total_tokens\"],\n        \"title\": \"CompletionUsage\"\n      },\n      \"CreateChatCompletionRequest\": {\n        \"properties\": {\n          \"messages\": {\n            \"items\": {\n              \"anyOf\": [\n                {\n                  \"$ref\": \"#/components/schemas/ChatCompletionRequestSystemMessage\"\n                },\n                {\n                  \"$ref\": \"#/components/schemas/ChatCompletionRequestUserMessage\"\n                },\n                {\n                  \"$ref\": \"#/components/schemas/ChatCompletionRequestAssistantMessage\"\n                },\n                {\n                  \"$ref\": \"#/components/schemas/ChatCompletionRequestToolMessage\"\n                },\n                {\n                  \"$ref\": \"#/components/schemas/ChatCompletionRequestFunctionMessage\"\n                }\n              ]\n            },\n            \"type\": 
\"array\",\n            \"title\": \"Messages\",\n            \"description\": \"A list of messages to generate completions for.\",\n            \"default\": []\n          },\n          \"functions\": {\n            \"anyOf\": [\n              {\n                \"items\": {\n                  \"$ref\": \"#/components/schemas/ChatCompletionFunction\"\n                },\n                \"type\": \"array\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Functions\",\n            \"description\": \"A list of functions to apply to the generated completions.\"\n          },\n          \"function_call\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\",\n                \"enum\": [\"none\", \"auto\"]\n              },\n              {\n                \"$ref\": \"#/components/schemas/ChatCompletionRequestFunctionCallOption\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Function Call\",\n            \"description\": \"A function to apply to the generated completions.\"\n          },\n          \"tools\": {\n            \"anyOf\": [\n              {\n                \"items\": {\n                  \"$ref\": \"#/components/schemas/ChatCompletionTool\"\n                },\n                \"type\": \"array\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Tools\",\n            \"description\": \"A list of tools to apply to the generated completions.\"\n          },\n          \"tool_choice\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\",\n                \"enum\": [\"none\", \"auto\", \"required\"]\n              },\n              {\n                \"$ref\": \"#/components/schemas/ChatCompletionNamedToolChoice\"\n              },\n              {\n          
      \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Tool Choice\",\n            \"description\": \"A tool to apply to the generated completions.\"\n          },\n          \"max_tokens\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Max Tokens\",\n            \"description\": \"The maximum number of tokens to generate. Defaults to inf\"\n          },\n          \"min_tokens\": {\n            \"type\": \"integer\",\n            \"minimum\": 0.0,\n            \"title\": \"Min Tokens\",\n            \"description\": \"The minimum number of tokens to generate. It may return fewer tokens if another condition is met (e.g. max_tokens, stop).\",\n            \"default\": 0\n          },\n          \"logprobs\": {\n            \"anyOf\": [\n              {\n                \"type\": \"boolean\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Logprobs\",\n            \"description\": \"Whether to output the logprobs or not. Default is False\",\n            \"default\": false\n          },\n          \"top_logprobs\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\",\n                \"minimum\": 0.0\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Top Logprobs\",\n            \"description\": \"The number of logprobs to generate. If None, no logprobs are generated. logprobs needs to be set to True.\"\n          },\n          \"temperature\": {\n            \"type\": \"number\",\n            \"title\": \"Temperature\",\n            \"description\": \"Adjust the randomness of the generated text.\\n\\nTemperature is a hyperparameter that controls the randomness of the generated text. 
It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run.\",\n            \"default\": 0.8\n          },\n          \"top_p\": {\n            \"type\": \"number\",\n            \"maximum\": 1.0,\n            \"minimum\": 0.0,\n            \"title\": \"Top P\",\n            \"description\": \"Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\\n\\nTop-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.\",\n            \"default\": 0.95\n          },\n          \"min_p\": {\n            \"type\": \"number\",\n            \"maximum\": 1.0,\n            \"minimum\": 0.0,\n            \"title\": \"Min P\",\n            \"description\": \"Sets a minimum base probability threshold for token selection.\\n\\nThe Min-P sampling method was designed as an alternative to Top-P, and aims to ensure a balance of quality and variety. The parameter min_p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.\",\n            \"default\": 0.05\n          },\n          \"stop\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"items\": {\n                  \"type\": \"string\"\n                },\n                \"type\": \"array\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Stop\",\n            \"description\": \"A list of tokens at which to stop generation. If None, no stop tokens are used.\"\n          },\n          \"stream\": {\n            \"type\": \"boolean\",\n            \"title\": \"Stream\",\n            \"description\": \"Whether to stream the results as they are generated. Useful for chatbots.\",\n            \"default\": false\n          },\n          \"stream_options\": {\n            \"anyOf\": [\n              {\n                \"$ref\": \"#/components/schemas/StreamOptions\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"description\": \"Options for streaming response. 
Only set this when you set stream: true.\"\n          },\n          \"presence_penalty\": {\n            \"anyOf\": [\n              {\n                \"type\": \"number\",\n                \"maximum\": 2.0,\n                \"minimum\": -2.0\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Presence Penalty\",\n            \"description\": \"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\",\n            \"default\": 0.0\n          },\n          \"frequency_penalty\": {\n            \"anyOf\": [\n              {\n                \"type\": \"number\",\n                \"maximum\": 2.0,\n                \"minimum\": -2.0\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Frequency Penalty\",\n            \"description\": \"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\",\n            \"default\": 0.0\n          },\n          \"logit_bias\": {\n            \"anyOf\": [\n              {\n                \"additionalProperties\": {\n                  \"type\": \"number\"\n                },\n                \"type\": \"object\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Logit Bias\"\n          },\n          \"seed\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Seed\"\n          },\n          \"response_format\": {\n            \"anyOf\": [\n              {\n                \"$ref\": \"#/components/schemas/ChatCompletionRequestResponseFormat\"\n            
  },\n              {\n                \"type\": \"null\"\n              }\n            ]\n          },\n          \"model\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Model\",\n            \"description\": \"The model to use for generating completions.\"\n          },\n          \"n\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"N\",\n            \"default\": 1\n          },\n          \"user\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"User\"\n          },\n          \"top_k\": {\n            \"type\": \"integer\",\n            \"minimum\": 0.0,\n            \"title\": \"Top K\",\n            \"description\": \"Limit the next token selection to the K most probable tokens.\\n\\nTop-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text.\",\n            \"default\": 40\n          },\n          \"repeat_penalty\": {\n            \"type\": \"number\",\n            \"minimum\": 0.0,\n            \"title\": \"Repeat Penalty\",\n            \"description\": \"A penalty applied to each token that is already generated. 
This helps prevent the model from repeating itself.\\n\\nRepeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.\",\n            \"default\": 1.1\n          },\n          \"logit_bias_type\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\",\n                \"enum\": [\"input_ids\", \"tokens\"]\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Logit Bias Type\"\n          },\n          \"mirostat_mode\": {\n            \"type\": \"integer\",\n            \"maximum\": 2.0,\n            \"minimum\": 0.0,\n            \"title\": \"Mirostat Mode\",\n            \"description\": \"Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)\",\n            \"default\": 0\n          },\n          \"mirostat_tau\": {\n            \"type\": \"number\",\n            \"maximum\": 10.0,\n            \"minimum\": 0.0,\n            \"title\": \"Mirostat Tau\",\n            \"description\": \"Mirostat target entropy, i.e. 
the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text\",\n            \"default\": 5.0\n          },\n          \"mirostat_eta\": {\n            \"type\": \"number\",\n            \"maximum\": 1.0,\n            \"minimum\": 0.001,\n            \"title\": \"Mirostat Eta\",\n            \"description\": \"Mirostat learning rate\",\n            \"default\": 0.1\n          },\n          \"grammar\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Grammar\"\n          }\n        },\n        \"type\": \"object\",\n        \"title\": \"CreateChatCompletionRequest\",\n        \"examples\": [\n          {\n            \"messages\": [\n              {\n                \"content\": \"You are a helpful assistant.\",\n                \"role\": \"system\"\n              },\n              {\n                \"content\": \"What is the capital of France?\",\n                \"role\": \"user\"\n              }\n            ]\n          }\n        ]\n      },\n      \"CreateChatCompletionResponse\": {\n        \"properties\": {\n          \"id\": {\n            \"type\": \"string\",\n            \"title\": \"Id\"\n          },\n          \"object\": {\n            \"type\": \"string\",\n            \"const\": \"chat.completion\",\n            \"title\": \"Object\"\n          },\n          \"created\": {\n            \"type\": \"integer\",\n            \"title\": \"Created\"\n          },\n          \"model\": {\n            \"type\": \"string\",\n            \"title\": \"Model\"\n          },\n          \"choices\": {\n            \"items\": {\n              \"$ref\": \"#/components/schemas/ChatCompletionResponseChoice\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Choices\"\n          },\n          \"usage\": {\n    
        \"$ref\": \"#/components/schemas/CompletionUsage\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"id\", \"object\", \"created\", \"model\", \"choices\", \"usage\"],\n        \"title\": \"CreateChatCompletionResponse\"\n      },\n      \"CreateCompletionRequest\": {\n        \"properties\": {\n          \"prompt\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"items\": {\n                  \"type\": \"string\"\n                },\n                \"type\": \"array\"\n              }\n            ],\n            \"title\": \"Prompt\",\n            \"description\": \"The prompt to generate completions for.\",\n            \"default\": \"\"\n          },\n          \"suffix\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Suffix\",\n            \"description\": \"A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots.\"\n          },\n          \"max_tokens\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\",\n                \"minimum\": 0.0\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Max Tokens\",\n            \"description\": \"The maximum number of tokens to generate.\",\n            \"default\": 16\n          },\n          \"min_tokens\": {\n            \"type\": \"integer\",\n            \"minimum\": 0.0,\n            \"title\": \"Min Tokens\",\n            \"description\": \"The minimum number of tokens to generate. It may return fewer tokens if another condition is met (e.g. 
max_tokens, stop).\",\n            \"default\": 0\n          },\n          \"temperature\": {\n            \"type\": \"number\",\n            \"title\": \"Temperature\",\n            \"description\": \"Adjust the randomness of the generated text.\\n\\nTemperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run.\",\n            \"default\": 0.8\n          },\n          \"top_p\": {\n            \"type\": \"number\",\n            \"maximum\": 1.0,\n            \"minimum\": 0.0,\n            \"title\": \"Top P\",\n            \"description\": \"Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\\n\\nTop-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. 
A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.\",\n            \"default\": 0.95\n          },\n          \"min_p\": {\n            \"type\": \"number\",\n            \"maximum\": 1.0,\n            \"minimum\": 0.0,\n            \"title\": \"Min P\",\n            \"description\": \"Sets a minimum base probability threshold for token selection.\\n\\nThe Min-P sampling method was designed as an alternative to Top-P, and aims to ensure a balance of quality and variety. The parameter min_p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.\",\n            \"default\": 0.05\n          },\n          \"echo\": {\n            \"type\": \"boolean\",\n            \"title\": \"Echo\",\n            \"description\": \"Whether to echo the prompt in the generated text. Useful for chatbots.\",\n            \"default\": false\n          },\n          \"stop\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"items\": {\n                  \"type\": \"string\"\n                },\n                \"type\": \"array\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Stop\",\n            \"description\": \"A list of tokens at which to stop generation. If None, no stop tokens are used.\"\n          },\n          \"stream\": {\n            \"type\": \"boolean\",\n            \"title\": \"Stream\",\n            \"description\": \"Whether to stream the results as they are generated. 
Useful for chatbots.\",\n            \"default\": false\n          },\n          \"stream_options\": {\n            \"anyOf\": [\n              {\n                \"$ref\": \"#/components/schemas/StreamOptions\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"description\": \"Options for streaming response. Only set this when you set stream: true.\"\n          },\n          \"logprobs\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\",\n                \"minimum\": 0.0\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Logprobs\",\n            \"description\": \"The number of logprobs to generate. If None, no logprobs are generated.\"\n          },\n          \"presence_penalty\": {\n            \"anyOf\": [\n              {\n                \"type\": \"number\",\n                \"maximum\": 2.0,\n                \"minimum\": -2.0\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Presence Penalty\",\n            \"description\": \"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\",\n            \"default\": 0.0\n          },\n          \"frequency_penalty\": {\n            \"anyOf\": [\n              {\n                \"type\": \"number\",\n                \"maximum\": 2.0,\n                \"minimum\": -2.0\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Frequency Penalty\",\n            \"description\": \"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\",\n            \"default\": 0.0\n          },\n          
\"logit_bias\": {\n            \"anyOf\": [\n              {\n                \"additionalProperties\": {\n                  \"type\": \"number\"\n                },\n                \"type\": \"object\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Logit Bias\"\n          },\n          \"seed\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Seed\"\n          },\n          \"model\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Model\",\n            \"description\": \"The model to use for generating completions.\"\n          },\n          \"n\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"N\",\n            \"default\": 1\n          },\n          \"best_of\": {\n            \"anyOf\": [\n              {\n                \"type\": \"integer\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Best Of\",\n            \"default\": 1\n          },\n          \"user\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"User\"\n          },\n          \"top_k\": {\n            \"type\": \"integer\",\n            \"minimum\": 0.0,\n            \"title\": \"Top K\",\n            \"description\": \"Limit the next token selection to the K most probable 
tokens.\\n\\nTop-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text.\",\n            \"default\": 40\n          },\n          \"repeat_penalty\": {\n            \"type\": \"number\",\n            \"minimum\": 0.0,\n            \"title\": \"Repeat Penalty\",\n            \"description\": \"A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\\n\\nRepeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.\",\n            \"default\": 1.1\n          },\n          \"logit_bias_type\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\",\n                \"enum\": [\"input_ids\", \"tokens\"]\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Logit Bias Type\"\n          },\n          \"mirostat_mode\": {\n            \"type\": \"integer\",\n            \"maximum\": 2.0,\n            \"minimum\": 0.0,\n            \"title\": \"Mirostat Mode\",\n            \"description\": \"Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)\",\n            \"default\": 0\n          },\n          \"mirostat_tau\": {\n            \"type\": \"number\",\n            \"maximum\": 10.0,\n            \"minimum\": 0.0,\n            \"title\": 
\"Mirostat Tau\",\n            \"description\": \"Mirostat target entropy, i.e. the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text\",\n            \"default\": 5.0\n          },\n          \"mirostat_eta\": {\n            \"type\": \"number\",\n            \"maximum\": 1.0,\n            \"minimum\": 0.001,\n            \"title\": \"Mirostat Eta\",\n            \"description\": \"Mirostat learning rate\",\n            \"default\": 0.1\n          },\n          \"grammar\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Grammar\"\n          }\n        },\n        \"type\": \"object\",\n        \"title\": \"CreateCompletionRequest\",\n        \"examples\": [\n          {\n            \"prompt\": \"\\n\\n### Instructions:\\nWhat is the capital of France?\\n\\n### Response:\\n\",\n            \"stop\": [\"\\n\", \"###\"]\n          }\n        ]\n      },\n      \"CreateCompletionResponse\": {\n        \"properties\": {\n          \"id\": {\n            \"type\": \"string\",\n            \"title\": \"Id\"\n          },\n          \"object\": {\n            \"type\": \"string\",\n            \"const\": \"text_completion\",\n            \"title\": \"Object\"\n          },\n          \"created\": {\n            \"type\": \"integer\",\n            \"title\": \"Created\"\n          },\n          \"model\": {\n            \"type\": \"string\",\n            \"title\": \"Model\"\n          },\n          \"choices\": {\n            \"items\": {\n              \"$ref\": \"#/components/schemas/CompletionChoice\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Choices\"\n          },\n          \"usage\": {\n            \"$ref\": \"#/components/schemas/CompletionUsage\"\n          }\n        },\n        \"type\": 
\"object\",\n        \"required\": [\"id\", \"object\", \"created\", \"model\", \"choices\"],\n        \"title\": \"CreateCompletionResponse\"\n      },\n      \"CreateEmbeddingRequest\": {\n        \"properties\": {\n          \"model\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Model\",\n            \"description\": \"The model to use for generating completions.\"\n          },\n          \"input\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"items\": {\n                  \"type\": \"string\"\n                },\n                \"type\": \"array\"\n              }\n            ],\n            \"title\": \"Input\",\n            \"description\": \"The input to embed.\"\n          },\n          \"user\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"User\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"input\"],\n        \"title\": \"CreateEmbeddingRequest\",\n        \"examples\": [\n          {\n            \"input\": \"The food was delicious and the waiter...\"\n          }\n        ]\n      },\n      \"DetokenizeInputRequest\": {\n        \"properties\": {\n          \"model\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Model\",\n            \"description\": \"The model to use for generating completions.\"\n          },\n          \"tokens\": {\n            \"items\": {\n              \"type\": \"integer\"\n            },\n            
\"type\": \"array\",\n            \"title\": \"Tokens\",\n            \"description\": \"A list of tokens to detokenize.\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"tokens\"],\n        \"title\": \"DetokenizeInputRequest\",\n        \"example\": [\n          {\n            \"tokens\": [123, 321, 222]\n          }\n        ]\n      },\n      \"DetokenizeInputResponse\": {\n        \"properties\": {\n          \"text\": {\n            \"type\": \"string\",\n            \"title\": \"Text\",\n            \"description\": \"The detokenized text.\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"text\"],\n        \"title\": \"DetokenizeInputResponse\",\n        \"example\": {\n          \"text\": \"How many tokens in this query?\"\n        }\n      },\n      \"HTTPValidationError\": {\n        \"properties\": {\n          \"detail\": {\n            \"items\": {\n              \"$ref\": \"#/components/schemas/ValidationError\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Detail\"\n          }\n        },\n        \"type\": \"object\",\n        \"title\": \"HTTPValidationError\"\n      },\n      \"ModelData\": {\n        \"properties\": {\n          \"id\": {\n            \"type\": \"string\",\n            \"title\": \"Id\"\n          },\n          \"object\": {\n            \"type\": \"string\",\n            \"const\": \"model\",\n            \"title\": \"Object\"\n          },\n          \"owned_by\": {\n            \"type\": \"string\",\n            \"title\": \"Owned By\"\n          },\n          \"permissions\": {\n            \"items\": {\n              \"type\": \"string\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Permissions\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"id\", \"object\", \"owned_by\", \"permissions\"],\n        \"title\": \"ModelData\"\n      },\n      \"ModelList\": {\n     
   \"properties\": {\n          \"object\": {\n            \"type\": \"string\",\n            \"const\": \"list\",\n            \"title\": \"Object\"\n          },\n          \"data\": {\n            \"items\": {\n              \"$ref\": \"#/components/schemas/ModelData\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Data\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"object\", \"data\"],\n        \"title\": \"ModelList\"\n      },\n      \"StreamOptions\": {\n        \"properties\": {\n          \"include_usage\": {\n            \"anyOf\": [\n              {\n                \"type\": \"boolean\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Include Usage\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"include_usage\"],\n        \"title\": \"StreamOptions\"\n      },\n      \"TokenizeInputCountResponse\": {\n        \"properties\": {\n          \"count\": {\n            \"type\": \"integer\",\n            \"title\": \"Count\",\n            \"description\": \"The number of tokens in the input.\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"count\"],\n        \"title\": \"TokenizeInputCountResponse\",\n        \"example\": {\n          \"count\": 5\n        }\n      },\n      \"TokenizeInputRequest\": {\n        \"properties\": {\n          \"model\": {\n            \"anyOf\": [\n              {\n                \"type\": \"string\"\n              },\n              {\n                \"type\": \"null\"\n              }\n            ],\n            \"title\": \"Model\",\n            \"description\": \"The model to use for generating completions.\"\n          },\n          \"input\": {\n            \"type\": \"string\",\n            \"title\": \"Input\",\n            \"description\": \"The input to tokenize.\"\n          }\n        },\n        
\"type\": \"object\",\n        \"required\": [\"input\"],\n        \"title\": \"TokenizeInputRequest\",\n        \"examples\": [\n          {\n            \"input\": \"How many tokens in this query?\"\n          }\n        ]\n      },\n      \"TokenizeInputResponse\": {\n        \"properties\": {\n          \"tokens\": {\n            \"items\": {\n              \"type\": \"integer\"\n            },\n            \"type\": \"array\",\n            \"title\": \"Tokens\",\n            \"description\": \"A list of tokens.\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"tokens\"],\n        \"title\": \"TokenizeInputResponse\",\n        \"example\": {\n          \"tokens\": [123, 321, 222]\n        }\n      },\n      \"ValidationError\": {\n        \"properties\": {\n          \"loc\": {\n            \"items\": {\n              \"anyOf\": [\n                {\n                  \"type\": \"string\"\n                },\n                {\n                  \"type\": \"integer\"\n                }\n              ]\n            },\n            \"type\": \"array\",\n            \"title\": \"Location\"\n          },\n          \"msg\": {\n            \"type\": \"string\",\n            \"title\": \"Message\"\n          },\n          \"type\": {\n            \"type\": \"string\",\n            \"title\": \"Error Type\"\n          }\n        },\n        \"type\": \"object\",\n        \"required\": [\"loc\", \"msg\", \"type\"],\n        \"title\": \"ValidationError\"\n      }\n    },\n    \"securitySchemes\": {\n      \"HTTPBearer\": {\n        \"type\": \"http\",\n        \"scheme\": \"bearer\"\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/extension.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport type { ExtensionContext } from '@podman-desktop/api';\nimport { activate, deactivate } from './extension';\nimport { Studio } from './studio';\n\nvi.mock('./studio');\n\nbeforeEach(() => {\n  vi.clearAllMocks();\n});\n\ntest('check we call activate method on studio instance', async () => {\n  const fakeContext = {} as unknown as ExtensionContext;\n\n  await activate(fakeContext);\n\n  // expect the activate method to be called on the studio mock\n  expect(Studio.prototype.activate).toBeCalledTimes(1);\n\n  // no call on deactivate\n  expect(Studio.prototype.deactivate).not.toBeCalled();\n});\n\ntest('check we call deactivate method on studio instance ', async () => {\n  await deactivate();\n\n  // expect the deactivate method to be called on the studio mock\n  expect(Studio.prototype.deactivate).toBeCalledTimes(1);\n\n  // no call on activate\n  expect(Studio.prototype.activate).not.toBeCalled();\n});\n"
  },
  {
    "path": "packages/backend/src/extension.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ExtensionContext } from '@podman-desktop/api';\nimport { Studio } from './studio';\n\nlet studio: Studio | undefined;\n\nexport async function activate(extensionContext: ExtensionContext): Promise<void> {\n  studio = new Studio(extensionContext);\n  await studio?.activate();\n}\n\nexport async function deactivate(): Promise<void> {\n  await studio?.deactivate();\n}\n"
  },
  {
    "path": "packages/backend/src/instructlab-api-impl.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { InstructlabAPI } from '@shared/InstructlabAPI';\nimport type { InstructlabManager } from './managers/instructlab/instructlabManager';\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\nimport type { InstructlabContainerConfiguration } from '@shared/models/instructlab/IInstructlabContainerConfiguration';\nimport { navigation } from '@podman-desktop/api';\n\nexport class InstructlabApiImpl implements InstructlabAPI {\n  constructor(private instructlabManager: InstructlabManager) {}\n\n  async getIsntructlabSessions(): Promise<InstructlabSession[]> {\n    return this.instructlabManager.getSessions();\n  }\n\n  requestCreateInstructlabContainer(config: InstructlabContainerConfiguration): Promise<void> {\n    return this.instructlabManager.requestCreateInstructlabContainer(config);\n  }\n\n  routeToInstructLabContainerTerminal(containerId: string): Promise<void> {\n    return navigation.navigateToContainerTerminal(containerId);\n  }\n\n  getInstructlabContainerId(): Promise<string | undefined> {\n    return this.instructlabManager.getInstructLabContainer();\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/llama-stack-api-impl.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { navigation } from '@podman-desktop/api';\nimport type { LlamaStackAPI } from '@shared/LlamaStackAPI';\nimport type { LlamaStackContainerConfiguration } from '@shared/models/llama-stack/LlamaStackContainerConfiguration';\nimport type { LlamaStackManager } from './managers/llama-stack/llamaStackManager';\nimport type { LlamaStackContainers } from '@shared/models/llama-stack/LlamaStackContainerInfo';\n\nexport class LlamaStackApiImpl implements LlamaStackAPI {\n  constructor(private llamaStackManager: LlamaStackManager) {}\n\n  requestcreateLlamaStackContainerss(config: LlamaStackContainerConfiguration): Promise<void> {\n    return this.llamaStackManager.requestcreateLlamaStackContainerss(config);\n  }\n\n  routeToLlamaStackContainerTerminal(containerId: string): Promise<void> {\n    return navigation.navigateToContainerTerminal(containerId);\n  }\n\n  getLlamaStackContainersInfo(): Promise<LlamaStackContainers | undefined> {\n    return this.llamaStackManager.getLlamaStackContainers();\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/GPUManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { expect, test, vi, beforeEach } from 'vitest';\nimport { GPUManager } from './GPUManager';\nimport { graphics, type Systeminformation } from 'systeminformation';\nimport { GPUVendor } from '@shared/models/IGPUInfo';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nvi.mock('../utils/inferenceUtils', () => ({\n  getProviderContainerConnection: vi.fn(),\n  getImageInfo: vi.fn(),\n}));\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    env: {\n      isWindows: false,\n    },\n  };\n});\n\nvi.mock('systeminformation', () => ({\n  graphics: vi.fn(),\n}));\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n});\n\ntest('post constructor should have no items', () => {\n  const manager = new GPUManager(rpcExtensionMock);\n  expect(manager.getAll().length).toBe(0);\n});\n\ntest('no controller should return empty array', async () => {\n  vi.mocked(graphics).mockResolvedValue({\n    controllers: [],\n    displays: [],\n  });\n\n  const manager = new GPUManager(rpcExtensionMock);\n  expect(await 
manager.collectGPUs()).toHaveLength(0);\n});\n\ntest('intel controller should return intel vendor', async () => {\n  vi.mocked(graphics).mockResolvedValue({\n    controllers: [\n      {\n        vendor: 'Intel Corporation',\n        model: 'intel model',\n        vram: 1024,\n      } as unknown as Systeminformation.GraphicsControllerData,\n    ],\n    displays: [],\n  });\n\n  const manager = new GPUManager(rpcExtensionMock);\n  expect(await manager.collectGPUs()).toStrictEqual([\n    {\n      vendor: GPUVendor.INTEL,\n      model: 'intel model',\n      vram: 1024,\n    },\n  ]);\n});\n\ntest('NVIDIA controller should return NVIDIA vendor', async () => {\n  vi.mocked(graphics).mockResolvedValue({\n    controllers: [\n      {\n        vendor: 'NVIDIA',\n        model: 'NVIDIA GeForce GTX 1060 6GB',\n        vram: 6144,\n      } as unknown as Systeminformation.GraphicsControllerData,\n    ],\n    displays: [],\n  });\n\n  const manager = new GPUManager(rpcExtensionMock);\n  expect(await manager.collectGPUs()).toStrictEqual([\n    {\n      vendor: GPUVendor.NVIDIA,\n      model: 'NVIDIA GeForce GTX 1060 6GB',\n      vram: 6144,\n    },\n  ]);\n});\n\ntest('NVIDIA controller can have vendor \"NVIDIA Corporation\"', async () => {\n  vi.mocked(graphics).mockResolvedValue({\n    controllers: [\n      {\n        vendor: 'NVIDIA Corporation',\n        model: 'NVIDIA GeForce GTX 1060 6GB',\n        vram: 6144,\n      } as unknown as Systeminformation.GraphicsControllerData,\n    ],\n    displays: [],\n  });\n\n  const manager = new GPUManager(rpcExtensionMock);\n  expect(await manager.collectGPUs()).toStrictEqual([\n    {\n      vendor: GPUVendor.NVIDIA,\n      model: 'NVIDIA GeForce GTX 1060 6GB',\n      vram: 6144,\n    },\n  ]);\n});\n"
  },
  {
    "path": "packages/backend/src/managers/GPUManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { type Disposable } from '@podman-desktop/api';\nimport { GPUVendor, type IGPUInfo } from '@shared/models/IGPUInfo';\nimport { Publisher } from '../utils/Publisher';\nimport { graphics } from 'systeminformation';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_GPUS_UPDATE } from '@shared/Messages';\n\n/**\n * @experimental\n */\nexport class GPUManager extends Publisher<IGPUInfo[]> implements Disposable {\n  #gpus: IGPUInfo[];\n\n  constructor(rpcExtension: RpcExtension) {\n    super(rpcExtension, MSG_GPUS_UPDATE, () => this.getAll());\n    // init properties\n    this.#gpus = [];\n  }\n\n  dispose(): void {}\n\n  getAll(): IGPUInfo[] {\n    return this.#gpus;\n  }\n\n  async collectGPUs(): Promise<IGPUInfo[]> {\n    const { controllers } = await graphics();\n    return controllers.map(controller => ({\n      vendor: this.getVendor(controller.vendor),\n      model: controller.model,\n      vram: controller.vram ?? 
undefined,\n    }));\n  }\n\n  protected getVendor(raw: string): GPUVendor {\n    switch (raw) {\n      case 'Intel Corporation':\n        return GPUVendor.INTEL;\n      case 'NVIDIA':\n      case 'NVIDIA Corporation':\n        return GPUVendor.NVIDIA;\n      case 'Apple':\n        return GPUVendor.APPLE;\n      default:\n        return GPUVendor.UNKNOWN;\n    }\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/SnippetManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { SnippetManager } from './SnippetManager';\nimport type { TelemetryLogger } from '@podman-desktop/api';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_SUPPORTED_LANGUAGES_UPDATE } from '@shared/Messages';\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nconst telemetryMock = {\n  logUsage: vi.fn(),\n  logError: vi.fn(),\n} as unknown as TelemetryLogger;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n});\n\ntest('expect init to notify webview', () => {\n  const manager = new SnippetManager(rpcExtensionMock, telemetryMock);\n  manager.init();\n\n  expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_SUPPORTED_LANGUAGES_UPDATE, manager.getLanguageList());\n});\n\ntest('expect postman-code-generators to have many languages available.', () => {\n  const manager = new SnippetManager(rpcExtensionMock, telemetryMock);\n  manager.init();\n\n  expect(manager.getLanguageList().length).toBeGreaterThan(0);\n});\n\ntest('expect postman-code-generators to have nodejs supported.', () 
=> {\n  const manager = new SnippetManager(rpcExtensionMock, telemetryMock);\n  manager.init();\n\n  const languages = manager.getLanguageList();\n  const nodejs = languages.find(language => language.key === 'nodejs');\n  expect(nodejs).toBeDefined();\n  expect(nodejs?.variants.length).toBeGreaterThan(0);\n\n  const native = nodejs?.variants.find(variant => variant.key === 'Request');\n  expect(native).toBeDefined();\n});\n\ntest('expect postman-code-generators to generate proper nodejs native code', async () => {\n  const manager = new SnippetManager(rpcExtensionMock, telemetryMock);\n  manager.init();\n\n  const snippet = await manager.generate(\n    {\n      url: 'http://localhost:8080',\n    },\n    'nodejs',\n    'Request',\n  );\n  expect(snippet).toBe(`var request = require('request');\nvar options = {\n  'method': 'GET',\n  'url': 'http://localhost:8080',\n  'headers': {\n  }\n};\nrequest(options, function (error, response) {\n  if (error) throw new Error(error);\n  console.log(response.body);\n});\n`);\n});\n\ntest('expect snippet manager to have Quarkus Langchain4J supported.', () => {\n  const manager = new SnippetManager(rpcExtensionMock, telemetryMock);\n  manager.init();\n\n  const languages = manager.getLanguageList();\n  const java = languages.find(language => language.key === 'java');\n  expect(java).toBeDefined();\n  expect(java?.variants.length).toBeGreaterThan(0);\n\n  const quarkus_langchain4j = java?.variants.find(variant => variant.key === 'Quarkus Langchain4J');\n  expect(quarkus_langchain4j).toBeDefined();\n});\n\ntest('expect new variant to replace existing one if same name', () => {\n  const manager = new SnippetManager(rpcExtensionMock, telemetryMock);\n  manager.init();\n\n  const languages = manager.getLanguageList();\n  const java = languages.find(language => language.key === 'java');\n  expect(java).toBeDefined();\n  expect(java?.variants.length).toBeGreaterThan(0);\n\n  if (!java) throw new Error('undefined java');\n\n  const 
oldVariantsNumber = java.variants.length;\n  manager.addVariant('java', java.variants[0].key, vi.fn());\n  const languages_updated = manager.getLanguageList();\n  const java_updated = languages_updated.find(language => language.key === 'java');\n  expect(java_updated).toBeDefined();\n  expect(java_updated?.variants.length).equals(oldVariantsNumber);\n});\n"
  },
  {
    "path": "packages/backend/src/managers/SnippetManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { Disposable, TelemetryLogger } from '@podman-desktop/api';\nimport { getLanguageList, convert, type Language } from 'postman-code-generators';\nimport { Request } from 'postman-collection';\nimport { Publisher } from '../utils/Publisher';\nimport type { RequestOptions } from '@shared/models/RequestOptions';\nimport { quarkusLangchain4Jgenerator } from './snippets/quarkus-snippet';\nimport { javaOkHttpGenerator } from './snippets/java-okhttp-snippet';\nimport { pythonLangChainGenerator } from './snippets/python-langchain-snippet';\nimport { MSG_SUPPORTED_LANGUAGES_UPDATE } from '@shared/Messages';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\ntype Generator = (requestOptions: RequestOptions) => Promise<string>;\n\nexport class SnippetManager extends Publisher<Language[]> implements Disposable {\n  #languages: Language[];\n  #additionalGenerators: Map<string, Generator>;\n\n  constructor(\n    rpcExtension: RpcExtension,\n    private telemetry: TelemetryLogger,\n  ) {\n    super(rpcExtension, MSG_SUPPORTED_LANGUAGES_UPDATE, () => this.getLanguageList());\n\n    this.#languages = [];\n    this.#additionalGenerators = new 
Map<string, Generator>();\n  }\n\n  addVariant(key: string, variant: string, generator: Generator): void {\n    const original = this.#languages;\n    const language = original.find((lang: Language) => lang.key === key);\n    if (language) {\n      if (!language.variants.find(v => v.key === variant)) {\n        language.variants.push({ key: variant });\n      }\n      this.#additionalGenerators.set(`${key}/${variant}`, generator);\n    }\n  }\n\n  getLanguageList(): Language[] {\n    return this.#languages;\n  }\n\n  async generate(requestOptions: RequestOptions, language: string, variant: string): Promise<string> {\n    this.telemetry.logUsage('snippet.generate', { language: language, variant: variant });\n    const generator = this.#additionalGenerators.get(`${language}/${variant}`);\n    if (generator) {\n      return generator(requestOptions);\n    }\n\n    return new Promise((resolve, reject) => {\n      const request = new Request(requestOptions);\n      convert(language, variant, request, {}, (error: unknown, snippet: string | undefined) => {\n        if (error) {\n          reject(error);\n          return;\n        } else if (snippet === undefined) {\n          throw new Error('undefined snippet');\n        }\n        resolve(snippet);\n      });\n    });\n  }\n\n  init(): void {\n    this.#languages = getLanguageList();\n    this.addVariant('java', 'Quarkus Langchain4J', quarkusLangchain4Jgenerator);\n    this.addVariant('java', 'OkHttp', javaOkHttpGenerator);\n    this.addVariant('python', 'Python LangChain', pythonLangChainGenerator);\n    // Notify the publisher\n    this.notify();\n  }\n\n  dispose(): void {}\n}\n"
  },
  {
    "path": "packages/backend/src/managers/TaskRunner.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport type { TaskRegistry } from '../registries/TaskRegistry';\nimport { TaskRunner } from './TaskRunner';\nimport type { TaskRunnerTools } from '../models/TaskRunner';\nimport type { Task } from '@shared/models/ITask';\n\nconst taskRegistry = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n  getTasksByLabels: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst runner = vi.fn<(tools: TaskRunnerTools) => Promise<void>>();\n\nlet taskRunner: TaskRunner;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  taskRunner = new TaskRunner(taskRegistry);\n});\n\ntest('runner terminates with no successLabel', async () => {\n  vi.mocked(taskRegistry.createTask).mockReturnValue({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'loading',\n  });\n  runner.mockResolvedValue();\n  const labels = {\n    label1: 'value1',\n    label2: 'value2',\n  };\n  await taskRunner.runAsTask(\n    labels,\n    {\n      loadingLabel: 'Loading...',\n      errorMsg: err => `an error: ${err}`,\n    },\n    runner,\n  );\n\n  expect(taskRegistry.createTask).toHaveBeenCalledWith('Loading...', 'loading', labels);\n  
expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'success',\n  });\n});\n\ntest('runner terminates with successLabel', async () => {\n  vi.mocked(taskRegistry.createTask).mockReturnValue({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'loading',\n  });\n  runner.mockResolvedValue();\n  const labels = {\n    label1: 'value1',\n    label2: 'value2',\n  };\n  await taskRunner.runAsTask(\n    labels,\n    {\n      loadingLabel: 'Loading...',\n      successLabel: 'Success!!',\n      errorMsg: err => `an error: ${err}`,\n    },\n    runner,\n  );\n\n  expect(taskRegistry.createTask).toHaveBeenCalledWith('Loading...', 'loading', labels);\n  expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n    id: 'task1',\n    name: 'Success!!',\n    state: 'success',\n  });\n});\n\ntest('runner throws with no errorLabel', async () => {\n  vi.mocked(taskRegistry.createTask).mockReturnValue({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'loading',\n  });\n  runner.mockRejectedValue('something goes wrong');\n  const labels = {\n    label1: 'value1',\n    label2: 'value2',\n  };\n  await expect(() =>\n    taskRunner.runAsTask(\n      labels,\n      {\n        loadingLabel: 'Loading...',\n        errorMsg: err => `an error: ${err}`,\n      },\n      runner,\n    ),\n  ).rejects.toThrow();\n\n  expect(taskRegistry.createTask).toHaveBeenCalledWith('Loading...', 'loading', labels);\n  expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'error',\n    error: 'an error: something goes wrong',\n  });\n});\n\ntest('runner throws with errorLabel', async () => {\n  vi.mocked(taskRegistry.createTask).mockReturnValue({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'loading',\n  });\n  runner.mockRejectedValue('something goes wrong');\n  const labels = {\n    label1: 'value1',\n    label2: 'value2',\n  };\n  await expect(() =>\n    
taskRunner.runAsTask(\n      labels,\n      {\n        loadingLabel: 'Loading...',\n        errorLabel: 'Failed :(',\n        errorMsg: err => `an error: ${err}`,\n      },\n      runner,\n    ),\n  ).rejects.toThrow();\n\n  expect(taskRegistry.createTask).toHaveBeenCalledWith('Loading...', 'loading', labels);\n  expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n    id: 'task1',\n    name: 'Failed :(',\n    state: 'error',\n    error: 'an error: something goes wrong',\n  });\n});\n\ntest('updateLabels', async () => {\n  vi.mocked(taskRegistry.createTask).mockReturnValue({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'loading',\n  });\n  runner.mockImplementation(async ({ updateLabels }) => {\n    updateLabels(labels => ({ ...labels, newLabel: 'newValue' }));\n  });\n  const labels = {\n    label1: 'value1',\n    label2: 'value2',\n  };\n  await taskRunner.runAsTask(\n    labels,\n    {\n      loadingLabel: 'Loading...',\n      errorMsg: err => `an error: ${err}`,\n    },\n    runner,\n  );\n\n  expect(taskRegistry.createTask).toHaveBeenCalledWith('Loading...', 'loading', labels);\n  expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'success',\n    labels: {\n      label1: 'value1',\n      label2: 'value2',\n      newLabel: 'newValue',\n    },\n  });\n  expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'success',\n    labels: {\n      label1: 'value1',\n      label2: 'value2',\n      newLabel: 'newValue',\n    },\n  });\n});\n\ntest.each<{ failFast: boolean }>([\n  {\n    failFast: true,\n  },\n  {\n    failFast: false,\n  },\n])('failFastSubtasks $failFast', async ({ failFast }) => {\n  vi.mocked(taskRegistry.createTask).mockReturnValue({\n    id: 'task1',\n    name: 'Loading...',\n    state: 'loading',\n  });\n  const otherTasks: Task[] = [\n    {\n      id: 'subtask1',\n      name: 'Sub task 1',\n      state: 'loading',\n    },\n 
   {\n      id: 'subtask2',\n      name: 'Sub task 2',\n      state: 'loading',\n    },\n    {\n      id: 'subtask3',\n      name: 'Sub task 3',\n      state: 'error',\n    },\n  ];\n  vi.mocked(taskRegistry.getTasksByLabels).mockReturnValue(otherTasks);\n  runner.mockRejectedValue('something goes wrong');\n  const labels = {\n    label1: 'value1',\n    label2: 'value2',\n  };\n  await expect(() =>\n    taskRunner.runAsTask(\n      labels,\n      {\n        loadingLabel: 'Loading...',\n        errorMsg: err => `an error: ${err}`,\n        failFastSubtasks: failFast,\n      },\n      runner,\n    ),\n  ).rejects.toThrow();\n\n  expect(taskRegistry.createTask).toHaveBeenCalledWith('Loading...', 'loading', labels);\n  if (failFast) {\n    expect(taskRegistry.updateTask).toHaveBeenCalledTimes(3);\n    expect(taskRegistry.updateTask).toHaveBeenNthCalledWith(1, { ...otherTasks[0], state: 'error' });\n    expect(taskRegistry.updateTask).toHaveBeenNthCalledWith(2, { ...otherTasks[1], state: 'error' });\n    expect(taskRegistry.updateTask).toHaveBeenNthCalledWith(3, {\n      id: 'task1',\n      name: 'Loading...',\n      state: 'error',\n      error: 'an error: something goes wrong',\n    });\n  } else {\n    expect(taskRegistry.updateTask).toHaveBeenCalledExactlyOnceWith({\n      id: 'task1',\n      name: 'Loading...',\n      state: 'error',\n      error: 'an error: something goes wrong',\n    });\n  }\n});\n"
  },
  {
    "path": "packages/backend/src/managers/TaskRunner.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { RunAsTaskOptions, TaskRunnerTools } from '../models/TaskRunner';\nimport type { TaskRegistry } from '../registries/TaskRegistry';\n\nexport class TaskRunner {\n  constructor(private taskRegistry: TaskRegistry) {}\n\n  async runAsTask<T>(\n    labels: Record<string, string>,\n    options: RunAsTaskOptions,\n    run: (tools: TaskRunnerTools) => Promise<T>,\n  ): Promise<T> {\n    const tools = {\n      updateLabels: (f: (labels: Record<string, string>) => Record<string, string>): void => {\n        task.labels = f(labels);\n        this.taskRegistry.updateTask(task);\n      },\n    };\n\n    const task = this.taskRegistry.createTask(options.loadingLabel, 'loading', labels);\n    try {\n      const result = await run(tools);\n      task.state = 'success';\n      if (options.successLabel) {\n        task.name = options.successLabel;\n      }\n      return result;\n    } catch (err: unknown) {\n      task.state = 'error';\n      task.error = options.errorMsg(err);\n      if (options.errorLabel) {\n        task.name = options.errorLabel;\n      }\n      if (options.failFastSubtasks) {\n        this.failFastSubtasks(labels);\n      }\n      throw err;\n    
} finally {\n      task.progress = undefined;\n      this.taskRegistry.updateTask(task);\n    }\n  }\n\n  private failFastSubtasks(labels: Record<string, string>): void {\n    const tasks = this.taskRegistry.getTasksByLabels(labels);\n    // Filter out the ones not in loading state\n    tasks\n      .filter(t => t.state === 'loading')\n      .forEach(t => {\n        this.taskRegistry.updateTask({\n          ...t,\n          state: 'error',\n        });\n      });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/apiServer.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/* eslint-disable sonarjs/no-nested-functions */\n\nimport { afterEach, assert, beforeEach, describe, expect, test, vi } from 'vitest';\nimport { ApiServer, PREFERENCE_RANDOM_PORT } from './apiServer';\nimport request from 'supertest';\nimport type * as podmanDesktopApi from '@podman-desktop/api';\nimport path from 'node:path';\nimport type { Server } from 'node:http';\nimport type { ModelsManager } from './modelsManager';\nimport type { EventEmitter } from 'node:events';\nimport { once } from 'node:events';\nimport type { ConfigurationRegistry } from '../registries/ConfigurationRegistry';\nimport type { AddressInfo } from 'node:net';\nimport type { CatalogManager } from './catalogManager';\nimport type { Downloader } from '../utils/downloader';\nimport type { ProgressEvent } from '../models/baseEvent';\nimport type { InferenceManager } from './inference/inferenceManager';\nimport type { ContainerHealthy, ContainerRegistry } from '../registries/ContainerRegistry';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport OpenAI from 'openai';\nimport type { ChatCompletion, ChatCompletionChunk } from 'openai/resources';\nimport { Stream } from 
'openai/streaming';\n\nvi.mock('openai', () => {\n  const OpenAI = vi.fn();\n  OpenAI.prototype = {\n    chat: {\n      completions: {\n        create: vi.fn(),\n      },\n    },\n  };\n  return { default: OpenAI };\n});\n\nclass TestApiServer extends ApiServer {\n  public override getListener(): Server | undefined {\n    return super.getListener();\n  }\n}\n\nconst extensionContext = {} as unknown as podmanDesktopApi.ExtensionContext;\n\nlet server: TestApiServer;\n\nconst modelsManager = {\n  getModelsInfo: vi.fn(),\n  isModelOnDisk: vi.fn(),\n  createDownloader: vi.fn(),\n  getLocalModelsFromDisk: vi.fn(),\n  sendModelsInfo: vi.fn(),\n} as unknown as ModelsManager;\n\nconst catalogManager = {\n  getModelByName: vi.fn(),\n} as unknown as CatalogManager;\n\nconst inferenceManager = {\n  getServers: vi.fn(),\n  createInferenceServer: vi.fn(),\n  startInferenceServer: vi.fn(),\n} as unknown as InferenceManager;\n\nconst configurationRegistry = {\n  getExtensionConfiguration: () => {\n    return {\n      apiPort: PREFERENCE_RANDOM_PORT,\n    };\n  },\n} as unknown as ConfigurationRegistry;\n\nconst containerRegistry = {\n  onHealthyContainerEvent: vi.fn(),\n} as unknown as ContainerRegistry;\n\nbeforeEach(async () => {\n  vi.clearAllMocks();\n  server = new TestApiServer(\n    extensionContext,\n    modelsManager,\n    catalogManager,\n    inferenceManager,\n    configurationRegistry,\n    containerRegistry,\n  );\n  vi.spyOn(server, 'getSpecFile').mockReturnValue(path.join(__dirname, '../../../../api/openapi.yaml'));\n  vi.spyOn(server, 'getPackageFile').mockReturnValue(path.join(__dirname, '../../../../package.json'));\n  await server.init();\n  await new Promise(resolve => setTimeout(resolve, 0)); // wait for random port to be set\n});\n\nafterEach(async () => {\n  server.dispose();\n  await once(server.getListener() as EventEmitter, 'close');\n});\n\ntest('/spec endpoint', async () => {\n  expect(server.getListener()).toBeDefined();\n  const res = await 
request(server.getListener()!)\n    .get('/spec')\n    .expect(200)\n    .expect('Content-Type', 'application/yaml; charset=utf-8');\n  expect(res.text).toMatch(/^openapi:/);\n});\n\ntest('/spec endpoint when spec file is not found', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.spyOn(server, 'getSpecFile').mockReturnValue(path.join(__dirname, '../../../../api/openapi-notfound.yaml'));\n  const res = await request(server.getListener()!).get('/spec').expect(500);\n  expect(res.body.message).toEqual('unable to get spec');\n});\n\ntest('/spec endpoint when getting spec file fails', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.spyOn(server, 'getSpecFile').mockImplementation(() => {\n    throw new Error('an error getting spec file');\n  });\n  const res = await request(server.getListener()!).get('/spec').expect(500);\n  expect(res.body.message).toEqual('unable to get spec');\n  expect(res.body.errors[0]).toEqual('an error getting spec file');\n});\n\ntest('/api/version endpoint', async () => {\n  expect(server.getListener()).toBeDefined();\n  const res = await request(server.getListener()!)\n    .get('/api/version')\n    .expect(200)\n    .expect('Content-Type', 'application/json; charset=utf-8');\n  expect(res.body.version).toBeDefined();\n});\n\ntest('/api/version endpoint when package.json file is not found', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.spyOn(server, 'getPackageFile').mockReturnValue(path.join(__dirname, '../../../../package-notfound.json'));\n  const res = await request(server.getListener()!).get('/api/version').expect(500);\n  expect(res.body.message).toEqual('unable to get version');\n});\n\ntest('/api/version endpoint when getting package.json file fails', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.spyOn(server, 'getPackageFile').mockImplementation(() => {\n    throw new Error('an error getting package file');\n  });\n  const res = await 
request(server.getListener()!).get('/api/version').expect(500);\n  expect(res.body.message).toEqual('unable to get version');\n  expect(res.body.errors[0]).toEqual('an error getting package file');\n});\n\ntest('/api/version endpoint with unexpected param', async () => {\n  expect(server.getListener()).toBeDefined();\n  const res = await request(server.getListener()!).get('/api/version?wrongParam').expect(400);\n  expect(res.body.message).toEqual(`Unknown query parameter 'wrongParam'`);\n});\n\ntest('/api/wrongEndpoint', async () => {\n  expect(server.getListener()).toBeDefined();\n  const res = await request(server.getListener()!).get('/api/wrongEndpoint').expect(404);\n  expect(res.body.message).toEqual('not found');\n});\n\ntest('/', async () => {\n  expect(server.getListener()).toBeDefined();\n  await request(server.getListener()!).get('/').expect(200);\n});\n\ntest('/api/tags', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.mocked(modelsManager.getModelsInfo).mockReturnValue([]);\n  await request(server.getListener()!).get('/api/tags').expect(200);\n});\n\ntest('/api/tags returns error', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.mocked(modelsManager.getModelsInfo).mockRejectedValue({});\n  const res = await request(server.getListener()!).get('/api/tags').expect(500);\n  expect(res.body.message).toEqual('unable to get models');\n});\n\ntest('/api/tags returns ok', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.mocked(modelsManager.getModelsInfo).mockReturnValue([\n    {\n      id: 'modelId',\n      name: 'model-name',\n      description: 'a description',\n    },\n  ]);\n  vi.mocked(modelsManager.isModelOnDisk).mockReturnValue(true);\n  const res = await request(server.getListener()!).get('/api/tags').expect(200);\n  expect(res.body).toBeDefined();\n  expect(res.body.models).toBeDefined();\n  expect(res.body.models[0]).toMatchObject({\n    name: 'model-name',\n    model: 'model-name',\n  
});\n});\n\ntest('/api-docs/9000 returns swagger UI', async () => {\n  expect(server.getListener()).toBeDefined();\n  vi.mocked(modelsManager.getModelsInfo).mockRejectedValue({});\n  const listener = server.getListener();\n  if (!listener) {\n    assert.fail('listener is not defined');\n  }\n  const response = await request(listener).get('/api-docs/9000/').expect(200);\n  expect(response.status).toBe(200);\n\n  // Ensure it returns the Swagger UI page\n  expect(response.text).toContain('<title>Swagger UI</title>');\n});\n\ntest('verify listening on localhost', async () => {\n  expect(server.getListener()).toBeDefined();\n  expect((server.getListener()?.address() as AddressInfo).address).toEqual('0.0.0.0');\n});\n\ntest('/api/pull returns an error if no body is passed', async () => {\n  expect(server.getListener()).toBeDefined();\n  await request(server.getListener()!).post('/api/pull').expect(415);\n});\n\ndescribe.each([undefined, true, false])('/api/pull endpoint, stream is %o', stream => {\n  test('/api/pull returns an error if the model is not known', async () => {\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(catalogManager.getModelByName).mockImplementation(() => {\n      throw new Error('model unknown');\n    });\n    const req = request(server.getListener()!).post('/api/pull').send({ model: 'unknown-model-name', stream });\n    if (stream === false) {\n      const res = await req.expect(500).expect('Content-Type', 'application/json; charset=utf-8');\n      expect(res.body.error).toEqual('pull model manifest: file does not exist');\n    } else {\n      const res = await req.expect(200);\n      const lines = res.text.split('\\n');\n      expect(lines.length).toEqual(3);\n      expect(lines[0]).toEqual('{\"status\":\"pulling manifest\"}');\n      expect(lines[1]).toEqual('{\"error\":\"pull model manifest: file does not exist\"}');\n      expect(lines[2]).toEqual('');\n    }\n  });\n\n  test('/api/pull returns success if model already 
downloaded', async () => {\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(catalogManager.getModelByName).mockReturnValue({\n      id: 'modelId',\n      name: 'model-name',\n      description: 'a description',\n    });\n    vi.mocked(modelsManager.isModelOnDisk).mockReturnValue(true);\n    const req = request(server.getListener()!).post('/api/pull').send({ model: 'model-name', stream });\n    if (stream === false) {\n      const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n      expect(res.body.status).toEqual('success');\n    } else {\n      const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n      const lines = res.text.split('\\n');\n      expect(lines.length).toEqual(3);\n      expect(lines[0]).toEqual('{\"status\":\"pulling manifest\"}');\n      expect(lines[1]).toEqual('{\"status\":\"success\"}');\n      expect(lines[2]).toEqual('');\n    }\n  });\n\n  test('/api/pull downloads model and returns success', async () => {\n    const getLocalModelsSpy = vi.spyOn(modelsManager, 'getLocalModelsFromDisk').mockResolvedValue();\n    const sendModelsInfoSpy = vi.spyOn(modelsManager, 'sendModelsInfo').mockResolvedValue();\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(catalogManager.getModelByName).mockReturnValue({\n      id: 'modelId',\n      name: 'model-name',\n      description: 'a description',\n      sha256: '123456',\n    });\n    vi.mocked(modelsManager.isModelOnDisk).mockReturnValue(false);\n    vi.mocked(modelsManager.createDownloader).mockReturnValue({\n      perform: async (_name: string) => {},\n      onEvent: (listener: (e: ProgressEvent) => void) => {\n        listener({\n          status: 'progress',\n          id: 'model-name',\n          total: 100000,\n          value: 100000,\n        });\n      },\n    } as unknown as Downloader);\n    const req = request(server.getListener()!).post('/api/pull').send({ model: 'model-name', stream });\n    if (stream 
=== false) {\n      const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n      expect(res.body.status).toEqual('success');\n    } else {\n      const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n      const lines = res.text.split('\\n');\n      expect(lines.length).toEqual(4);\n      expect(lines[0]).toEqual('{\"status\":\"pulling manifest\"}');\n      expect(lines[1]).toEqual(\n        '{\"status\":\"pulling 123456\",\"digest\":\"sha256:123456\",\"total\":100000,\"completed\":100000000}',\n      );\n      expect(lines[2]).toEqual('{\"status\":\"success\"}');\n      expect(lines[3]).toEqual('');\n    }\n    expect(getLocalModelsSpy).toHaveBeenCalledTimes(1);\n    expect(sendModelsInfoSpy).toHaveBeenCalledTimes(1);\n  });\n\n  test('/api/pull should return an error if an error occurs during download', async () => {\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(catalogManager.getModelByName).mockReturnValue({\n      id: 'modelId',\n      name: 'model-name',\n      description: 'a description',\n      sha256: '123456',\n    });\n    vi.mocked(modelsManager.isModelOnDisk).mockReturnValue(false);\n    vi.mocked(modelsManager.createDownloader).mockReturnValue({\n      perform: async (_name: string) => {\n        await new Promise(resolve => setTimeout(resolve, 0)); // wait for random port to be set\n        throw new Error('an error');\n      },\n      onEvent: (listener: (e: ProgressEvent) => void) => {\n        listener({\n          status: 'progress',\n          id: 'model-name',\n          total: 100000,\n          value: 100000,\n        });\n      },\n    } as unknown as Downloader);\n    const req = request(server.getListener()!).post('/api/pull').send({ model: 'model-name', stream });\n    if (stream === false) {\n      const res = await req.expect(500).expect('Content-Type', 'application/json; charset=utf-8');\n      expect(res.body.error).toEqual('Error: an error');\n    } else 
{\n      const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n      const lines = res.text.split('\\n');\n      expect(lines.length).toEqual(4);\n      expect(lines[0]).toEqual('{\"status\":\"pulling manifest\"}');\n      expect(lines[1]).toEqual(\n        '{\"status\":\"pulling 123456\",\"digest\":\"sha256:123456\",\"total\":100000,\"completed\":100000000}',\n      );\n      expect(lines[2]).toEqual('{\"error\":\"Error: an error\"}');\n      expect(lines[3]).toEqual('');\n    }\n  });\n});\n\ndescribe.each([undefined, true, false])('stream is %o', stream => {\n  describe.each(['/api/chat', '/api/generate'])('%o endpoint', endpoint => {\n    test('returns an error if the model is not known', async () => {\n      expect(server.getListener()).toBeDefined();\n      vi.mocked(catalogManager.getModelByName).mockImplementation(() => {\n        throw new Error('model unknown');\n      });\n      const req = request(server.getListener()!).post(endpoint).send({ model: 'unknown-model-name', stream });\n      if (stream === false) {\n        const res = await req.expect(500).expect('Content-Type', 'application/json; charset=utf-8');\n        expect(res.body.error).toEqual('chat: model \"unknown-model-name\" does not exist');\n      } else {\n        const res = await req.expect(200);\n        const lines = res.text.split('\\n');\n        expect(lines.length).toEqual(2);\n        expect(lines[0]).toEqual('{\"error\":\"chat: model \\\\\"unknown-model-name\\\\\" does not exist\"}');\n        expect(lines[1]).toEqual('');\n      }\n    });\n\n    test('returns an error if model is not downloaded', async () => {\n      expect(server.getListener()).toBeDefined();\n      vi.mocked(catalogManager.getModelByName).mockReturnValue({\n        id: 'modelId',\n        name: 'model-name',\n        description: 'a description',\n      });\n      vi.mocked(modelsManager.isModelOnDisk).mockReturnValue(false);\n      const req = 
request(server.getListener()!).post(endpoint).send({ model: 'model-name', stream });\n      if (stream === false) {\n        const res = await req.expect(500).expect('Content-Type', 'application/json; charset=utf-8');\n        expect(res.body.error).toEqual('chat: model \"model-name\" not found, try pulling it first');\n      } else {\n        const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n        const lines = res.text.split('\\n');\n        expect(lines.length).toEqual(2);\n        expect(lines[0]).toEqual('{\"error\":\"chat: model \\\\\"model-name\\\\\" not found, try pulling it first\"}');\n        expect(lines[1]).toEqual('');\n      }\n    });\n  });\n\n  describe('the model is available', () => {\n    const onHealthyContainerEventEmptyCallback = (): podmanDesktopApi.Disposable => {\n      return {\n        dispose: vi.fn(),\n      };\n    };\n\n    const onHealthyContainerEventNonEmptyCallback = (\n      fn: (e: ContainerHealthy) => void,\n    ): podmanDesktopApi.Disposable => {\n      setTimeout(\n        () =>\n          fn({\n            id: 'container1',\n          }),\n        100,\n      );\n      return {\n        dispose: vi.fn(),\n      };\n    };\n\n    beforeEach(() => {\n      expect(server.getListener()).toBeDefined();\n      vi.mocked(catalogManager.getModelByName).mockReturnValue({\n        id: 'modelId1',\n        name: 'model-name',\n        description: 'a description',\n        file: {\n          file: 'a-file-name',\n          path: '/path/to/model-file',\n        },\n      });\n      vi.mocked(modelsManager.isModelOnDisk).mockReturnValue(true);\n    });\n\n    describe('the service is initially not created', async () => {\n      beforeEach(async () => {\n        vi.mocked(inferenceManager.getServers).mockReturnValueOnce([]);\n      });\n\n      describe('the created service is immediately healthy', () => {\n        beforeEach(() => {\n          
vi.mocked(inferenceManager.createInferenceServer).mockImplementation(async () => {\n            vi.mocked(inferenceManager.getServers).mockReturnValueOnce([\n              {\n                models: [\n                  {\n                    id: 'modelId1',\n                    name: 'model-name',\n                    description: 'model 1',\n                  },\n                ],\n                container: {\n                  engineId: 'engine1',\n                  containerId: 'container1',\n                },\n                status: 'running',\n                health: {\n                  Status: 'healthy',\n                },\n              } as unknown as InferenceServer,\n            ]);\n            vi.mocked(containerRegistry.onHealthyContainerEvent).mockImplementation(\n              onHealthyContainerEventEmptyCallback,\n            );\n            return 'container1';\n          });\n        });\n\n        test('/api/generate creates the service and returns that the model is loaded', async () => {\n          const req = request(server.getListener()!).post('/api/generate').send({ model: 'model-name', stream });\n          if (stream === false) {\n            const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n            expect(res.body).toEqual({ model: 'model-name', response: '', done: true, done_reason: 'load' });\n          } else {\n            const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n            const lines = res.text.split('\\n');\n            expect(lines.length).toEqual(2);\n            expect(lines[0]).toEqual('{\"model\":\"model-name\",\"response\":\"\",\"done\":true,\"done_reason\":\"load\"}');\n            expect(lines[1]).toEqual('');\n          }\n          expect(containerRegistry.onHealthyContainerEvent).toHaveBeenCalledOnce();\n          expect(inferenceManager.createInferenceServer).toHaveBeenCalledOnce();\n        });\n      });\n\n      describe('the 
created service is eventually healthy', () => {\n        beforeEach(() => {\n          vi.mocked(inferenceManager.createInferenceServer).mockImplementation(async () => {\n            vi.mocked(inferenceManager.getServers).mockReturnValueOnce([\n              {\n                models: [\n                  {\n                    id: 'modelId1',\n                    name: 'model-name',\n                    description: 'model 1',\n                  },\n                ],\n                container: {\n                  engineId: 'engine1',\n                  containerId: 'container1',\n                },\n                status: 'starting',\n              } as unknown as InferenceServer,\n            ]);\n            vi.mocked(containerRegistry.onHealthyContainerEvent).mockImplementation(\n              onHealthyContainerEventNonEmptyCallback,\n            );\n            return 'container1';\n          });\n        });\n\n        test('/api/generate creates the service and returns that the model is loaded', async () => {\n          const req = request(server.getListener()!).post('/api/generate').send({ model: 'model-name', stream });\n          if (stream === false) {\n            const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n            expect(res.body).toEqual({ model: 'model-name', response: '', done: true, done_reason: 'load' });\n          } else {\n            const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n            const lines = res.text.split('\\n');\n            expect(lines.length).toEqual(2);\n            expect(lines[0]).toEqual('{\"model\":\"model-name\",\"response\":\"\",\"done\":true,\"done_reason\":\"load\"}');\n            expect(lines[1]).toEqual('');\n          }\n          expect(containerRegistry.onHealthyContainerEvent).toHaveBeenCalledOnce();\n          expect(inferenceManager.createInferenceServer).toHaveBeenCalledOnce();\n        });\n      });\n    });\n\n    
describe('the service is initially created but not started', async () => {\n      beforeEach(async () => {\n        vi.mocked(inferenceManager.getServers).mockReturnValueOnce([\n          {\n            models: [\n              {\n                id: 'modelId1',\n                name: 'model-name',\n                description: 'model 1',\n              },\n            ],\n            container: {\n              engineId: 'engine1',\n              containerId: 'container1',\n            },\n            status: 'stopped',\n          } as unknown as InferenceServer,\n        ]);\n      });\n\n      describe('the started service is immediately healthy', () => {\n        beforeEach(() => {\n          vi.mocked(inferenceManager.startInferenceServer).mockImplementation(async () => {\n            vi.mocked(inferenceManager.getServers).mockReturnValueOnce([\n              {\n                models: [\n                  {\n                    id: 'modelId1',\n                    name: 'model-name',\n                    description: 'model 1',\n                  },\n                ],\n                container: {\n                  engineId: 'engine1',\n                  containerId: 'container1',\n                },\n                status: 'running',\n                health: {\n                  Status: 'healthy',\n                },\n              } as unknown as InferenceServer,\n            ]);\n            vi.mocked(containerRegistry.onHealthyContainerEvent).mockImplementation(\n              onHealthyContainerEventEmptyCallback,\n            );\n          });\n        });\n\n        test('/api/generate starts the service and returns that the model is loaded', async () => {\n          const req = request(server.getListener()!).post('/api/generate').send({ model: 'model-name', stream });\n          if (stream === false) {\n            const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n            expect(res.body).toEqual({ 
model: 'model-name', response: '', done: true, done_reason: 'load' });\n          } else {\n            const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n            const lines = res.text.split('\\n');\n            expect(lines.length).toEqual(2);\n            expect(lines[0]).toEqual('{\"model\":\"model-name\",\"response\":\"\",\"done\":true,\"done_reason\":\"load\"}');\n            expect(lines[1]).toEqual('');\n          }\n          expect(containerRegistry.onHealthyContainerEvent).toHaveBeenCalledOnce();\n          expect(inferenceManager.startInferenceServer).toHaveBeenCalledOnce();\n        });\n      });\n\n      describe('the started service is eventually healthy', () => {\n        beforeEach(() => {\n          vi.mocked(inferenceManager.startInferenceServer).mockImplementation(async () => {\n            vi.mocked(inferenceManager.getServers).mockReturnValueOnce([\n              {\n                models: [\n                  {\n                    id: 'modelId1',\n                    name: 'model-name',\n                    description: 'model 1',\n                  },\n                ],\n                container: {\n                  engineId: 'engine1',\n                  containerId: 'container1',\n                },\n                status: 'starting',\n              } as unknown as InferenceServer,\n            ]);\n            vi.mocked(containerRegistry.onHealthyContainerEvent).mockImplementation(\n              onHealthyContainerEventNonEmptyCallback,\n            );\n          });\n        });\n\n        test('/api/generate starts the service and returns that the model is loaded', async () => {\n          const req = request(server.getListener()!).post('/api/generate').send({ model: 'model-name', stream });\n          if (stream === false) {\n            const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n            expect(res.body).toEqual({ model: 'model-name', response: '', 
done: true, done_reason: 'load' });\n          } else {\n            const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n            const lines = res.text.split('\\n');\n            expect(lines.length).toEqual(2);\n            expect(lines[0]).toEqual('{\"model\":\"model-name\",\"response\":\"\",\"done\":true,\"done_reason\":\"load\"}');\n            expect(lines[1]).toEqual('');\n          }\n          expect(containerRegistry.onHealthyContainerEvent).toHaveBeenCalledOnce();\n          expect(inferenceManager.startInferenceServer).toHaveBeenCalledOnce();\n        });\n      });\n    });\n\n    describe('the service is running', async () => {\n      beforeEach(async () => {\n        vi.mocked(inferenceManager.getServers).mockReturnValue([\n          {\n            models: [\n              {\n                id: 'modelId1',\n                name: 'model-name',\n                description: 'model 1',\n              },\n            ],\n            container: {\n              engineId: 'engine1',\n              containerId: 'container1',\n            },\n            status: 'running',\n            health: {\n              Status: 'healthy',\n            },\n            connection: {\n              port: 8080,\n            },\n          } as unknown as InferenceServer,\n        ]);\n      });\n\n      test('/api/generate returns that the model is loaded', async () => {\n        const req = request(server.getListener()!).post('/api/generate').send({ model: 'model-name', stream });\n        if (stream === false) {\n          const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n          expect(res.body).toEqual({ model: 'model-name', response: '', done: true, done_reason: 'load' });\n        } else {\n          const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n          const lines = res.text.split('\\n');\n          expect(lines.length).toEqual(2);\n          
expect(lines[0]).toEqual('{\"model\":\"model-name\",\"response\":\"\",\"done\":true,\"done_reason\":\"load\"}');\n          expect(lines[1]).toEqual('');\n        }\n      });\n\n      describe.each([\n        {\n          endpoint: '/api/chat',\n          query: {\n            model: 'model-name',\n            stream,\n            messages: [\n              {\n                role: 'user',\n                content: 'what is the question?',\n              },\n            ],\n          },\n          expectedNonStreamed: {\n            model: 'model-name',\n            message: { role: 'assistant', content: 'that is a good question' },\n            done: true,\n            done_reason: 'stop',\n          },\n          expectedStreamed: [\n            '{\"model\":\"model-name\",\"message\":{\"role\":\"assistant\",\"content\":\"that \"},\"done\":false}',\n            '{\"model\":\"model-name\",\"message\":{\"role\":\"assistant\",\"content\":\"is \"},\"done\":false}',\n            '{\"model\":\"model-name\",\"message\":{\"role\":\"assistant\",\"content\":\"a \"},\"done\":false}',\n            '{\"model\":\"model-name\",\"message\":{\"role\":\"assistant\",\"content\":\"good \"},\"done\":false}',\n            '{\"model\":\"model-name\",\"message\":{\"role\":\"assistant\",\"content\":\"question\"},\"done\":false}',\n            '{\"model\":\"model-name\",\"message\":{\"role\":\"assistant\",\"content\":\".\"},\"done\":true,\"done_reason\":\"stop\"}',\n            '',\n          ],\n        },\n        {\n          endpoint: '/api/generate',\n          query: { model: 'model-name', stream, prompt: 'what is the question?' 
},\n          expectedNonStreamed: {\n            model: 'model-name',\n            response: 'that is a good question',\n            done: true,\n            done_reason: 'stop',\n          },\n          expectedStreamed: [\n            '{\"model\":\"model-name\",\"response\":\"that \",\"done\":false}',\n            '{\"model\":\"model-name\",\"response\":\"is \",\"done\":false}',\n            '{\"model\":\"model-name\",\"response\":\"a \",\"done\":false}',\n            '{\"model\":\"model-name\",\"response\":\"good \",\"done\":false}',\n            '{\"model\":\"model-name\",\"response\":\"question\",\"done\":false}',\n            '{\"model\":\"model-name\",\"response\":\".\",\"done\":true,\"done_reason\":\"stop\"}',\n            '',\n          ],\n        },\n      ])('%o endpoint', ({ endpoint, query, expectedNonStreamed, expectedStreamed }) => {\n        test('calls the service and replies to the prompt', async () => {\n          if (stream || stream === undefined) {\n            const chunks = [\n              {\n                choices: [\n                  {\n                    delta: {\n                      content: 'that ',\n                    },\n                  },\n                ],\n              },\n              {\n                choices: [\n                  {\n                    delta: {\n                      content: 'is ',\n                    },\n                  },\n                ],\n              },\n              {\n                choices: [\n                  {\n                    delta: {\n                      content: 'a ',\n                    },\n                  },\n                ],\n              },\n              {\n                choices: [\n                  {\n                    delta: {\n                      content: 'good ',\n                    },\n                  },\n                ],\n              },\n              {\n                choices: [\n                  {\n                    delta: {\n       
               content: 'question',\n                    },\n                  },\n                ],\n              },\n              {\n                choices: [\n                  {\n                    delta: {\n                      content: '.',\n                    },\n                    finish_reason: 'stop',\n                  },\n                ],\n              },\n            ] as ChatCompletionChunk[];\n            const asyncIterator = (async function* (): AsyncGenerator<\n              OpenAI.Chat.Completions.ChatCompletionChunk,\n              void,\n              unknown\n            > {\n              for (const chunk of chunks) {\n                yield chunk;\n              }\n            })();\n            const response = new Stream<ChatCompletionChunk>(() => asyncIterator, new AbortController());\n            vi.mocked(OpenAI.prototype.chat.completions.create).mockResolvedValue(response);\n          } else {\n            vi.mocked(OpenAI.prototype.chat.completions.create).mockResolvedValue({\n              id: 'id1',\n              choices: [\n                {\n                  message: {\n                    role: 'assistant',\n                    content: 'that is a good question',\n                  },\n                },\n              ],\n            } as unknown as ChatCompletion);\n          }\n          const req = request(server.getListener()!).post(endpoint).send(query);\n          if (stream === false) {\n            const res = await req.expect(200).expect('Content-Type', 'application/json; charset=utf-8');\n            expect(res.body).toEqual(expectedNonStreamed);\n          } else {\n            const res = await req.expect(200).expect('transfer-encoding', 'chunked');\n            const lines = res.text.split('\\n');\n            expect(lines.length).toEqual(expectedStreamed.length);\n            for (const [i, line] of lines.entries()) {\n              expect(line).toEqual(expectedStreamed[i]);\n            }\n          
}\n        });\n      });\n    });\n  });\n});\n\ndescribe('/api/ps', () => {\n  test('returns an error if the model is not known', async () => {\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(inferenceManager.getServers).mockImplementation(() => {\n      throw new Error('model unknown');\n    });\n    const res = await request(server.getListener()!).get('/api/ps').expect(500);\n    expect(res.body).toMatchObject({ message: 'unable to ps' });\n  });\n\n  test('returns empty result if no servers', async () => {\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(inferenceManager.getServers).mockReturnValue([]);\n    const res = await request(server.getListener()!).get('/api/ps').expect(200);\n    expect(res.body).toEqual({ models: [] });\n  });\n\n  test('returns empty result if server is stopped', async () => {\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(inferenceManager.getServers).mockReturnValue([\n      {\n        models: [\n          {\n            id: 'modelId1',\n            name: 'model-name',\n            description: 'model 1',\n          },\n        ],\n        container: {\n          engineId: 'engine1',\n          containerId: 'container1',\n        },\n        status: 'stopped',\n      } as unknown as InferenceServer,\n    ]);\n    const res = await request(server.getListener()!).get('/api/ps').expect(200);\n    expect(res.body).toEqual({ models: [] });\n  });\n\n  test('returns result if server is started', async () => {\n    expect(server.getListener()).toBeDefined();\n    vi.mocked(inferenceManager.getServers).mockReturnValue([\n      {\n        models: [\n          {\n            id: 'modelId1',\n            name: 'model-name',\n            description: 'model 1',\n            memory: 1_000_000,\n          },\n        ],\n        container: {\n          engineId: 'engine1',\n          containerId: 'container1',\n        },\n        status: 'running',\n      } as unknown as InferenceServer,\n    
]);\n    const res = await request(server.getListener()!).get('/api/ps').expect(200);\n    expect(res.body).toEqual({\n      models: [\n        {\n          name: 'model-name',\n          model: 'model-name',\n          size: 1_000_000,\n          digest: 'b48fa42fa5b28c4363747ec0797532e274650f73004383a3054697137d9d1f30',\n        },\n      ],\n    });\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/apiServer.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Disposable } from '@podman-desktop/api';\nimport type { NextFunction, Request, Response } from 'express';\nimport express from 'express';\nimport type { Server } from 'node:http';\nimport path, { resolve } from 'node:path';\nimport http from 'node:http';\nimport { existsSync } from 'node:fs';\nimport * as podmanDesktopApi from '@podman-desktop/api';\nimport { readFile } from 'node:fs/promises';\nimport type { ModelsManager } from './modelsManager';\nimport type { components } from '../../src-generated/openapi';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { ConfigurationRegistry } from '../registries/ConfigurationRegistry';\nimport { getFreeRandomPort } from '../utils/ports';\nimport * as OpenApiValidator from 'express-openapi-validator';\nimport type { HttpError, OpenApiRequest } from 'express-openapi-validator/dist/framework/types';\nimport type { CatalogManager } from './catalogManager';\nimport { isProgressEvent } from '../models/baseEvent';\nimport type { InferenceManager } from './inference/inferenceManager';\nimport { withDefaultConfiguration } from '../utils/inferenceUtils';\nimport type { InferenceServer } from 
'@shared/models/IInference';\nimport OpenAI from 'openai';\nimport type { ChatCompletionMessageParam } from 'openai/resources';\nimport type { ContainerRegistry } from '../registries/ContainerRegistry';\nimport type { Stream } from 'openai/streaming';\nimport crypto from 'node:crypto';\nimport swaggerUi from 'swagger-ui-express';\nimport { getAbsoluteFSPath } from 'swagger-ui-dist';\nimport openAiApi from '../assets/openai.json';\n\nconst SHOW_API_ERROR_COMMAND = 'ai-lab.show-api-error';\n\nexport const PREFERENCE_RANDOM_PORT = 0;\n\ntype ListModelResponse = components['schemas']['ListModelResponse'];\ntype Message = components['schemas']['Message'];\ntype ProcessModelResponse = components['schemas']['ProcessModelResponse'];\n\ninterface SwaggerRequest extends Request {\n  swaggerDoc?: { servers: { description: string; url: string }[] };\n}\n\nfunction asListModelResponse(model: ModelInfo): ListModelResponse {\n  return {\n    model: model.name,\n    name: model.name,\n    digest: toDigest(model.name, model.sha256),\n    size: model.file?.size,\n    modified_at: model.file?.creation?.toISOString(),\n    details: {},\n  };\n}\n\n// ollama expects at least 12 characters for the digest\nfunction toDigest(name: string, sha256?: string): string {\n  return sha256 ?? 
crypto.createHash('sha256').update(name).digest('hex');\n}\n\nfunction asProcessModelResponse(model: ModelInfo): ProcessModelResponse {\n  return {\n    name: model.name,\n    model: model.name,\n    size: model.memory,\n    digest: toDigest(model.name, model.sha256),\n  };\n}\n\nconst LISTENING_ADDRESS = '0.0.0.0';\n\ninterface ChatCompletionOptions {\n  server: InferenceServer;\n  modelInfo: ModelInfo;\n  messages: ChatCompletionMessageParam[];\n  stream: boolean;\n  onStreamResponse: (response: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>) => Promise<void>;\n  onNonStreamResponse: (response: OpenAI.Chat.Completions.ChatCompletion) => void;\n}\n\nexport class ApiServer implements Disposable {\n  #listener?: Server;\n\n  constructor(\n    private extensionContext: podmanDesktopApi.ExtensionContext,\n    private modelsManager: ModelsManager,\n    private catalogManager: CatalogManager,\n    private inferenceManager: InferenceManager,\n    private configurationRegistry: ConfigurationRegistry,\n    private containerRegistry: ContainerRegistry,\n  ) {}\n\n  protected getListener(): Server | undefined {\n    return this.#listener;\n  }\n\n  async init(): Promise<void> {\n    const app = express();\n\n    const router = express.Router();\n    router.use(express.json());\n\n    // validate requests / responses based on openapi spec\n    router.use(\n      OpenApiValidator.middleware({\n        apiSpec: this.getSpecFile(),\n        validateRequests: true,\n        validateResponses: {\n          onError: (error, body, req) => {\n            console.error(`Response body fails validation: `, error);\n            console.error(`Emitted from:`, req.originalUrl);\n            console.error(body);\n          },\n        },\n      }),\n    );\n\n    router.use((err: HttpError, _req: OpenApiRequest, res: Response, _next: NextFunction) => {\n      // format errors from validator\n      res.status(err.status || 500).json({\n        message: err.message,\n        errors: 
err.errors,\n      });\n    });\n\n    // declare routes\n    router.get('/version', this.getVersion.bind(this));\n    router.get('/tags', this.getModels.bind(this));\n    router.post('/pull', this.pullModel.bind(this));\n    router.post('/show', this.show.bind(this));\n    router.post('/generate', this.generate.bind(this));\n    router.post('/chat', this.chat.bind(this));\n    router.get('/ps', this.ps.bind(this));\n    app.get('/', (_req, res) => res.sendStatus(200)); //required for the ollama client to work against us\n    app.use('/api', router);\n    app.use('/spec', this.getSpec.bind(this));\n\n    // get swagger-ui path from dist/swagger-ui\n    const swaggerPath = resolve(getAbsoluteFSPath(), 'swagger-ui');\n\n    app.use(\n      '/api-docs/:portNumber',\n      function (req: SwaggerRequest, res: Response, next: NextFunction) {\n        if (req.path.startsWith('/swagger-ui') && req.path !== '/swagger-ui-init.js') {\n          // serve the swagger-ui-dist file from swaggerPath\n          res.sendFile(path.join(swaggerPath, req.path));\n          return;\n        }\n        const copyOpenAiJson = structuredClone(openAiApi);\n        // Extract the port number from the route parameter and set it as the server URL\n        const portNumber = req.params.portNumber;\n        copyOpenAiJson.servers = [{ description: 'AI Lab Inference Server', url: `http://localhost:${portNumber}` }];\n        req.swaggerDoc = copyOpenAiJson;\n\n        next();\n      },\n      swaggerUi.serveFiles(openAiApi),\n      swaggerUi.setup(),\n    );\n\n    const server = http.createServer(app);\n    let listeningOn = this.configurationRegistry.getExtensionConfiguration().apiPort;\n    server.on('error', () => {\n      this.displayApiError(listeningOn);\n    });\n    if (listeningOn === PREFERENCE_RANDOM_PORT) {\n      getFreeRandomPort(LISTENING_ADDRESS)\n        .then((randomPort: number) => {\n          listeningOn = randomPort;\n          this.#listener = server.listen(listeningOn, 
LISTENING_ADDRESS);\n        })\n        .catch((e: unknown) => {\n          console.error('unable to get a free port for the api server', e);\n        });\n    } else {\n      this.#listener = server.listen(listeningOn, LISTENING_ADDRESS);\n    }\n  }\n\n  displayApiError(port: number): void {\n    const apiStatusBarItem = podmanDesktopApi.window.createStatusBarItem();\n    apiStatusBarItem.text = `AI Lab API listening error`;\n    apiStatusBarItem.command = SHOW_API_ERROR_COMMAND;\n    this.extensionContext.subscriptions.push(\n      podmanDesktopApi.commands.registerCommand(SHOW_API_ERROR_COMMAND, async () => {\n        const address = `http://localhost:${port}`;\n        await podmanDesktopApi.window.showErrorMessage(\n          `AI Lab API failed to listen on\\n${address}\\nYou can change the port in the Preferences then restart the extension.`,\n          'OK',\n        );\n      }),\n      apiStatusBarItem,\n    );\n    apiStatusBarItem.show();\n  }\n\n  private getFile(filepath: string): string {\n    // when plugin is installed, the file is placed in the plugin directory (~/.local/share/containers/podman-desktop/plugins/<pluginname>/)\n    const prodFile = path.join(__dirname, filepath);\n    if (existsSync(prodFile)) {\n      return prodFile;\n    }\n    // return dev file\n    return path.join(__dirname, '..', '..', filepath);\n  }\n\n  getSpecFile(): string {\n    return this.getFile('../api/openapi.yaml');\n  }\n\n  getPackageFile(): string {\n    return this.getFile('../package.json');\n  }\n\n  dispose(): void {\n    this.#listener?.close();\n  }\n\n  private doErr(res: Response, message: string, err: unknown): void {\n    res.status(500).json({\n      message,\n      errors: [err instanceof Error ? 
err.message : err],\n    });\n  }\n\n  getSpec(_req: Request, res: Response): void {\n    try {\n      const spec = this.getSpecFile();\n      readFile(spec, 'utf-8')\n        .then(content => {\n          res.status(200).type('application/yaml').send(content);\n        })\n        .catch((err: unknown) => this.doErr(res, 'unable to get spec', err));\n    } catch (err: unknown) {\n      this.doErr(res, 'unable to get spec', err);\n    }\n  }\n\n  getVersion(_req: Request, res: Response): void {\n    try {\n      const pkg = this.getPackageFile();\n      readFile(pkg, 'utf-8')\n        .then(content => {\n          const json = JSON.parse(content);\n          res.status(200).json({ version: `v${json.version}` });\n        })\n        .catch((err: unknown) => this.doErr(res, 'unable to get version', err));\n    } catch (err: unknown) {\n      this.doErr(res, 'unable to get version', err);\n    }\n  }\n\n  getModels(_req: Request, res: Response): void {\n    try {\n      const models = this.modelsManager\n        .getModelsInfo()\n        .filter(model => this.modelsManager.isModelOnDisk(model.id))\n        .map(model => asListModelResponse(model));\n      res.status(200).json({ models: models });\n    } catch (err: unknown) {\n      this.doErr(res, 'unable to get models', err);\n    }\n  }\n\n  private streamLine(res: Response, obj: unknown): void {\n    res.write(JSON.stringify(obj) + '\\n');\n  }\n\n  private sendResult(res: Response, obj: unknown, code: number, stream: boolean): void {\n    // eslint-disable-next-line sonarjs/no-selector-parameter\n    if (stream) {\n      this.streamLine(res, obj);\n    } else {\n      res.status(code).json(obj);\n    }\n  }\n\n  pullModel(req: Request, res: Response): void {\n    const modelName = req.body['model'] ?? 
req.body['name'];\n    let stream: boolean = true;\n    if ('stream' in req.body) {\n      stream = req.body['stream'];\n    }\n    let modelInfo: ModelInfo;\n\n    if (stream) {\n      this.streamLine(res, { status: 'pulling manifest' });\n    }\n\n    try {\n      modelInfo = this.catalogManager.getModelByName(modelName);\n    } catch {\n      this.sendResult(res, { error: 'pull model manifest: file does not exist' }, 500, stream);\n      res.end();\n      return;\n    }\n\n    if (this.modelsManager.isModelOnDisk(modelInfo.id)) {\n      this.sendResult(\n        res,\n        {\n          status: 'success',\n        },\n        200,\n        stream,\n      );\n      res.end();\n      return;\n    }\n\n    const abortController = new AbortController();\n    const downloader = this.modelsManager.createDownloader(modelInfo, abortController.signal);\n\n    if (stream) {\n      downloader.onEvent(event => {\n        if (isProgressEvent(event) && event.id === modelName) {\n          this.streamLine(res, {\n            status: `pulling ${modelInfo.sha256}`,\n            digest: `sha256:${modelInfo.sha256}`,\n            total: event.total,\n            completed: Math.round((event.total * event.value) / 100),\n          });\n        }\n      }, this);\n    }\n\n    downloader\n      .perform(modelName)\n      .then(async () => {\n        await this.modelsManager.getLocalModelsFromDisk();\n        await this.modelsManager.sendModelsInfo();\n        this.sendResult(\n          res,\n          {\n            status: 'success',\n          },\n          200,\n          stream,\n        );\n      })\n      .catch((err: unknown) => {\n        this.sendResult(\n          res,\n          {\n            error: String(err),\n          },\n          500,\n          stream,\n        );\n      })\n      .finally(() => {\n        res.end();\n      });\n  }\n\n  show(req: Request, res: Response): void {\n    res.status(200).json({});\n    res.end();\n  }\n\n  // makeServerAvailable 
checks if an inference server for the model exists and is started\n  // if not, it creates and/or starts it, and wait for the service to be healthy\n  private async makeServerAvailable(modelInfo: ModelInfo): Promise<InferenceServer> {\n    let servers = this.inferenceManager.getServers();\n    let server = servers.find(s => s.models.map(mi => mi.id).includes(modelInfo.id));\n    if (!server) {\n      const config = await withDefaultConfiguration({\n        modelsInfo: [modelInfo],\n      });\n      await this.inferenceManager.createInferenceServer(config);\n    } else if (server.status === 'stopped') {\n      await this.inferenceManager.startInferenceServer(server.container.containerId);\n    } else {\n      return server;\n    }\n    servers = this.inferenceManager.getServers();\n    server = servers.find(s => s.models.map(mi => mi.id).includes(modelInfo.id));\n    if (!server) {\n      throw new Error('unable to start inference server');\n    }\n\n    // wait for the container to be healthy\n    return new Promise(resolve => {\n      const disposable = this.containerRegistry.onHealthyContainerEvent(event => {\n        if (event.id !== server.container.containerId) {\n          return;\n        }\n        disposable.dispose();\n        resolve(server);\n      });\n      if (server.status === 'running' && server.health?.Status === 'healthy') {\n        disposable.dispose();\n        resolve(server);\n      }\n    });\n  }\n\n  // openAIChatCompletions executes a chat completion on an OpenAI compatible API\n  private async openAIChatCompletions(options: ChatCompletionOptions): Promise<void> {\n    if (!options.modelInfo.file) {\n      throw new Error('model info has undefined file.');\n    }\n    const client = new OpenAI({\n      baseURL: `http://localhost:${options.server.connection.port}/v1`,\n      apiKey: 'dummy',\n    });\n    const createOptions = {\n      messages: options.messages,\n      model: options.modelInfo.name,\n    };\n    // we call `create` with 
a fixed value of `stream`, to get the specific type of `response`, either Stream<T>, or T\n    if (options.stream) {\n      const response = await client.chat.completions.create({ ...createOptions, stream: options.stream });\n      await options.onStreamResponse(response);\n    } else {\n      const response = await client.chat.completions.create({ ...createOptions, stream: options.stream });\n      options.onNonStreamResponse(response);\n    }\n  }\n\n  // checkModelAvailability checks if a model is in the catalog\n  // AND has been downloaded by the user\n  private checkModelAvailability(modelName: string): ModelInfo {\n    let modelInfo: ModelInfo;\n    try {\n      modelInfo = this.catalogManager.getModelByName(modelName);\n    } catch {\n      throw `chat: model \"${modelName}\" does not exist`;\n    }\n\n    if (!this.modelsManager.isModelOnDisk(modelInfo.id)) {\n      throw `chat: model \"${modelName}\" not found, try pulling it first`;\n    }\n    return modelInfo;\n  }\n\n  // generate first starts the service if necessary\n  // If a prompt is given, it runs a chat completion with a single message and returns the result\n  generate(req: Request, res: Response): void {\n    let stream: boolean = true;\n    if ('stream' in req.body) {\n      stream = req.body['stream'];\n    }\n\n    const prompt = req.body['prompt'];\n\n    const modelName = req.body['model'];\n    let modelInfo: ModelInfo;\n    try {\n      modelInfo = this.checkModelAvailability(modelName);\n    } catch (error) {\n      this.sendResult(res, { error }, 500, stream);\n      res.end();\n      return;\n    }\n\n    // create/start inference server if necessary\n    this.makeServerAvailable(modelInfo)\n      .then(async (server: InferenceServer) => {\n        if (!prompt) {\n          this.sendResult(\n            res,\n            {\n              model: modelName,\n              response: '',\n              done: true,\n              done_reason: 'load',\n            },\n            200,\n   
         stream,\n          );\n          res.end();\n          return;\n        }\n\n        const messages = [\n          {\n            content: prompt,\n            role: 'user',\n            name: undefined,\n          } as ChatCompletionMessageParam,\n        ];\n\n        await this.openAIChatCompletions({\n          server,\n          modelInfo,\n          messages,\n          stream,\n          onStreamResponse: async response => {\n            for await (const chunk of response) {\n              res.write(\n                JSON.stringify({\n                  model: modelName,\n                  response: chunk.choices[0].delta.content ?? '',\n                  done: chunk.choices[0].finish_reason === 'stop',\n                  done_reason: chunk.choices[0].finish_reason === 'stop' ? 'stop' : undefined,\n                }) + '\\n',\n              );\n            }\n            res.end();\n          },\n          onNonStreamResponse: response => {\n            res.status(200).json({\n              model: modelName,\n              response: response.choices[0].message.content ?? 
'',\n              done: true,\n              done_reason: 'stop',\n            });\n            res.end();\n          },\n        });\n      })\n      .catch((err: unknown) => console.error(`unable to check if the inference server is running: ${err}`));\n  }\n\n  // chat first starts the service if necessary\n  // then runs a chat completion and returns the result\n  chat(req: Request, res: Response): void {\n    let stream: boolean = true;\n    if ('stream' in req.body) {\n      stream = req.body['stream'];\n    }\n\n    const messagesUser: Message[] = req.body['messages'];\n\n    const modelName = req.body['model'];\n    let modelInfo: ModelInfo;\n    try {\n      modelInfo = this.checkModelAvailability(modelName);\n    } catch (error) {\n      this.sendResult(res, { error }, 500, stream);\n      res.end();\n      return;\n    }\n\n    // create/start inference server if necessary\n\n    this.makeServerAvailable(modelInfo)\n      .then(async (server: InferenceServer) => {\n        const messages = messagesUser.map(\n          message =>\n            ({\n              name: undefined,\n              ...message,\n            }) as ChatCompletionMessageParam,\n        );\n\n        await this.openAIChatCompletions({\n          server,\n          modelInfo,\n          messages,\n          stream,\n          onStreamResponse: async response => {\n            for await (const chunk of response) {\n              res.write(\n                JSON.stringify({\n                  model: modelName,\n                  message: {\n                    role: 'assistant',\n                    content: chunk.choices[0].delta.content ?? '',\n                  },\n                  done: chunk.choices[0].finish_reason === 'stop',\n                  done_reason: chunk.choices[0].finish_reason === 'stop' ? 
'stop' : undefined,\n                }) + '\\n',\n              );\n            }\n            res.end();\n          },\n          onNonStreamResponse: response => {\n            res.status(200).json({\n              model: modelName,\n              message: {\n                role: 'assistant',\n                content: response.choices[0].message.content ?? '',\n              },\n              done: true,\n              done_reason: 'stop',\n            });\n            res.end();\n          },\n        });\n      })\n      .catch((err: unknown) => console.error(`unable to check if the inference server is running: ${err}`));\n  }\n\n  ps(_req: Request, res: Response): void {\n    try {\n      const models = this.inferenceManager\n        .getServers()\n        .filter(server => server.status === 'running')\n        .flatMap(server => server.models)\n        .map(model => asProcessModelResponse(model));\n      res.status(200).json({ models });\n    } catch (err: unknown) {\n      this.doErr(res, 'unable to ps', err);\n    }\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/application/applicationManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { ContainerProviderConnection, PodInfo, TelemetryLogger } from '@podman-desktop/api';\nimport { containerEngine, window } from '@podman-desktop/api';\nimport type { PodmanConnection } from '../podmanConnection';\nimport type { CatalogManager } from '../catalogManager';\nimport type { ModelsManager } from '../modelsManager';\nimport type { PodManager } from '../recipes/PodManager';\nimport type { RecipeManager } from '../recipes/RecipeManager';\nimport { ApplicationManager } from './applicationManager';\nimport type { Recipe, RecipeImage } from '@shared/models/IRecipe';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { VMType } from '@shared/models/IPodman';\nimport { POD_LABEL_MODEL_ID, POD_LABEL_RECIPE_ID } from '../../utils/RecipeConstants';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport type { LlamaStackManager } from '../llama-stack/llamaStackManager';\nimport type { ApplicationOptions } from 
'../../models/ApplicationOptions';\n\nconst taskRegistryMock = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n  deleteByLabels: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nconst podmanConnectionMock = {\n  onPodmanConnectionEvent: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst catalogManagerMock = {} as unknown as CatalogManager;\n\nconst modelsManagerMock = {\n  requestDownloadModel: vi.fn(),\n  uploadModelToPodmanMachine: vi.fn(),\n} as unknown as ModelsManager;\n\nconst telemetryMock = {\n  logError: vi.fn(),\n  logUsage: vi.fn(),\n} as unknown as TelemetryLogger;\n\nconst podManager = {\n  onStartPodEvent: vi.fn(),\n  onRemovePodEvent: vi.fn(),\n  getPodsWithLabels: vi.fn(),\n  createPod: vi.fn(),\n  getPod: vi.fn(),\n  findPodByLabelsValues: vi.fn(),\n  startPod: vi.fn(),\n  stopPod: vi.fn(),\n  removePod: vi.fn(),\n} as unknown as PodManager;\n\nconst recipeManager = {\n  cloneRecipe: vi.fn(),\n  buildRecipe: vi.fn(),\n} as unknown as RecipeManager;\n\nconst llamaStackManager = {\n  getLlamaStackContainers: vi.fn(),\n} as unknown as LlamaStackManager;\n\nvi.mock('@podman-desktop/api', () => ({\n  window: {\n    withProgress: vi.fn(),\n  },\n  ProgressLocation: {\n    TASK_WIDGET: 'task-widget',\n  },\n  provider: {\n    getContainerConnections: vi.fn(),\n  },\n  containerEngine: {\n    createContainer: vi.fn(),\n  },\n  Disposable: {\n    create: vi.fn(),\n  },\n}));\n\nconst recipeMock: Recipe = {\n  id: 'recipe-test',\n  name: 'Test Recipe',\n  categories: [],\n  description: 'test recipe description',\n  repository: 'http://test-repository.test',\n  readme: 'test recipe readme',\n};\n\nconst remoteModelMock: ModelInfo = {\n  id: 'model-test',\n  name: 'Test Model',\n  description: 'test model description',\n  url: 'http://test-repository.test',\n};\n\nconst recipeImageInfoMock: RecipeImage = {\n  name: 'test recipe image info',\n  id: 'test-recipe-image-info',\n  
appName: 'test-app-name',\n  engineId: 'test-engine-id',\n  ports: [],\n  modelService: false,\n  recipeId: recipeMock.id,\n};\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'Podman Machine',\n  vmType: VMType.UNKNOWN,\n} as unknown as ContainerProviderConnection;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n  vi.mocked(recipeManager.buildRecipe).mockResolvedValue({ images: [recipeImageInfoMock] });\n  vi.mocked(podManager.createPod).mockResolvedValue({ engineId: 'test-engine-id', Id: 'test-pod-id' });\n  vi.mocked(podManager.getPod).mockResolvedValue({ engineId: 'test-engine-id', Id: 'test-pod-id' } as PodInfo);\n  vi.mocked(podManager.getPodsWithLabels).mockResolvedValue([]);\n  vi.mocked(taskRegistryMock.createTask).mockImplementation((name, state, labels) => ({\n    name,\n    state,\n    labels,\n    id: 'fake-task',\n  }));\n  vi.mocked(modelsManagerMock.uploadModelToPodmanMachine).mockResolvedValue('downloaded-model-path');\n  vi.mocked(llamaStackManager.getLlamaStackContainers).mockResolvedValue({\n    server: { containerId: 'container1', port: 10001, state: 'running' },\n    playground: { containerId: 'playground1', port: 10002, state: 'running' },\n  });\n});\n\nfunction getInitializedApplicationManager(): ApplicationManager {\n  const manager = new ApplicationManager(\n    taskRegistryMock,\n    rpcExtensionMock,\n    podmanConnectionMock,\n    catalogManagerMock,\n    modelsManagerMock,\n    telemetryMock,\n    podManager,\n    recipeManager,\n    llamaStackManager,\n  );\n\n  manager.init();\n  return manager;\n}\n\ndescribe('requestPullApplication', () => {\n  test('task should be set to error if pull application raise an error', async () => {\n    vi.mocked(window.withProgress).mockRejectedValue(new Error('pull application error'));\n    const trackingId = await getInitializedApplicationManager().requestPullApplication({\n      connection: connectionMock,\n      
recipe: recipeMock,\n      model: remoteModelMock,\n    });\n\n    // ensure the task is created\n    await vi.waitFor(() => {\n      expect(taskRegistryMock.createTask).toHaveBeenCalledWith(`Pulling ${recipeMock.name} recipe`, 'loading', {\n        trackingId: trackingId,\n        'recipe-pulling': recipeMock.id,\n      });\n    });\n\n    // ensure the task is updated\n    await vi.waitFor(() => {\n      expect(taskRegistryMock.updateTask).toHaveBeenCalledWith(\n        expect.objectContaining({\n          state: 'error',\n        }),\n      );\n    });\n  });\n});\n\ndescribe('stopApplication', () => {\n  test('calling stop with exited pod should not create task', async () => {\n    vi.mocked(podManager.findPodByLabelsValues).mockResolvedValue({\n      engineId: 'test-engine-id',\n      Id: 'test-pod-id-existing',\n      Labels: {\n        [POD_LABEL_MODEL_ID]: remoteModelMock.id,\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n      Status: 'Exited',\n    } as unknown as PodInfo);\n\n    await getInitializedApplicationManager().stopApplication(recipeMock.id, remoteModelMock.id);\n\n    expect(taskRegistryMock.createTask).not.toHaveBeenCalled();\n    expect(podManager.stopPod).not.toHaveBeenCalled();\n  });\n\n  test('calling stop application with running pod should create stop task ', async () => {\n    vi.mocked(podManager.findPodByLabelsValues).mockResolvedValue({\n      engineId: 'test-engine-id',\n      Id: 'test-pod-id-existing',\n      Labels: {\n        [POD_LABEL_MODEL_ID]: remoteModelMock.id,\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n      Status: 'Running',\n    } as unknown as PodInfo);\n\n    await getInitializedApplicationManager().stopApplication(recipeMock.id, remoteModelMock.id);\n\n    expect(taskRegistryMock.createTask).toHaveBeenCalledWith('Stopping AI App', 'loading', {\n      'recipe-id': recipeMock.id,\n      'model-id': remoteModelMock.id,\n    });\n    
expect(podManager.stopPod).toHaveBeenCalledWith('test-engine-id', 'test-pod-id-existing');\n  });\n\n  test('error raised should make the task as failed', async () => {\n    vi.mocked(podManager.findPodByLabelsValues).mockResolvedValue({\n      engineId: 'test-engine-id',\n      Id: 'test-pod-id-existing',\n      Labels: {\n        [POD_LABEL_MODEL_ID]: remoteModelMock.id,\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n      Status: 'Running',\n    } as unknown as PodInfo);\n\n    vi.mocked(podManager.stopPod).mockRejectedValue(new Error('stop pod error'));\n\n    await expect(() => {\n      return getInitializedApplicationManager().stopApplication(recipeMock.id, remoteModelMock.id);\n    }).rejects.toThrowError('stop pod error');\n\n    expect(taskRegistryMock.updateTask).toHaveBeenCalledWith(\n      expect.objectContaining({\n        state: 'error',\n      }),\n    );\n  });\n});\n\ndescribe('startApplication', () => {\n  test('expect startPod in podManager to be properly called', async () => {\n    vi.mocked(podManager.findPodByLabelsValues).mockResolvedValue({\n      engineId: 'test-engine-id',\n      Id: 'test-pod-id-existing',\n      Labels: {\n        [POD_LABEL_MODEL_ID]: remoteModelMock.id,\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n      Status: 'Exited',\n    } as unknown as PodInfo);\n\n    await getInitializedApplicationManager().startApplication(recipeMock.id, remoteModelMock.id);\n\n    expect(podManager.startPod).toHaveBeenCalledWith('test-engine-id', 'test-pod-id-existing');\n  });\n\n  test('error raised should make the task as failed', async () => {\n    vi.mocked(podManager.findPodByLabelsValues).mockResolvedValue({\n      engineId: 'test-engine-id',\n      Id: 'test-pod-id-existing',\n      Labels: {\n        [POD_LABEL_MODEL_ID]: remoteModelMock.id,\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n      Status: 'Exited',\n    } as unknown as PodInfo);\n\n    
vi.mocked(podManager.startPod).mockRejectedValue(new Error('start pod error'));\n\n    await expect(() => {\n      return getInitializedApplicationManager().startApplication(recipeMock.id, remoteModelMock.id);\n    }).rejects.toThrowError('start pod error');\n\n    expect(taskRegistryMock.updateTask).toHaveBeenCalledWith(\n      expect.objectContaining({\n        state: 'error',\n      }),\n    );\n  });\n});\n\ndescribe.each([true, false])('pullApplication, with model is %o', withModel => {\n  let applicationOptions: ApplicationOptions;\n  beforeEach(() => {\n    applicationOptions = withModel\n      ? {\n          connection: connectionMock,\n          recipe: recipeMock,\n          model: remoteModelMock,\n        }\n      : {\n          connection: connectionMock,\n          recipe: recipeMock,\n          dependencies: {\n            llamaStack: true,\n          },\n        };\n  });\n\n  test('labels should be propagated', async () => {\n    await getInitializedApplicationManager().pullApplication(applicationOptions, {\n      'test-label': 'test-value',\n    });\n\n    // clone the recipe\n    expect(recipeManager.cloneRecipe).toHaveBeenCalledWith(recipeMock, {\n      'test-label': 'test-value',\n      'model-id': withModel ? 
remoteModelMock.id : '<none>',\n    });\n    if (withModel) {\n      // download model\n      expect(modelsManagerMock.requestDownloadModel).toHaveBeenCalledWith(remoteModelMock, {\n        'test-label': 'test-value',\n        'recipe-id': recipeMock.id,\n        'model-id': remoteModelMock.id,\n      });\n      // upload model to podman machine\n      expect(modelsManagerMock.uploadModelToPodmanMachine).toHaveBeenCalledWith(connectionMock, remoteModelMock, {\n        'test-label': 'test-value',\n        'recipe-id': recipeMock.id,\n        'model-id': remoteModelMock.id,\n      });\n    }\n    // build the recipe\n    expect(recipeManager.buildRecipe).toHaveBeenCalledWith(\n      {\n        connection: connectionMock,\n        recipe: recipeMock,\n        model: withModel ? remoteModelMock : undefined,\n        dependencies: applicationOptions.dependencies,\n      },\n      {\n        'test-label': 'test-value',\n        'recipe-id': recipeMock.id,\n        'model-id': withModel ? remoteModelMock.id : '<none>',\n      },\n    );\n    // create AI App task must be created\n    expect(taskRegistryMock.createTask).toHaveBeenCalledWith('Creating AI App', 'loading', {\n      'test-label': 'test-value',\n      'recipe-id': recipeMock.id,\n      'model-id': withModel ? remoteModelMock.id : '<none>',\n    });\n\n    // a pod must have been created\n    expect(podManager.createPod).toHaveBeenCalledWith({\n      provider: connectionMock,\n      name: expect.any(String),\n      portmappings: [],\n      labels: {\n        [POD_LABEL_MODEL_ID]: withModel ? remoteModelMock.id : '<none>',\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith('test-engine-id', {\n      Image: recipeImageInfoMock.id,\n      name: expect.any(String),\n      Env: withModel ? 
[] : ['MODEL_ENDPOINT=http://host.containers.internal:10001'],\n      HealthCheck: undefined,\n      HostConfig: undefined,\n      Detach: true,\n      pod: 'test-pod-id',\n      start: false,\n    });\n\n    // finally the pod must be started\n    expect(podManager.startPod).toHaveBeenCalledWith('test-engine-id', 'test-pod-id');\n  });\n\n  test('requestDownloadModel skipped with inference server', async () => {\n    vi.mocked(recipeManager.buildRecipe).mockResolvedValue({\n      images: [recipeImageInfoMock],\n      inferenceServer: {\n        connection: {\n          port: 56001,\n        },\n      } as InferenceServer,\n    });\n    vi.mocked(modelsManagerMock.requestDownloadModel).mockResolvedValue('/path/to/model');\n    await getInitializedApplicationManager().pullApplication(applicationOptions, {\n      'test-label': 'test-value',\n    });\n\n    // clone the recipe\n    expect(recipeManager.cloneRecipe).toHaveBeenCalledWith(recipeMock, {\n      'test-label': 'test-value',\n      'model-id': withModel ? remoteModelMock.id : '<none>',\n    });\n    if (withModel) {\n      // download model\n      expect(modelsManagerMock.requestDownloadModel).toHaveBeenCalledWith(remoteModelMock, {\n        'test-label': 'test-value',\n        'recipe-id': recipeMock.id,\n        'model-id': remoteModelMock.id,\n      });\n      // upload model to podman machine\n      expect(modelsManagerMock.uploadModelToPodmanMachine).not.toHaveBeenCalled();\n    }\n    // build the recipe\n    expect(recipeManager.buildRecipe).toHaveBeenCalledWith(\n      {\n        connection: connectionMock,\n        recipe: recipeMock,\n        model: withModel ? remoteModelMock : undefined,\n        dependencies: applicationOptions.dependencies,\n      },\n      {\n        'test-label': 'test-value',\n        'recipe-id': recipeMock.id,\n        'model-id': withModel ? 
remoteModelMock.id : '<none>',\n      },\n    );\n    // create AI App task must be created\n    expect(taskRegistryMock.createTask).toHaveBeenCalledWith('Creating AI App', 'loading', {\n      'test-label': 'test-value',\n      'recipe-id': recipeMock.id,\n      'model-id': withModel ? remoteModelMock.id : '<none>',\n    });\n\n    // a pod must have been created\n    expect(podManager.createPod).toHaveBeenCalledWith({\n      provider: connectionMock,\n      name: expect.any(String),\n      portmappings: [],\n      labels: {\n        [POD_LABEL_MODEL_ID]: withModel ? remoteModelMock.id : '<none>',\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith('test-engine-id', {\n      Image: recipeImageInfoMock.id,\n      name: expect.any(String),\n      Env: withModel\n        ? ['MODEL_ENDPOINT=http://host.containers.internal:56001']\n        : ['MODEL_ENDPOINT=http://host.containers.internal:10001'],\n      HealthCheck: undefined,\n      HostConfig: undefined,\n      Detach: true,\n      pod: 'test-pod-id',\n      start: false,\n    });\n\n    // finally the pod must be started\n    expect(podManager.startPod).toHaveBeenCalledWith('test-engine-id', 'test-pod-id');\n  });\n\n  test('existing application should be removed', async () => {\n    vi.mocked(podManager.findPodByLabelsValues).mockResolvedValue({\n      engineId: 'test-engine-id',\n      Id: 'test-pod-id-existing',\n      Labels: {\n        [POD_LABEL_MODEL_ID]: remoteModelMock.id,\n        [POD_LABEL_RECIPE_ID]: recipeMock.id,\n      },\n    } as unknown as PodInfo);\n\n    await getInitializedApplicationManager().pullApplication(applicationOptions);\n\n    // removing existing application should create a task to notify the user\n    expect(taskRegistryMock.createTask).toHaveBeenCalledWith('Removing AI App', 'loading', {\n      'recipe-id': recipeMock.id,\n      'model-id': withModel ? 
remoteModelMock.id : '<none>',\n    });\n    // the remove pod should have been called\n    expect(podManager.removePod).toHaveBeenCalledWith('test-engine-id', 'test-pod-id-existing');\n  });\n\n  test('qemu connection should have specific flag', async () => {\n    vi.mocked(podManager.findPodByLabelsValues).mockResolvedValue(undefined);\n\n    vi.mocked(recipeManager.buildRecipe).mockResolvedValue({\n      images: [\n        recipeImageInfoMock,\n        {\n          modelService: true,\n          ports: ['8888'],\n          name: 'llamacpp',\n          id: 'llamacpp',\n          appName: 'llamacpp',\n          engineId: recipeImageInfoMock.engineId,\n          recipeId: recipeMock.id,\n        },\n      ],\n    });\n\n    await getInitializedApplicationManager().pullApplication(applicationOptions);\n\n    // the remove pod should have been called\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(\n      recipeImageInfoMock.engineId,\n      expect.objectContaining({\n        HostConfig: withModel\n          ? {\n              Mounts: [\n                {\n                  Mode: 'Z',\n                  Source: 'downloaded-model-path',\n                  Target: '/downloaded-model-path',\n                  Type: 'bind',\n                },\n              ],\n            }\n          : undefined,\n      }),\n    );\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/application/applicationManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { RecipeComponents, RecipeImage } from '@shared/models/IRecipe';\nimport * as path from 'node:path';\nimport { containerEngine, Disposable, window, ProgressLocation } from '@podman-desktop/api';\nimport type {\n  PodCreatePortOptions,\n  TelemetryLogger,\n  PodInfo,\n  HostConfig,\n  HealthConfig,\n  PodContainerInfo,\n  ContainerProviderConnection,\n} from '@podman-desktop/api';\nimport type { ModelsManager } from '../modelsManager';\nimport { getPortsFromLabel, getPortsInfo } from '../../utils/ports';\nimport { getDurationSecondsSince, timeout } from '../../utils/utils';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport type { PodmanConnection } from '../podmanConnection';\nimport { MSG_APPLICATIONS_STATE_UPDATE } from '@shared/Messages';\nimport type { CatalogManager } from '../catalogManager';\nimport { ApplicationRegistry } from '../../registries/ApplicationRegistry';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { Publisher } from '../../utils/Publisher';\nimport { getModelPropertiesForEnvironment } from '../../utils/modelsUtils';\nimport { getRandomName, getRandomString } from 
'../../utils/randomUtils';\nimport type { PodManager } from '../recipes/PodManager';\nimport { SECOND } from '../../workers/provider/LlamaCppPython';\nimport type { RecipeManager } from '../recipes/RecipeManager';\nimport {\n  POD_LABEL_APP_PORTS,\n  POD_LABEL_MODEL_ID,\n  POD_LABEL_MODEL_PORTS,\n  POD_LABEL_RECIPE_ID,\n} from '../../utils/RecipeConstants';\nimport { VMType } from '@shared/models/IPodman';\nimport { RECIPE_START_ROUTE } from '../../registries/NavigationRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { TaskRunner } from '../TaskRunner';\nimport { getInferenceType } from '../../utils/inferenceUtils';\nimport type { LlamaStackManager } from '../llama-stack/llamaStackManager';\nimport { isApplicationOptionsWithModelInference, type ApplicationOptions } from '../../models/ApplicationOptions';\n\nexport class ApplicationManager extends Publisher<ApplicationState[]> implements Disposable {\n  #applications: ApplicationRegistry<ApplicationState>;\n  protectTasks: Set<string> = new Set();\n  #disposables: Disposable[];\n  #taskRunner: TaskRunner;\n\n  constructor(\n    private taskRegistry: TaskRegistry,\n    rpcExtension: RpcExtension,\n    private podmanConnection: PodmanConnection,\n    private catalogManager: CatalogManager,\n    private modelsManager: ModelsManager,\n    private telemetry: TelemetryLogger,\n    private podManager: PodManager,\n    private recipeManager: RecipeManager,\n    private llamaStackManager: LlamaStackManager,\n  ) {\n    super(rpcExtension, MSG_APPLICATIONS_STATE_UPDATE, () => this.getApplicationsState());\n    this.#applications = new ApplicationRegistry<ApplicationState>();\n    this.#taskRunner = new TaskRunner(this.taskRegistry);\n    this.#disposables = [];\n  }\n\n  async requestPullApplication(options: ApplicationOptions): Promise<string> {\n    // create a tracking id to put in the labels\n    const trackingId: string = getRandomString();\n\n    const labels: Record<string, string> 
= {\n      trackingId: trackingId,\n    };\n\n    this.#taskRunner\n      .runAsTask(\n        {\n          ...labels,\n          'recipe-pulling': options.recipe.id, // this label should only be on the master task\n        },\n        {\n          loadingLabel: `Pulling ${options.recipe.name} recipe`,\n          errorMsg: err => `Something went wrong while pulling ${options.recipe.name}: ${String(err)}`,\n        },\n        () =>\n          window.withProgress(\n            {\n              location: ProgressLocation.TASK_WIDGET,\n              title: `Pulling ${options.recipe.name}.`,\n              details: {\n                routeId: RECIPE_START_ROUTE,\n                routeArgs: [options.recipe.id, trackingId],\n              },\n            },\n            () => this.pullApplication(options, labels),\n          ),\n      )\n      .catch(() => {});\n\n    return trackingId;\n  }\n\n  async pullApplication(options: ApplicationOptions, labels: Record<string, string> = {}): Promise<void> {\n    let modelId: string;\n    if (isApplicationOptionsWithModelInference(options)) {\n      modelId = options.model.id;\n    } else {\n      modelId = '<none>';\n    }\n\n    // clear any existing status / tasks related to the pair recipeId-modelId.\n    this.taskRegistry.deleteByLabels({\n      'recipe-id': options.recipe.id,\n      'model-id': modelId,\n    });\n\n    const startTime = performance.now();\n    try {\n      // init application (git clone, models download etc.)\n      const podInfo: PodInfo = await this.initApplication(options, labels);\n      // start the pod\n      await this.runApplication(podInfo, {\n        ...labels,\n        'recipe-id': options.recipe.id,\n        'model-id': modelId,\n      });\n\n      // measure init + start time\n      const durationSeconds = getDurationSecondsSince(startTime);\n      this.telemetry.logUsage('recipe.pull', {\n        'recipe.id': options.recipe.id,\n        'recipe.name': options.recipe.name,\n        
durationSeconds,\n      });\n    } catch (err: unknown) {\n      const durationSeconds = getDurationSecondsSince(startTime);\n      this.telemetry.logError('recipe.pull', {\n        'recipe.id': options.recipe.id,\n        'recipe.name': options.recipe.name,\n        durationSeconds,\n        message: 'error pulling application',\n        error: err,\n      });\n      throw err;\n    }\n  }\n\n  /**\n   * This method will execute the following tasks\n   * - git clone\n   * - git checkout\n   * - register local repository\n   * - download models\n   * - upload models\n   * - build containers\n   * - create pod\n   *\n   * @param options the application options (connection, recipe and optional model)\n   * @param labels\n   * @private\n   */\n  private async initApplication(options: ApplicationOptions, labels: Record<string, string> = {}): Promise<PodInfo> {\n    let modelId: string;\n    if (isApplicationOptionsWithModelInference(options)) {\n      modelId = options.model.id;\n    } else {\n      modelId = '<none>';\n    }\n\n    // clone the recipe\n    await this.recipeManager.cloneRecipe(options.recipe, { ...labels, 'model-id': modelId });\n\n    let modelPath: string | undefined;\n    if (isApplicationOptionsWithModelInference(options)) {\n      // get model by downloading it or retrieving locally\n      modelPath = await this.modelsManager.requestDownloadModel(options.model, {\n        ...labels,\n        'recipe-id': options.recipe.id,\n        'model-id': modelId,\n      });\n    }\n    // build all images, one per container (for a basic sample we should have 2 containers = sample app + model service)\n    const recipeComponents = await this.recipeManager.buildRecipe(options, {\n      ...labels,\n      'recipe-id': options.recipe.id,\n      'model-id': modelId,\n    });\n\n    if (isApplicationOptionsWithModelInference(options)) {\n      // upload model to podman machine if user system is supported\n      if (!recipeComponents.inferenceServer) {\n        modelPath = await 
this.modelsManager.uploadModelToPodmanMachine(options.connection, options.model, {\n          ...labels,\n          'recipe-id': options.recipe.id,\n          'model-id': modelId,\n        });\n      }\n    }\n\n    // first delete any existing pod with matching labels\n    if (await this.hasApplicationPod(options.recipe.id, modelId)) {\n      await this.removeApplication(options.recipe.id, modelId);\n    }\n\n    // create a pod containing all the containers to run the application\n    return this.createApplicationPod(options, recipeComponents, modelPath, {\n      ...labels,\n      'recipe-id': options.recipe.id,\n      'model-id': modelId,\n    });\n  }\n\n  /**\n   * Given an ApplicationPodInfo, start the corresponding pod\n   * @param podInfo\n   * @param labels\n   */\n  protected async runApplication(podInfo: PodInfo, labels?: { [key: string]: string }): Promise<void> {\n    await this.#taskRunner.runAsTask(\n      labels ?? {},\n      {\n        loadingLabel: 'Starting AI App',\n        successLabel: 'AI App is running',\n        errorMsg: err => String(err),\n      },\n      async () => {\n        await this.podManager.startPod(podInfo.engineId, podInfo.Id);\n\n        // check if all containers have started successfully\n        for (const container of podInfo.Containers ?? 
[]) {\n          await this.waitContainerIsRunning(podInfo.engineId, container);\n        }\n      },\n    );\n    return this.checkPodsHealth();\n  }\n\n  protected async waitContainerIsRunning(engineId: string, container: PodContainerInfo): Promise<void> {\n    const TIME_FRAME_MS = 5000;\n    const MAX_ATTEMPTS = 60 * (60000 / TIME_FRAME_MS); // try for 1 hour\n    for (let i = 0; i < MAX_ATTEMPTS; i++) {\n      const sampleAppContainerInspectInfo = await containerEngine.inspectContainer(engineId, container.Id);\n      if (sampleAppContainerInspectInfo.State.Running) {\n        return;\n      }\n      await timeout(TIME_FRAME_MS);\n    }\n    throw new Error(`Container ${container.Id} not started in time`);\n  }\n\n  protected async createApplicationPod(\n    options: ApplicationOptions,\n    components: RecipeComponents,\n    modelPath: string | undefined,\n    labels?: { [key: string]: string },\n  ): Promise<PodInfo> {\n    return this.#taskRunner.runAsTask<PodInfo>(\n      labels ?? {},\n      {\n        loadingLabel: 'Creating AI App',\n        errorMsg: err => `Something went wrong while creating pod: ${String(err)}`,\n      },\n      async ({ updateLabels }): Promise<PodInfo> => {\n        const podInfo = await this.createPod(options, components.images);\n        updateLabels(labels => ({\n          ...labels,\n          'pod-id': podInfo.Id,\n        }));\n        await this.createContainerAndAttachToPod(options, podInfo, components, modelPath, labels);\n        return podInfo;\n      },\n    );\n  }\n\n  protected async createContainerAndAttachToPod(\n    options: ApplicationOptions,\n    podInfo: PodInfo,\n    components: RecipeComponents,\n    modelPath: string | undefined,\n    labels?: { [key: string]: string },\n  ): Promise<void> {\n    const vmType = options.connection.vmType ?? 
VMType.UNKNOWN;\n    // temporary check to set Z flag or not - to be removed when switching to podman 5\n    await Promise.all(\n      components.images.map(async image => {\n        let hostConfig: HostConfig | undefined = undefined;\n        let envs: string[] = [];\n        let healthcheck: HealthConfig | undefined = undefined;\n        // if it's a model service we mount the model as a volume\n        if (modelPath && isApplicationOptionsWithModelInference(options)) {\n          if (image.modelService) {\n            const modelName = path.basename(modelPath);\n            hostConfig = {\n              Mounts: [\n                {\n                  Target: `/${modelName}`,\n                  Source: modelPath,\n                  Type: 'bind',\n                  Mode: vmType === VMType.QEMU ? undefined : 'Z',\n                },\n              ],\n            };\n            envs = [`MODEL_PATH=/${modelName}`];\n            envs.push(...getModelPropertiesForEnvironment(options.model));\n          } else if (components.inferenceServer) {\n            const endPoint = `http://host.containers.internal:${components.inferenceServer.connection.port}`;\n            envs = [`MODEL_ENDPOINT=${endPoint}`];\n          } else {\n            const modelService = components.images.find(image => image.modelService);\n            if (modelService && modelService.ports.length > 0) {\n              const endPoint = `http://localhost:${modelService.ports[0]}`;\n              envs = [`MODEL_ENDPOINT=${endPoint}`];\n            }\n          }\n        } else if (options.dependencies?.llamaStack) {\n          let stack = await this.llamaStackManager.getLlamaStackContainers();\n          if (!stack) {\n            await this.llamaStackManager.createLlamaStackContainers(options.connection, labels ?? 
{});\n            stack = await this.llamaStackManager.getLlamaStackContainers();\n          }\n          if (stack) {\n            envs = [`MODEL_ENDPOINT=http://host.containers.internal:${stack.server?.port}`];\n          }\n        }\n        if (image.ports.length > 0) {\n          healthcheck = {\n            // must be the port INSIDE the container not the exposed one\n            Test: ['CMD-SHELL', `curl -s localhost:${image.ports[0]} > /dev/null`],\n            Interval: SECOND * 5,\n            Retries: 4 * 5,\n            Timeout: SECOND * 2,\n          };\n        }\n\n        const podifiedName = getRandomName(`${image.appName}-podified`);\n        await containerEngine.createContainer(podInfo.engineId, {\n          Image: image.id,\n          name: podifiedName,\n          Detach: true,\n          HostConfig: hostConfig,\n          Env: envs,\n          start: false,\n          pod: podInfo.Id,\n          HealthCheck: healthcheck,\n        });\n      }),\n    );\n  }\n\n  protected async createPod(options: ApplicationOptions, images: RecipeImage[]): Promise<PodInfo> {\n    // find the exposed port of the sample app so we can open its ports on the new pod\n    const sampleAppImageInfo = images.find(image => !image.modelService);\n    if (!sampleAppImageInfo) {\n      console.error('no sample app image found');\n      throw new Error('no sample app found');\n    }\n\n    const portmappings: PodCreatePortOptions[] = [];\n    // we expose all ports so we can check the model service if it is actually running\n    for (const image of images) {\n      for (const exposed of image.ports) {\n        const localPorts = await getPortsInfo(exposed);\n        if (localPorts) {\n          portmappings.push({\n            container_port: parseInt(exposed),\n            host_port: parseInt(localPorts),\n            host_ip: '',\n            protocol: '',\n            range: 1,\n          });\n        }\n      }\n    }\n\n    // create new pod\n    const labels: 
Record<string, string> = {\n      [POD_LABEL_RECIPE_ID]: options.recipe.id,\n    };\n\n    if (isApplicationOptionsWithModelInference(options)) {\n      labels[POD_LABEL_MODEL_ID] = options.model.id;\n    } else {\n      labels[POD_LABEL_MODEL_ID] = '<none>';\n    }\n    // collecting all modelService ports\n    const modelPorts = images\n      .filter(img => img.modelService)\n      .flatMap(img => img.ports)\n      .map(port => portmappings.find(pm => `${pm.container_port}` === port)?.host_port);\n    if (modelPorts.length) {\n      labels[POD_LABEL_MODEL_PORTS] = modelPorts.join(',');\n    }\n    // collecting all application ports (excluding service ports)\n    const appPorts = images\n      .filter(img => !img.modelService)\n      .flatMap(img => img.ports)\n      .map(port => portmappings.find(pm => `${pm.container_port}` === port)?.host_port);\n    if (appPorts.length) {\n      labels[POD_LABEL_APP_PORTS] = appPorts.join(',');\n    }\n    const { engineId, Id } = await this.podManager.createPod({\n      provider: options.connection,\n      name: getRandomName(`pod-${sampleAppImageInfo.appName}`),\n      portmappings: portmappings,\n      labels,\n    });\n\n    return this.podManager.getPod(engineId, Id);\n  }\n\n  /**\n   * Stop the pod with matching recipeId and modelId\n   * @param recipeId\n   * @param modelId\n   */\n  async stopApplication(recipeId: string, modelId: string): Promise<PodInfo> {\n    // clear existing tasks\n    this.clearTasks(recipeId, modelId);\n\n    // get the application pod\n    const appPod = await this.getApplicationPod(recipeId, modelId);\n\n    // if the pod is already stopped skip\n    if (appPod.Status !== 'Exited') {\n      await this.#taskRunner.runAsTask(\n        {\n          'recipe-id': recipeId,\n          'model-id': modelId,\n        },\n        {\n          loadingLabel: 'Stopping AI App',\n          successLabel: 'AI App Stopped',\n          errorLabel: 'Error stopping AI App',\n          errorMsg: err => `Error 
removing the pod.: ${String(err)}`,\n        },\n        () => this.podManager.stopPod(appPod.engineId, appPod.Id),\n      );\n      await this.checkPodsHealth();\n    }\n    return appPod;\n  }\n\n  /**\n   * Utility method to start a pod using (recipeId, modelId)\n   * @param recipeId\n   * @param modelId\n   */\n  async startApplication(recipeId: string, modelId: string): Promise<void> {\n    this.clearTasks(recipeId, modelId);\n    const pod = await this.getApplicationPod(recipeId, modelId);\n\n    return this.runApplication(pod, {\n      'recipe-id': recipeId,\n      'model-id': modelId,\n    });\n  }\n\n  protected refresh(): void {\n    // clear existing applications\n    this.#applications.clear();\n    // collect all pods based on label\n    this.podManager\n      .getPodsWithLabels([POD_LABEL_RECIPE_ID])\n      .then(pods => {\n        pods.forEach(pod => this.adoptPod(pod));\n      })\n      .catch((err: unknown) => {\n        console.error('error during adoption of existing playground containers', err);\n      });\n    // notify\n    this.notify();\n  }\n\n  init(): void {\n    this.podmanConnection.onPodmanConnectionEvent(() => {\n      this.refresh();\n    });\n\n    this.podManager.onStartPodEvent((pod: PodInfo) => {\n      this.adoptPod(pod);\n    });\n    this.podManager.onRemovePodEvent(({ podId }) => {\n      this.forgetPodById(podId);\n    });\n\n    const ticker = (): void => {\n      this.checkPodsHealth()\n        .catch((err: unknown) => {\n          console.error('error getting pods statuses', err);\n        })\n        .finally(() => (timerId = setTimeout(ticker, 10000)));\n    };\n\n    // using a recursive setTimeout instead of setInterval as we don't know how long the operation takes\n    let timerId = setTimeout(ticker, 1000);\n\n    this.#disposables.push(\n      Disposable.create(() => {\n        clearTimeout(timerId);\n      }),\n    );\n\n    // refresh on init\n    this.refresh();\n  }\n\n  protected adoptPod(pod: PodInfo): void 
{\n    if (!pod.Labels) {\n      return;\n    }\n    const recipeId = pod.Labels[POD_LABEL_RECIPE_ID];\n    const modelId = pod.Labels[POD_LABEL_MODEL_ID];\n    if (!recipeId || !modelId) {\n      return;\n    }\n    const appPorts = getPortsFromLabel(pod.Labels, POD_LABEL_APP_PORTS);\n    const modelPorts = getPortsFromLabel(pod.Labels, POD_LABEL_MODEL_PORTS);\n    if (this.#applications.has({ recipeId, modelId })) {\n      return;\n    }\n    const state: ApplicationState = {\n      recipeId,\n      modelId,\n      pod,\n      appPorts,\n      modelPorts,\n      health: 'starting',\n      backend: getInferenceType(this.modelsManager.getModelsInfo().filter(m => m.id === modelId)),\n    };\n    this.updateApplicationState(recipeId, modelId, state);\n  }\n\n  protected forgetPodById(podId: string): void {\n    const app = Array.from(this.#applications.values()).find(p => p.pod.Id === podId);\n    if (!app) {\n      return;\n    }\n    if (!app.pod.Labels) {\n      return;\n    }\n    const recipeId = app.pod.Labels[POD_LABEL_RECIPE_ID];\n    const modelId = app.pod.Labels[POD_LABEL_MODEL_ID];\n    if (!recipeId || !modelId) {\n      return;\n    }\n    if (!this.#applications.has({ recipeId, modelId })) {\n      return;\n    }\n    this.#applications.delete({ recipeId, modelId });\n    this.notify();\n\n    const protect = this.protectTasks.has(podId);\n    if (!protect) {\n      this.taskRegistry.createTask('AI App stopped manually', 'success', {\n        'recipe-id': recipeId,\n        'model-id': modelId,\n      });\n    } else {\n      this.protectTasks.delete(podId);\n    }\n  }\n\n  protected async checkPodsHealth(): Promise<void> {\n    const pods = await this.podManager.getPodsWithLabels([POD_LABEL_RECIPE_ID, POD_LABEL_MODEL_ID]);\n    let changes = false;\n\n    for (const pod of pods) {\n      const recipeId = pod.Labels[POD_LABEL_RECIPE_ID];\n      const modelId = pod.Labels[POD_LABEL_MODEL_ID];\n      if (!this.#applications.has({ recipeId, modelId })) 
{\n        // a fresh pod could not have been added yet, we will handle it at next iteration\n        continue;\n      }\n\n      const podHealth = await this.podManager.getHealth(pod);\n      const state = this.#applications.get({ recipeId, modelId });\n      if (state.health !== podHealth) {\n        state.health = podHealth;\n        state.pod = pod;\n        this.#applications.set({ recipeId, modelId }, state);\n        changes = true;\n      }\n      if (pod.Status !== state.pod.Status) {\n        state.pod = pod;\n        changes = true;\n      }\n    }\n    if (changes) {\n      this.notify();\n    }\n  }\n\n  protected updateApplicationState(recipeId: string, modelId: string, state: ApplicationState): void {\n    this.#applications.set({ recipeId, modelId }, state);\n    this.notify();\n  }\n\n  getApplicationsState(): ApplicationState[] {\n    return Array.from(this.#applications.values());\n  }\n\n  protected clearTasks(recipeId: string, modelId: string): void {\n    // clear any existing status / tasks related to the pair recipeId-modelId.\n    this.taskRegistry.deleteByLabels({\n      'recipe-id': recipeId,\n      'model-id': modelId,\n    });\n  }\n\n  /**\n   * Method that will stop then remove a pod corresponding to the recipe and model provided\n   * @param recipeId\n   * @param modelId\n   */\n  async removeApplication(recipeId: string, modelId: string): Promise<void> {\n    const appPod = await this.stopApplication(recipeId, modelId);\n\n    this.protectTasks.add(appPod.Id);\n\n    await this.#taskRunner.runAsTask(\n      {\n        'recipe-id': recipeId,\n        'model-id': modelId,\n      },\n      {\n        loadingLabel: 'Removing AI App',\n        successLabel: 'AI App Removed',\n        errorLabel: 'Error stopping AI App',\n        errorMsg: () => 'error removing the pod. 
Please try to remove the pod manually',\n      },\n      () => this.podManager.removePod(appPod.engineId, appPod.Id),\n    );\n  }\n\n  async restartApplication(connection: ContainerProviderConnection, recipeId: string, modelId: string): Promise<void> {\n    const appPod = await this.getApplicationPod(recipeId, modelId);\n    await this.removeApplication(recipeId, modelId);\n    const recipe = this.catalogManager.getRecipeById(recipeId);\n    let opts: ApplicationOptions;\n    if (appPod.Labels[POD_LABEL_MODEL_ID] === '<none>') {\n      opts = {\n        connection,\n        recipe,\n      };\n    } else {\n      const model = this.catalogManager.getModelById(appPod.Labels[POD_LABEL_MODEL_ID]);\n      opts = {\n        connection,\n        recipe,\n        model,\n      };\n    }\n\n    // init the recipe\n    const podInfo = await this.initApplication(opts);\n\n    // start the pod\n    return this.runApplication(podInfo, {\n      'recipe-id': recipeId,\n      'model-id': modelId,\n    });\n  }\n\n  async getApplicationPorts(recipeId: string, modelId: string): Promise<number[]> {\n    const state = this.#applications.get({ recipeId, modelId });\n    if (state) {\n      return state.appPorts;\n    }\n    throw new Error(`Recipe ${recipeId} has no ports available`);\n  }\n\n  protected async getApplicationPod(recipeId: string, modelId: string): Promise<PodInfo> {\n    const appPod = await this.findPod(recipeId, modelId);\n    if (!appPod) {\n      throw new Error(`no pod found with recipe Id ${recipeId} and model Id ${modelId}`);\n    }\n    return appPod;\n  }\n\n  protected async hasApplicationPod(recipeId: string, modelId: string): Promise<boolean> {\n    const pod = await this.podManager.findPodByLabelsValues({\n      [POD_LABEL_RECIPE_ID]: recipeId,\n      [POD_LABEL_MODEL_ID]: modelId,\n    });\n    return !!pod;\n  }\n\n  protected async findPod(recipeId: string, modelId: string): Promise<PodInfo | undefined> {\n    return this.podManager.findPodByLabelsValues({\n      
[POD_LABEL_RECIPE_ID]: recipeId,\n      [POD_LABEL_MODEL_ID]: modelId,\n    });\n  }\n\n  dispose(): void {\n    this.#disposables.forEach(disposable => disposable.dispose());\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/catalogManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/* eslint-disable @typescript-eslint/no-explicit-any */\n\nimport { afterEach, beforeEach, describe, expect, test, vi } from 'vitest';\nimport content from '../tests/ai-test.json';\nimport userContent from '../tests/ai-user-test.json';\nimport { EventEmitter, window } from '@podman-desktop/api';\nimport { CatalogManager } from './catalogManager';\n\nimport type { Stats } from 'node:fs';\nimport { promises, existsSync } from 'node:fs';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport path from 'node:path';\nimport { version } from '../assets/ai.json';\nimport * as catalogUtils from '../utils/catalogUtils';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nvi.mock('../assets/ai.json', async importOriginal => {\n  // eslint-disable-next-line @typescript-eslint/consistent-type-imports\n  const { version } = await importOriginal<typeof import('../assets/ai.json')>();\n  return {\n    default: { ...content, version: version },\n    version: version,\n  };\n});\n\nvi.mock('node:fs');\nvi.mock('node:fs/promises');\nvi.mock('node:path');\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    EventEmitter: 
vi.fn(),\n    window: {\n      showNotification: vi.fn(),\n    },\n    ProgressLocation: {\n      TASK_WIDGET: 'TASK_WIDGET',\n    },\n    fs: {\n      createFileSystemWatcher: (): unknown => ({\n        onDidCreate: vi.fn(),\n        onDidDelete: vi.fn(),\n        onDidChange: vi.fn(),\n      }),\n    },\n  };\n});\n\nlet catalogManager: CatalogManager;\n\nbeforeEach(async () => {\n  vi.resetAllMocks();\n\n  // mock EventEmitter logic for all tests\n  vi.mocked(EventEmitter).mockImplementation(() => {\n    const listeners: ((value: unknown) => void)[] = [];\n    return {\n      event: vi.fn().mockImplementation(callback => {\n        listeners.push(callback);\n      }),\n      fire: vi.fn().mockImplementation((content: unknown) => {\n        listeners.forEach(listener => listener(content));\n      }),\n    } as unknown as EventEmitter<unknown>;\n  });\n\n  const appUserDirectory = '.';\n\n  // Creating CatalogManager\n  catalogManager = new CatalogManager(\n    {\n      fire: vi.fn().mockResolvedValue(true),\n    } as unknown as RpcExtension,\n    appUserDirectory,\n  );\n});\n\ndescribe('invalid user catalog', () => {\n  beforeEach(async () => {\n    vi.mocked(promises.readFile).mockResolvedValue('invalid json');\n    await catalogManager.init();\n  });\n\n  test('expect correct model is returned with valid id', () => {\n    const model = catalogManager.getModelById('llama-2-7b-chat.Q5_K_S');\n    expect(model).toBeDefined();\n    expect(model.name).toEqual('Llama-2-7B-Chat-GGUF');\n    expect(model.registry).toEqual('Hugging Face');\n    expect(model.url).toEqual(\n      'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_S.gguf',\n    );\n  });\n\n  test('expect error if id does not correspond to any model', () => {\n    expect(() => catalogManager.getModelById('unknown')).toThrowError('No model found having id unknown');\n  });\n});\n\ntest('expect correct model is returned from default catalog with valid id when no user 
catalog exists', async () => {\n  vi.mocked(existsSync).mockReturnValue(false);\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getRecipes().length > 0);\n\n  const model = catalogManager.getModelById('llama-2-7b-chat.Q5_K_S');\n  expect(model).toBeDefined();\n  expect(model.name).toEqual('Llama-2-7B-Chat-GGUF');\n  expect(model.registry).toEqual('Hugging Face');\n  expect(model.url).toEqual(\n    'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_S.gguf',\n  );\n});\n\ntest('expect correct model is returned with valid id when the user catalog is valid', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().some(model => model.id === 'model1'));\n\n  const model = catalogManager.getModelById('model1');\n  expect(model).toBeDefined();\n  expect(model.name).toEqual('Model 1');\n  expect(model.registry).toEqual('Hugging Face');\n  expect(model.url).toEqual('https://model1.example.com');\n});\n\ntest('expect to call writeFile in addLocalModelsToCatalog with catalog updated', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getRecipes().length > 0);\n\n  const mtimeDate = new Date('2024-04-03T09:51:15.766Z');\n  vi.mocked(promises.stat).mockResolvedValue({\n    size: 1,\n    mtime: mtimeDate,\n  } as Stats);\n  vi.mocked(path.resolve).mockReturnValue('path');\n\n  vi.mocked(promises.writeFile).mockResolvedValue();\n\n  await catalogManager.importUserModels([\n    {\n      name: 'custom-model',\n      path: '/root/path/file.gguf',\n    },\n  ]);\n\n  expect(promises.mkdir).toHaveBeenCalled();\n  expect(promises.writeFile).toBeCalledWith('path', expect.any(String), 
'utf-8');\n});\n\ntest('expect to call writeFile in removeLocalModelFromCatalog with catalog updated', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n  vi.mocked(path.resolve).mockReturnValue('path');\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getRecipes().length > 0);\n\n  vi.mocked(promises.writeFile).mockResolvedValue();\n\n  const updatedCatalog: ApplicationCatalog = { ...userContent };\n  updatedCatalog.models = updatedCatalog.models.filter(m => m.id !== 'model1');\n\n  await catalogManager.removeUserModel('model1');\n\n  expect(promises.writeFile).toBeCalledWith(\n    'path',\n    expect.stringContaining(`\"version\": \"${catalogUtils.CatalogFormat.CURRENT}\"`),\n    'utf-8',\n  );\n});\n\ntest('catalog should be the combination of user catalog and default catalog', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n  vi.mocked(path.resolve).mockReturnValue('path');\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().length > userContent.models.length);\n\n  const mtimeDate = new Date('2024-04-03T09:51:15.766Z');\n  vi.mocked(promises.stat).mockResolvedValue({\n    size: 1,\n    mtime: mtimeDate,\n  } as Stats);\n  vi.mocked(path.resolve).mockReturnValue('path');\n\n  const catalog = catalogManager.getCatalog();\n\n  expect(catalog).toEqual({\n    version: catalogUtils.CatalogFormat.CURRENT,\n    recipes: [...content.recipes, ...userContent.recipes],\n    models: [...content.models, ...userContent.models],\n    categories: [...content.categories, ...userContent.categories],\n  });\n});\n\ntest('catalog should use user items in favour of default', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(path.resolve).mockReturnValue('path');\n\n  const overwriteFullCatalog: 
ApplicationCatalog = {\n    version: catalogUtils.CatalogFormat.CURRENT,\n    recipes: content.recipes.map(recipe => ({\n      ...recipe,\n      name: 'user-recipe-overwrite',\n    })),\n    models: content.models.map(model => ({\n      ...model,\n      name: 'user-model-overwrite',\n    })),\n    categories: content.categories.map(category => ({\n      ...category,\n      name: 'user-model-overwrite',\n    })),\n  };\n\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(overwriteFullCatalog));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().length > 0);\n\n  const mtimeDate = new Date('2024-04-03T09:51:15.766Z');\n  vi.mocked(promises.stat).mockResolvedValue({\n    size: 1,\n    mtime: mtimeDate,\n  } as Stats);\n  vi.mocked(path.resolve).mockReturnValue('path');\n\n  const catalog = catalogManager.getCatalog();\n\n  expect(catalog).toEqual(overwriteFullCatalog);\n});\n\ntest('default catalog should have latest version', () => {\n  expect(version).toBe(catalogUtils.CatalogFormat.CURRENT);\n});\n\ntest('wrong catalog version should create a notification', () => {\n  catalogManager['onUserCatalogUpdate']({ version: catalogUtils.CatalogFormat.UNKNOWN });\n\n  expect(window.showNotification).toHaveBeenCalledWith(\n    expect.objectContaining({\n      title: 'Incompatible user-catalog',\n    }),\n  );\n});\n\ntest('malformed catalog should create a notification', async () => {\n  vi.mocked(existsSync).mockReturnValue(false);\n  vi.mocked(path.resolve).mockReturnValue('path');\n\n  catalogManager['onUserCatalogUpdate']({\n    version: catalogUtils.CatalogFormat.CURRENT,\n    models: [\n      {\n        fakeProperty: 'hello',\n      },\n    ],\n    recipes: [],\n    categories: [],\n  });\n\n  expect(window.showNotification).toHaveBeenCalledWith(\n    expect.objectContaining({\n      title: 'Error loading the user catalog',\n      body: 'Something went wrong while trying to load the user catalog: Error: invalid model 
format',\n    }),\n  );\n});\n\ndescribe('spy on catalogUtils.sanitize', () => {\n  beforeEach(() => {\n    // do not mock the complete catalogUtils module but only spy the `sanitize` function,\n    // as we want to keep the original `catalogUtils.hasCatalogWrongFormat` function\n    vi.spyOn(catalogUtils, 'sanitize');\n  });\n\n  afterEach(() => {\n    vi.mocked(catalogUtils.sanitize).mockRestore();\n  });\n\n  test('catalog with undefined version should call sanitize function to try converting it', () => {\n    vi.mocked(promises.writeFile).mockResolvedValue();\n    catalogManager['onUserCatalogUpdate']({\n      recipes: [\n        {\n          id: 'chatbot',\n          description: 'This is a Streamlit chat demo application.',\n          name: 'ChatBot',\n          repository: 'https://github.com/containers/ai-lab-recipes',\n          ref: 'v1.1.3',\n          icon: 'natural-language-processing',\n          categories: ['natural-language-processing'],\n          basedir: 'recipes/natural_language_processing/chatbot',\n          readme: '',\n          models: ['hf.instructlab.granite-7b-lab-GGUF', 'hf.instructlab.merlinite-7b-lab-GGUF'],\n        },\n      ],\n      models: [],\n    });\n\n    expect(catalogUtils.sanitize).toHaveBeenCalled();\n    expect(promises.writeFile).toHaveBeenCalled();\n  });\n});\n\ntest('filter recipes by language', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().some(model => model.id === 'model1'));\n  const result1 = catalogManager.filterRecipes({\n    languages: ['lang1'],\n  });\n  expect(result1.result.map(r => r.id)).toEqual(['recipe1']);\n  expect(result1.choices).toEqual({\n    languages: [\n      { name: 'lang1', count: 1 },\n      { name: 'lang10', count: 2 },\n      { name: 'lang11', count: 1 },\n      { name: 'lang2', count: 1 },\n      { name: 
'lang3', count: 1 },\n    ],\n    frameworks: [\n      { name: 'fw1', count: 1 },\n      { name: 'fw10', count: 1 },\n    ],\n    tools: [{ name: 'tool1', count: 1 }],\n  });\n\n  const result2 = catalogManager.filterRecipes({\n    languages: ['lang2'],\n  });\n  expect(result2.result.map(r => r.id)).toEqual(['recipe2']);\n  expect(result2.choices).toEqual({\n    languages: [\n      { name: 'lang1', count: 1 },\n      { name: 'lang10', count: 2 },\n      { name: 'lang11', count: 1 },\n      { name: 'lang2', count: 1 },\n      { name: 'lang3', count: 1 },\n    ],\n    frameworks: [\n      { name: 'fw10', count: 1 },\n      { name: 'fw2', count: 1 },\n    ],\n    tools: [{ name: 'tool2', count: 1 }],\n  });\n});\n\ntest('filter recipes by tool', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().some(model => model.id === 'model1'));\n\n  const result1 = catalogManager.filterRecipes({\n    tools: ['tool1'],\n  });\n  expect(result1.result.map(r => r.id)).toEqual(['recipe1']);\n  expect(result1.choices).toEqual({\n    frameworks: [\n      { name: 'fw1', count: 1 },\n      { name: 'fw10', count: 1 },\n    ],\n    languages: [\n      { name: 'lang1', count: 1 },\n      { name: 'lang10', count: 1 },\n    ],\n    tools: [\n      { name: 'tool1', count: 1 },\n      { name: 'tool2', count: 1 },\n      { name: 'tool3', count: 1 },\n    ],\n  });\n\n  const result2 = catalogManager.filterRecipes({\n    tools: ['tool2'],\n  });\n  expect(result2.result.map(r => r.id)).toEqual(['recipe2']);\n  expect(result2.choices).toEqual({\n    frameworks: [\n      { name: 'fw10', count: 1 },\n      { name: 'fw2', count: 1 },\n    ],\n    languages: [\n      { name: 'lang10', count: 1 },\n      { name: 'lang2', count: 1 },\n    ],\n    tools: [\n      { name: 'tool1', count: 1 },\n      { name: 'tool2', count: 1 
},\n      { name: 'tool3', count: 1 },\n    ],\n  });\n\n  const result3 = catalogManager.filterRecipes({\n    tools: ['tool1', 'tool2'],\n  });\n  expect(result3.result.map(r => r.id)).toEqual(['recipe1', 'recipe2']);\n  expect(result3.choices).toEqual({\n    frameworks: [\n      { name: 'fw1', count: 1 },\n      { name: 'fw10', count: 2 },\n      { name: 'fw2', count: 1 },\n    ],\n    languages: [\n      { name: 'lang1', count: 1 },\n      { name: 'lang10', count: 2 },\n      { name: 'lang2', count: 1 },\n    ],\n    tools: [\n      { name: 'tool1', count: 1 },\n      { name: 'tool2', count: 1 },\n      { name: 'tool3', count: 1 },\n    ],\n  });\n});\n\ntest('filter recipes by framework', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().some(model => model.id === 'model1'));\n\n  const result1 = catalogManager.filterRecipes({\n    frameworks: ['fw1'],\n  });\n  expect(result1.result.map(r => r.id)).toEqual(['recipe1']);\n  expect(result1.choices).toEqual({\n    languages: [\n      { name: 'lang1', count: 1 },\n      { name: 'lang10', count: 1 },\n    ],\n    frameworks: [\n      { name: 'fw1', count: 1 },\n      { name: 'fw10', count: 3 },\n      { name: 'fw11', count: 1 },\n      { name: 'fw2', count: 2 },\n    ],\n    tools: [{ name: 'tool1', count: 1 }],\n  });\n\n  const result2 = catalogManager.filterRecipes({\n    frameworks: ['fw2'],\n  });\n  expect(result2.result.map(r => r.id)).toEqual(['recipe2', 'recipe3']);\n  expect(result2.choices).toEqual({\n    languages: [\n      { name: 'lang10', count: 1 },\n      { name: 'lang11', count: 1 },\n      { name: 'lang2', count: 1 },\n      { name: 'lang3', count: 1 },\n    ],\n    frameworks: [\n      { name: 'fw1', count: 1 },\n      { name: 'fw10', count: 3 },\n      { name: 'fw11', count: 1 },\n      { name: 'fw2', count: 2 },\n   
 ],\n    tools: [\n      { name: 'tool2', count: 1 },\n      { name: 'tool3', count: 1 },\n    ],\n  });\n\n  const result3 = catalogManager.filterRecipes({\n    frameworks: ['fw1', 'fw2'],\n  });\n  expect(result3.result.map(r => r.id)).toEqual(['recipe1', 'recipe2', 'recipe3']);\n  expect(result3.choices).toEqual({\n    languages: [\n      { name: 'lang1', count: 1 },\n      { name: 'lang10', count: 2 },\n      { name: 'lang11', count: 1 },\n      { name: 'lang2', count: 1 },\n      { name: 'lang3', count: 1 },\n    ],\n    frameworks: [\n      { name: 'fw1', count: 1 },\n      { name: 'fw10', count: 3 },\n      { name: 'fw11', count: 1 },\n      { name: 'fw2', count: 2 },\n    ],\n    tools: [\n      { name: 'tool1', count: 1 },\n      { name: 'tool2', count: 1 },\n      { name: 'tool3', count: 1 },\n    ],\n  });\n});\n\ntest('filter recipes by language and framework', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().some(model => model.id === 'model1'));\n\n  const result1 = catalogManager.filterRecipes({\n    languages: ['lang2'],\n    frameworks: ['fw2'],\n  });\n  expect(result1.result.map(r => r.id)).toEqual(['recipe2']);\n  expect(result1.choices).toEqual({\n    languages: [\n      { name: 'lang10', count: 1 },\n      { name: 'lang11', count: 1 },\n      { name: 'lang2', count: 1 },\n      { name: 'lang3', count: 1 },\n    ],\n    frameworks: [\n      { name: 'fw10', count: 1 },\n      { name: 'fw2', count: 1 },\n    ],\n    tools: [{ name: 'tool2', count: 1 }],\n  });\n});\n\ntest('filter recipes by language, tool and framework', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n\n  await catalogManager.init();\n  await vi.waitUntil(() => catalogManager.getModels().some(model => 
model.id === 'model1'));\n\n  const result1 = catalogManager.filterRecipes({\n    languages: ['lang1'],\n    tools: ['tool1'],\n    frameworks: ['fw1'],\n  });\n  expect(result1.result.map(r => r.id)).toEqual(['recipe1']);\n  expect(result1.choices).toEqual({\n    languages: [\n      { name: 'lang1', count: 1 },\n      { name: 'lang10', count: 1 },\n    ],\n    frameworks: [\n      { name: 'fw1', count: 1 },\n      { name: 'fw10', count: 1 },\n    ],\n    tools: [{ name: 'tool1', count: 1 }],\n  });\n});\n\ntest('models are loaded as soon as init is finished when no user catalog', async () => {\n  await catalogManager.init();\n  expect(catalogManager.getModels()).toHaveLength(3);\n});\n\ntest('models are loaded as soon as init is finished when user catalog exists', async () => {\n  vi.mocked(promises.readFile).mockResolvedValue(JSON.stringify(userContent));\n  vi.mocked(existsSync).mockReturnValue(true);\n  await catalogManager.init();\n  expect(catalogManager.getModels()).toHaveLength(5);\n});\n"
  },
  {
    "path": "packages/backend/src/managers/catalogManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport fs, { promises } from 'node:fs';\nimport path from 'node:path';\nimport crypto from 'node:crypto';\nimport defaultCatalog from '../assets/ai.json';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { MSG_NEW_CATALOG_STATE } from '@shared/Messages';\nimport { type Disposable, type Event, EventEmitter, window } from '@podman-desktop/api';\nimport { JsonWatcher } from '../utils/JsonWatcher';\nimport { Publisher } from '../utils/Publisher';\nimport type { LocalModelImportInfo } from '@shared/models/ILocalModelInfo';\nimport { InferenceType } from '@shared/models/IInference';\nimport { CatalogFormat, hasCatalogWrongFormat, merge, sanitize } from '../utils/catalogUtils';\nimport type { FilterRecipesResult, RecipeChoices, RecipeFilters } from '@shared/models/FilterRecipesResult';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nexport const USER_CATALOG = 'user-catalog.json';\n\nexport class CatalogManager extends Publisher<ApplicationCatalog> implements Disposable {\n  private readonly 
_onUpdate = new EventEmitter<ApplicationCatalog>();\n  readonly onUpdate: Event<ApplicationCatalog> = this._onUpdate.event;\n\n  private catalog: ApplicationCatalog;\n  #jsonWatcher: JsonWatcher<ApplicationCatalog> | undefined;\n  #notification: Disposable | undefined;\n\n  constructor(\n    rpcExtension: RpcExtension,\n    private appUserDirectory: string,\n  ) {\n    super(rpcExtension, MSG_NEW_CATALOG_STATE, () => this.getCatalog());\n    // We start with an empty catalog, for the methods to work before the catalog is loaded\n    this.catalog = {\n      version: CatalogFormat.CURRENT,\n      categories: [],\n      models: [],\n      recipes: [],\n    };\n  }\n\n  /**\n   * The init method will start a watcher on the user catalog.json\n   */\n  async init(): Promise<void> {\n    return new Promise<void>(resolve => {\n      // Creating a json watcher\n      this.#jsonWatcher = new JsonWatcher(this.getUserCatalogPath(), {\n        version: CatalogFormat.CURRENT,\n        recipes: [],\n        models: [],\n        categories: [],\n      });\n      this.#jsonWatcher.onContentUpdated(content => {\n        this.onUserCatalogUpdate(content);\n        resolve();\n      });\n      this.#jsonWatcher.init();\n    });\n  }\n\n  private loadDefaultCatalog(): void {\n    this.catalog = defaultCatalog as ApplicationCatalog;\n    this.notify();\n  }\n\n  private onUserCatalogUpdate(content: unknown): void {\n    // if there is no version in the user catalog, we try to sanitize it\n    // most likely it can be converted automatically to the current version without showing any notification to the user\n    if (content && typeof content === 'object' && hasCatalogWrongFormat(content)) {\n      try {\n        content = sanitize(content);\n        // overwrite the catalog on disk\n        const userCatalogPath = this.getUserCatalogPath();\n        promises.writeFile(userCatalogPath, JSON.stringify(content, undefined, 2), 'utf-8').catch((err: unknown) => {\n          
console.error('Something went wrong while trying to save catalog', err);\n        });\n      } catch (e) {\n        console.error(e);\n      }\n    }\n\n    if (!content || typeof content !== 'object') {\n      this.loadDefaultCatalog();\n      return;\n    }\n\n    // Get the user-catalog version\n    let userCatalogFormat: string = CatalogFormat.UNKNOWN;\n    if ('version' in content && typeof content.version === 'string') {\n      userCatalogFormat = content.version;\n    }\n\n    if (userCatalogFormat !== CatalogFormat.CURRENT) {\n      this.loadDefaultCatalog();\n      if (!this.#notification) {\n        this.#notification = window.showNotification({\n          type: 'error',\n          title: 'Incompatible user-catalog',\n          body: `The catalog is using an older version of the catalog incompatible with current version ${CatalogFormat.CURRENT}.`,\n          markdownActions:\n            ':button[See migration guide]{href=https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/MIGRATION.md title=\"Migration guide\"}',\n        });\n      }\n      console.error(\n        `the user-catalog provided is using version ${userCatalogFormat} expected ${CatalogFormat.CURRENT}. 
You can follow the migration guide.`,\n      );\n      return;\n    }\n\n    // merging default catalog with user catalog\n    try {\n      this.catalog = merge(sanitize(defaultCatalog), sanitize({ ...content, version: userCatalogFormat }));\n\n      // reset notification if everything went smoothly\n      this.#notification?.dispose();\n      this.#notification = undefined;\n    } catch (err: unknown) {\n      if (!this.#notification) {\n        this.#notification = window.showNotification({\n          type: 'error',\n          title: 'Error loading the user catalog',\n          body: `Something went wrong while trying to load the user catalog: ${String(err)}`,\n        });\n      }\n      console.error(err);\n      this.loadDefaultCatalog();\n    }\n\n    this.notify();\n  }\n\n  override notify(): void {\n    super.notify();\n    this._onUpdate.fire(this.getCatalog());\n  }\n\n  dispose(): void {\n    this.#jsonWatcher?.dispose();\n    this.#notification?.dispose();\n  }\n\n  public getCatalog(): ApplicationCatalog {\n    return this.catalog;\n  }\n\n  public getModels(): ModelInfo[] {\n    return this.catalog.models;\n  }\n\n  public getModelById(modelId: string): ModelInfo {\n    const model = this.getModels().find(m => modelId === m.id);\n    if (!model) {\n      throw new Error(`No model found having id ${modelId}`);\n    }\n    return model;\n  }\n\n  public getModelByName(modelName: string): ModelInfo {\n    const model = this.getModels().find(m => modelName === m.name);\n    if (!model) {\n      throw new Error(`No model found having name ${modelName}`);\n    }\n    return model;\n  }\n\n  public getRecipes(): Recipe[] {\n    return this.catalog.recipes;\n  }\n\n  public getRecipeById(recipeId: string): Recipe {\n    const recipe = this.getRecipes().find(r => recipeId === r.id);\n    if (!recipe) {\n      throw new Error(`No recipe found having id ${recipeId}`);\n    }\n    return recipe;\n  }\n\n  /**\n   * This method is used to imports user's local 
models.\n   * @param localModels the models to imports\n   */\n  async importUserModels(localModels: LocalModelImportInfo[]): Promise<void> {\n    const userCatalogPath = this.getUserCatalogPath();\n    let content: ApplicationCatalog;\n\n    // check if we already have an existing user's catalog\n    if (fs.existsSync(userCatalogPath)) {\n      const raw = await promises.readFile(userCatalogPath, 'utf-8');\n      content = sanitize(JSON.parse(raw));\n    } else {\n      content = {\n        version: CatalogFormat.CURRENT,\n        recipes: [],\n        models: [],\n        categories: [],\n      };\n    }\n\n    // Transform local models into ModelInfo\n    const models: ModelInfo[] = await Promise.all(\n      localModels.map(async local => {\n        const statFile = await promises.stat(local.path);\n        const sha256 = crypto.createHash('sha256').update(local.path).digest('hex');\n        return {\n          id: sha256,\n          name: local.name,\n          description: `Model imported from ${local.path}`,\n          file: {\n            path: path.dirname(local.path),\n            file: path.basename(local.path),\n            size: statFile.size,\n            creation: statFile.mtime,\n          },\n          memory: statFile.size,\n          backend: local.backend ?? 
InferenceType.NONE,\n        };\n      }),\n    );\n\n    // Add all our models infos to the user's models catalog\n    content.models.push(...models);\n\n    // ensure parent directory exists\n    await promises.mkdir(path.dirname(userCatalogPath), { recursive: true });\n\n    // overwrite the existing catalog\n    return promises.writeFile(userCatalogPath, JSON.stringify(content, undefined, 2), 'utf-8');\n  }\n\n  /**\n   * Remove a model from the user's catalog.\n   * @param modelId\n   */\n  async removeUserModel(modelId: string): Promise<void> {\n    const userCatalogPath = this.getUserCatalogPath();\n    if (!fs.existsSync(userCatalogPath)) {\n      throw new Error('User catalog does not exist.');\n    }\n\n    const raw = await promises.readFile(userCatalogPath, 'utf-8');\n    const content = sanitize(JSON.parse(raw));\n\n    return promises.writeFile(\n      userCatalogPath,\n      JSON.stringify(\n        {\n          version: content.version,\n          recipes: content.recipes,\n          models: content.models.filter(model => model.id !== modelId),\n          categories: content.categories,\n        },\n        undefined,\n        2,\n      ),\n      'utf-8',\n    );\n  }\n\n  /**\n   * Return the path to the user catalog\n   */\n  private getUserCatalogPath(): string {\n    return path.resolve(this.appUserDirectory, USER_CATALOG);\n  }\n\n  public filterRecipes(filters: RecipeFilters): FilterRecipesResult {\n    let result = this.getRecipes();\n    for (const [filter, values] of Object.entries(filters)) {\n      switch (filter) {\n        case 'languages': {\n          let res: Recipe[] = [];\n          for (const value of values) {\n            res = [...res, ...result.filter(r => r.languages?.includes(value))];\n          }\n          result = res;\n          break;\n        }\n        case 'tools':\n          result = result.filter(r => values.includes(r.backend ?? 
''));\n          break;\n        case 'frameworks': {\n          let res: Recipe[] = [];\n          for (const value of values) {\n            res = [...res, ...result.filter(r => r.frameworks?.includes(value))];\n          }\n          result = res;\n          break;\n        }\n      }\n    }\n    const choices: RecipeChoices = {};\n    if ('languages' in filters) {\n      const subfilters = structuredClone(filters);\n      delete subfilters.languages;\n      choices.languages = this.filterRecipes(subfilters).choices.languages;\n    } else {\n      choices.languages = result\n        .flatMap(r => r.languages)\n        .filter(l => l !== undefined)\n        .filter((value, index, array) => array.indexOf(value) === index)\n        .sort((a, b) => a.localeCompare(b))\n        .map(l => ({\n          name: l,\n          count: result.filter(r => r.languages?.includes(l)).length,\n        }));\n    }\n\n    if ('tools' in filters) {\n      const subfilters = structuredClone(filters);\n      delete subfilters.tools;\n      choices.tools = this.filterRecipes(subfilters).choices.tools;\n    } else {\n      choices.tools = result\n        .map(r => r.backend)\n        .filter(b => b !== undefined)\n        .filter((value, index, array) => array.indexOf(value) === index)\n        .sort((a, b) => a.localeCompare(b))\n        .map(t => ({\n          name: t,\n          count: result.filter(r => r.backend === t).length,\n        }));\n    }\n\n    if ('frameworks' in filters) {\n      const subfilters = structuredClone(filters);\n      delete subfilters.frameworks;\n      choices.frameworks = this.filterRecipes(subfilters).choices.frameworks;\n    } else {\n      choices.frameworks = result\n        .flatMap(r => r.frameworks)\n        .filter(f => f !== undefined)\n        .filter((value, index, array) => array.indexOf(value) === index)\n        .sort((a, b) => a.localeCompare(b))\n        .map(f => ({\n          name: f,\n          count: result.filter(r => 
r.frameworks?.includes(f)).length,\n        }));\n    }\n    return {\n      filters,\n      choices,\n      result,\n    };\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/gitManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { describe, expect, test, vi, beforeEach } from 'vitest';\nimport { GitManager } from './gitManager';\nimport { statSync, existsSync, mkdirSync, type Stats, rmSync } from 'node:fs';\nimport { window } from '@podman-desktop/api';\nimport type { ReadCommitResult } from 'isomorphic-git';\nimport git from 'isomorphic-git';\n\nvi.mock('isomorphic-git', () => {\n  return {\n    default: {\n      clone: vi.fn(),\n      currentBranch: vi.fn(),\n      log: vi.fn(),\n      resolveRef: vi.fn(),\n      fetch: vi.fn(),\n      getConfig: vi.fn(),\n      statusMatrix: vi.fn(),\n    },\n  };\n});\n\nvi.mock('node:fs');\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    window: {\n      showWarningMessage: vi.fn(),\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(git.resolveRef).mockResolvedValue('dummyCommit');\n});\n\ndescribe('cloneRepository', () => {\n  const gitmanager = new GitManager();\n  test('clone and checkout if ref is specified', async () => {\n    await gitmanager.cloneRepository({\n      repository: 'repo',\n      targetDirectory: 'target',\n      ref: '000',\n    });\n    expect(git.clone).toBeCalledWith({\n      fs: 
expect.anything(),\n      http: expect.anything(),\n      url: 'repo',\n      dir: 'target',\n      ref: '000',\n      singleBranch: true,\n      depth: 1,\n    });\n  });\n  test('clone and checkout if ref is NOT specified', async () => {\n    await gitmanager.cloneRepository({\n      repository: 'repo',\n      targetDirectory: 'target',\n    });\n    expect(git.clone).toBeCalledWith({\n      fs: expect.anything(),\n      http: expect.anything(),\n      url: 'repo',\n      dir: 'target',\n      ref: undefined,\n      singleBranch: true,\n      depth: 1,\n    });\n  });\n});\n\ndescribe('processCheckout', () => {\n  test('first install no existing folder', async () => {\n    vi.mocked(existsSync).mockReturnValue(false);\n\n    await new GitManager().processCheckout({\n      repository: 'repo',\n      targetDirectory: 'target',\n      ref: '000',\n    });\n\n    expect(existsSync).toHaveBeenCalledWith('target');\n    expect(mkdirSync).toHaveBeenCalledWith('target', { recursive: true });\n    expect(git.clone).toBeCalledWith({\n      fs: expect.anything(),\n      http: expect.anything(),\n      url: 'repo',\n      dir: 'target',\n      ref: '000',\n      singleBranch: true,\n      depth: 1,\n    });\n  });\n\n  test('existing folder valid', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'isRepositoryUpToDate').mockResolvedValue({ ok: true });\n\n    await gitmanager.processCheckout({\n      repository: 'repo',\n      targetDirectory: 'target',\n      ref: '000',\n    });\n\n    expect(gitmanager.isRepositoryUpToDate).toHaveBeenCalled();\n    expect(existsSync).toHaveBeenCalledWith('target');\n    expect(statSync).toHaveBeenCalledWith('target');\n\n    expect(mkdirSync).not.toHaveBeenCalled();\n    expect(git.clone).not.toHaveBeenCalled();\n  });\n\n  test('existing folder 
detached and user cancel', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(window.showWarningMessage).mockResolvedValue('Cancel');\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'isRepositoryUpToDate').mockResolvedValue({ ok: false, updatable: false });\n\n    await expect(\n      gitmanager.processCheckout({\n        repository: 'repo',\n        targetDirectory: 'target',\n        ref: '000',\n      }),\n    ).rejects.toThrowError('Cancelled');\n  });\n\n  test('existing folder not-updatable and user continue', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(window.showWarningMessage).mockResolvedValue('Continue');\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'isRepositoryUpToDate').mockResolvedValue({ ok: false, updatable: false });\n\n    await gitmanager.processCheckout({\n      repository: 'repo',\n      targetDirectory: 'target',\n      ref: '000',\n    });\n\n    expect(rmSync).not.toHaveBeenCalled();\n    expect(mkdirSync).not.toHaveBeenCalled();\n    expect(git.clone).not.toHaveBeenCalled();\n  });\n\n  test('existing folder not-updatable and user reset', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(window.showWarningMessage).mockResolvedValue('Reset');\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'isRepositoryUpToDate').mockResolvedValue({ ok: false, updatable: false });\n\n    await gitmanager.processCheckout({\n      repository: 'repo',\n      targetDirectory: 'target',\n      ref: '000',\n    });\n\n    
expect(window.showWarningMessage).toHaveBeenCalledWith(expect.anything(), 'Cancel', 'Continue', 'Reset');\n    expect(rmSync).toHaveBeenCalledWith('target', { recursive: true });\n  });\n\n  test('existing folder updatable and user update', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(window.showWarningMessage).mockResolvedValue('Update');\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'isRepositoryUpToDate').mockResolvedValue({ ok: false, updatable: true });\n    vi.spyOn(gitmanager, 'pull').mockResolvedValue(undefined);\n\n    await gitmanager.processCheckout({\n      repository: 'repo',\n      targetDirectory: 'target',\n      ref: '000',\n    });\n\n    expect(window.showWarningMessage).toHaveBeenCalledWith(expect.anything(), 'Cancel', 'Continue', 'Update');\n    expect(rmSync).not.toHaveBeenCalled();\n    expect(gitmanager.pull).toHaveBeenCalled();\n  });\n});\n\ndescribe('isRepositoryUpToDate', () => {\n  test('no remote defined', async () => {\n    const gitmanager = new GitManager();\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'other-repo',\n      },\n    ]);\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe(\n      'The local repository does not have remote repo configured. 
Remotes: origin other-repo (fetch)',\n    );\n  });\n\n  test('detached invalid without ref', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.mocked(git.currentBranch).mockResolvedValue(undefined);\n\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository is detached.');\n  });\n\n  test('detached invalid with invalid ref', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTagCommitId').mockResolvedValue(undefined); // ref is not a tag\n    vi.mocked(git.currentBranch).mockResolvedValue(undefined);\n\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'invalidRef');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository is detached. 
HEAD is dummyCommit expected invalidRef.');\n  });\n\n  test('detached invalid with expected ref', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.mocked(git.statusMatrix).mockResolvedValue([['a', 1, 1, 1]]);\n    vi.mocked(git.currentBranch).mockResolvedValue(undefined);\n\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'dummyCommit');\n    expect(result.ok).toBeTruthy();\n    expect(result.error).toBeUndefined();\n  });\n\n  test('detached with expected ref and modified files', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.mocked(git.statusMatrix).mockResolvedValue([\n      ['a', 1, 1, 1],\n      ['a_file', 1, 2, 1],\n    ]);\n\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'dummyCommit');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository has modified files.');\n  });\n\n  test('detached with expected ref and deleted files', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.mocked(git.statusMatrix).mockResolvedValue([\n      
['a', 1, 1, 1],\n      ['a_file', 1, 0, 1],\n    ]);\n\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'dummyCommit');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository has deleted files.');\n  });\n\n  test('detached with expected ref and created files', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.mocked(git.statusMatrix).mockResolvedValue([\n      ['a', 1, 1, 1],\n      ['a_file', 0, 2, 2],\n    ]);\n\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'dummyCommit');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository has created files.');\n  });\n\n  test('detached with expected ref and repository is not clean', async () => {\n    vi.mocked(existsSync).mockReturnValue(true);\n    vi.mocked(statSync).mockReturnValue({\n      isDirectory: () => true,\n    } as unknown as Stats);\n\n    const gitmanager = new GitManager();\n\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: [],\n      deleted: [],\n      clean: false,\n    });\n\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'dummyCommit');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository is not clean.');\n  });\n\n  test('using main branch and no local change', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 
'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 0, ahead: 0 });\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: [],\n      deleted: [],\n      clean: true,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeTruthy();\n  });\n\n  test('using main branch and tracking wrong branch', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/other-branch');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 0, ahead: 0 });\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: [],\n      deleted: [],\n      clean: true,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe(\n      'The local repository is not tracking the right branch. 
(tracking origin/other-branch when expected main)',\n    );\n  });\n\n  test('using main branch and ahead', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 1, ahead: 2 });\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: [],\n      deleted: [],\n      clean: true,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository has 2 commit(s) ahead.');\n  });\n\n  test('using main branch and behind', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 1, ahead: 0 });\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: [],\n      deleted: [],\n      clean: true,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeTruthy();\n    expect(result.updatable).toBeTruthy();\n  });\n\n  test('using main branch and modified files', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        
url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 0, ahead: 0 });\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: ['a_modified_file.txt'],\n      created: [],\n      deleted: [],\n      clean: true,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository has modified files.');\n  });\n\n  test('using main branch and deleted files', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 0, ahead: 0 });\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: [],\n      deleted: ['a_deleted_file.txt'],\n      clean: true,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository has deleted files.');\n  });\n\n  test('using main branch and created files', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 0, ahead: 0 });\n    vi.spyOn(gitmanager, 
'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: ['a_created_file.txt'],\n      deleted: [],\n      clean: true,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository has created files.');\n  });\n\n  test('using main branch and repository is not clean', async () => {\n    const gitmanager = new GitManager();\n    vi.mocked(git.currentBranch).mockResolvedValue('main');\n    vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n      {\n        remote: 'origin',\n        url: 'repo',\n      },\n    ]);\n    vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n    vi.spyOn(gitmanager, 'getBehindAhead').mockResolvedValue({ behind: 0, ahead: 0 });\n    vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n      modified: [],\n      created: [],\n      deleted: [],\n      clean: false,\n    });\n    const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'main');\n    expect(result.ok).toBeFalsy();\n    expect(result.error).toBe('The local repository is not clean.');\n  });\n});\n\ntest('using tag and no local change', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(statSync).mockReturnValue({\n    isDirectory: () => true,\n  } as unknown as Stats);\n\n  const gitmanager = new GitManager();\n\n  vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n    {\n      remote: 'origin',\n      url: 'repo',\n    },\n  ]);\n  vi.spyOn(gitmanager, 'getTagCommitId').mockResolvedValue('dummyCommit'); // ref is a tag and points to commit\n  vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n    modified: [],\n    created: [],\n    deleted: [],\n    clean: true,\n  });\n  vi.mocked(git.currentBranch).mockResolvedValue(undefined);\n\n  const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 
'v1.0.0');\n  expect(result.ok).toBeTruthy();\n});\n\ntest('using wrong tag', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(statSync).mockReturnValue({\n    isDirectory: () => true,\n  } as unknown as Stats);\n\n  const gitmanager = new GitManager();\n\n  vi.spyOn(gitmanager, 'getRepositoryRemotes').mockResolvedValue([\n    {\n      remote: 'origin',\n      url: 'repo',\n    },\n  ]);\n  vi.spyOn(gitmanager, 'getTagCommitId').mockResolvedValue('otherCommit'); // ref is a tag and points to commit\n  vi.spyOn(gitmanager, 'getRepositoryStatus').mockResolvedValue({\n    modified: [],\n    created: [],\n    deleted: [],\n    clean: true,\n  });\n  vi.mocked(git.currentBranch).mockResolvedValue(undefined);\n\n  const result = await gitmanager.isRepositoryUpToDate('target', 'repo', 'v1.0.0');\n  expect(result.ok).toBeFalsy();\n  expect(result.error).toBe('The local repository is detached. HEAD is dummyCommit expected otherCommit.');\n});\n\ntest('getBehindAhead', async () => {\n  const gitmanager = new GitManager();\n\n  vi.mocked(git.log).mockImplementation(async ({ ref }: { ref?: string }) => {\n    return new Promise(resolve => {\n      if (ref === 'main') {\n        resolve([\n          {\n            oid: '1',\n          },\n          {\n            oid: '6',\n          },\n          {\n            oid: '2',\n          },\n          {\n            oid: '3',\n          },\n        ] as ReadCommitResult[]);\n      } else if (ref === 'origin/main') {\n        resolve([\n          {\n            oid: '1',\n          },\n          {\n            oid: '4',\n          },\n          {\n            oid: '2',\n          },\n          {\n            oid: '5',\n          },\n          {\n            oid: '3',\n          },\n        ] as ReadCommitResult[]);\n      } else {\n        resolve([]);\n      }\n    });\n  });\n  vi.spyOn(gitmanager, 'getTrackingBranch').mockResolvedValue('origin/main');\n  const { behind, ahead } = await 
gitmanager.getBehindAhead('path/to/repo', 'main');\n  expect(behind).toEqual(2);\n  expect(ahead).toEqual(1);\n});\n\ntest('getTrackingBranch', async () => {\n  const gitmanager = new GitManager();\n  vi.mocked(git.getConfig).mockImplementation(async ({ path }: { path: string }): Promise<string> => {\n    if (path === 'branch.my-branch.remote') {\n      return 'origin';\n    } else if (path === 'branch.my-branch.merge') {\n      return 'refs/heads/my-remote-branch';\n    }\n    throw new Error('should never been reached');\n  });\n  const result = await gitmanager.getTrackingBranch('path/to/repository', 'my-branch');\n  expect(result).toEqual('origin/my-remote-branch');\n});\n"
  },
  {
    "path": "packages/backend/src/managers/gitManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { window } from '@podman-desktop/api';\nimport fs, { statSync, existsSync, mkdirSync, rmSync } from 'node:fs';\nimport git from 'isomorphic-git';\nimport http from 'isomorphic-git/http/node';\n\nexport interface GitCloneInfo {\n  repository: string;\n  ref?: string;\n  targetDirectory: string;\n}\n\nexport class GitManager {\n  async cloneRepository(gitCloneInfo: GitCloneInfo): Promise<void> {\n    // clone repo\n    await git.clone({\n      fs,\n      http,\n      dir: gitCloneInfo.targetDirectory,\n      url: gitCloneInfo.repository,\n      ref: gitCloneInfo.ref,\n      singleBranch: true,\n      depth: 1,\n    });\n  }\n\n  async getRepositoryRemotes(directory: string): Promise<\n    {\n      remote: string;\n      url: string;\n    }[]\n  > {\n    return git.listRemotes({ fs, dir: directory });\n  }\n\n  /* see https://isomorphic-git.org/docs/en/statusMatrix\n   *\n   * - The HEAD status is either absent (0) or present (1).\n   * - The WORKDIR status is either absent (0), identical to HEAD (1), or different from HEAD (2).\n   * - The STAGE status is either absent (0), identical to HEAD (1), identical to WORKDIR (2), or different from WORKDIR (3).\n   
*\n   * // example StatusMatrix\n   * [\n   *   [\"a.txt\", 0, 2, 0], // new, untracked\n   *   [\"b.txt\", 0, 2, 2], // added, staged\n   *   [\"c.txt\", 0, 2, 3], // added, staged, with unstaged changes\n   *   [\"d.txt\", 1, 1, 1], // unmodified\n   *   [\"e.txt\", 1, 2, 1], // modified, unstaged\n   *   [\"f.txt\", 1, 2, 2], // modified, staged\n   *   [\"g.txt\", 1, 2, 3], // modified, staged, with unstaged changes\n   *   [\"h.txt\", 1, 0, 1], // deleted, unstaged\n   *   [\"i.txt\", 1, 0, 0], // deleted, staged\n   *   [\"j.txt\", 1, 2, 0], // deleted, staged, with unstaged-modified changes (new file of the same name)\n   *   [\"k.txt\", 1, 1, 0], // deleted, staged, with unstaged changes (new file of the same name)\n   * ]\n   */\n  async getRepositoryStatus(directory: string): Promise<{\n    modified: string[];\n    created: string[];\n    deleted: string[];\n    clean: boolean;\n  }> {\n    const status = await git.statusMatrix({\n      fs,\n      dir: directory,\n    });\n\n    const FILE = 0,\n      HEAD = 1,\n      WORKDIR = 2,\n      STAGE = 3;\n\n    const created = status.filter(row => row[HEAD] === 0 && row[WORKDIR] === 2).map(row => row[FILE]);\n\n    const deleted = status\n      .filter(row => row[HEAD] === 1 && (row[WORKDIR] === 0 || row[STAGE] === 0))\n      .map(row => row[FILE]);\n\n    const modified = status.filter(row => row[HEAD] === 1 && row[WORKDIR] === 2).map(row => row[FILE]);\n\n    const notClean = status.filter(row => row[HEAD] !== 1 || row[WORKDIR] !== 1 || row[STAGE] !== 1);\n\n    return {\n      modified,\n      created,\n      deleted,\n      clean: notClean.length === 0,\n    };\n  }\n\n  async getCurrentCommit(directory: string): Promise<string> {\n    return git.resolveRef({ fs, dir: directory, ref: 'HEAD' });\n  }\n\n  async pull(directory: string): Promise<void> {\n    return git.pull({\n      fs,\n      http,\n      dir: directory,\n    });\n  }\n\n  async processCheckout(gitCloneInfo: GitCloneInfo): Promise<void> {\n   
 // Check for existing cloned repository\n    if (existsSync(gitCloneInfo.targetDirectory) && statSync(gitCloneInfo.targetDirectory).isDirectory()) {\n      const result = await this.isRepositoryUpToDate(\n        gitCloneInfo.targetDirectory,\n        gitCloneInfo.repository,\n        gitCloneInfo.ref,\n      );\n\n      if (result.ok) {\n        return;\n      }\n\n      const error = `The repository \"${gitCloneInfo.repository}\" appears to have already been cloned and does not match the expected configuration: ${result.error}`;\n\n      // Ask user\n      const selected = await window.showWarningMessage(\n        `${error} By continuing, the AI application may not run as expected. `,\n        'Cancel',\n        'Continue',\n        result.updatable ? 'Update' : 'Reset',\n      );\n\n      switch (selected) {\n        case undefined:\n        case 'Cancel':\n          throw new Error('Cancelled');\n        case 'Continue':\n          return;\n        case 'Update':\n          await this.pull(gitCloneInfo.targetDirectory);\n          return;\n        case 'Reset':\n          rmSync(gitCloneInfo.targetDirectory, { recursive: true });\n          break;\n      }\n    }\n\n    // Create folder\n    mkdirSync(gitCloneInfo.targetDirectory, { recursive: true });\n\n    // Clone the repository\n    console.log(`Cloning repository ${gitCloneInfo.repository} in ${gitCloneInfo.targetDirectory}.`);\n    await this.cloneRepository(gitCloneInfo);\n  }\n\n  async isRepositoryUpToDate(\n    directory: string,\n    origin: string,\n    ref?: string,\n  ): Promise<{ ok?: boolean; updatable?: boolean; error?: string }> {\n    // fetch updates\n    await git.fetch({\n      fs,\n      http,\n      dir: directory,\n    });\n\n    const remotes = await this.getRepositoryRemotes(directory);\n\n    if (!remotes.some(remote => remote.url === origin)) {\n      return {\n        error: `The local repository does not have remote ${origin} configured. 
Remotes: ${remotes\n          .map(remote => `${remote.remote} ${remote.url} (fetch)`)\n          .join(',')}`,\n      };\n    }\n\n    const branch = await git.currentBranch({\n      fs,\n      dir: directory,\n    });\n\n    if (!branch) {\n      // when the repository is detached\n      if (ref === undefined) {\n        return { error: 'The local repository is detached.' };\n      } else {\n        const tag = await this.getTagCommitId(directory, ref);\n        if (tag) {\n          ref = tag;\n        }\n        const commit = await this.getCurrentCommit(directory);\n        if (!commit.startsWith(ref)) {\n          return { error: `The local repository is detached. HEAD is ${commit} expected ${ref}.` };\n        }\n      }\n    }\n\n    if (branch) {\n      const tracking = await this.getTrackingBranch(directory, branch);\n      if (ref && tracking !== `origin/${ref}`) {\n        return {\n          error: `The local repository is not tracking the right branch. (tracking ${tracking} when expected ${ref})`,\n        };\n      }\n\n      const { behind, ahead } = await this.getBehindAhead(directory, branch);\n\n      if (ahead !== 0) {\n        return { error: `The local repository has ${ahead} commit(s) ahead.` };\n      }\n      if (behind !== 0) {\n        return { ok: true, updatable: true };\n      }\n    }\n\n    const status = await this.getRepositoryStatus(directory);\n    if (status.modified.length > 0) {\n      return { error: 'The local repository has modified files.' };\n    } else if (status.created.length > 0) {\n      return { error: 'The local repository has created files.' };\n    } else if (status.deleted.length > 0) {\n      return { error: 'The local repository has deleted files.' };\n    } else if (!status.clean) {\n      return { error: 'The local repository is not clean.' 
};\n    }\n\n    return { ok: true }; // If none of the error conditions are met\n  }\n\n  async getTrackingBranch(directory: string, branch: string): Promise<string | undefined> {\n    const mergeRef = await git.getConfig({\n      fs,\n      dir: directory,\n      path: `branch.${branch}.merge`,\n    });\n    const remote = await git.getConfig({\n      fs,\n      dir: directory,\n      path: `branch.${branch}.remote`,\n    });\n    return mergeRef && remote ? `${remote}/${mergeRef.replace(/^refs\\/heads\\//, '')}` : undefined;\n  }\n\n  async getBehindAhead(dir: string, localBranch: string): Promise<{ behind: number; ahead: number }> {\n    const remoteBranch = await this.getTrackingBranch(dir, localBranch);\n\n    const remoteCommits = (\n      await git.log({\n        fs,\n        dir,\n        ref: remoteBranch,\n      })\n    )\n      .map(c => c.oid)\n      .sort((a, b) => a.localeCompare(b));\n    const localCommits = (\n      await git.log({\n        fs,\n        dir,\n        ref: localBranch,\n      })\n    )\n      .map(c => c.oid)\n      .sort((a, b) => a.localeCompare(b));\n\n    let behind = 0;\n    let ahead = 0;\n    while (remoteCommits.length && localCommits.length) {\n      const remote = remoteCommits.pop();\n      const local = localCommits.pop();\n      if (!remote || !local) {\n        break;\n      }\n      if (remote === local) {\n        continue;\n      }\n      if (remote > local) {\n        behind++;\n        localCommits.push(local);\n      } else {\n        ahead++;\n        remoteCommits.push(remote);\n      }\n    }\n    return {\n      behind: behind + remoteCommits.length,\n      ahead: ahead + localCommits.length,\n    };\n  }\n\n  async getTagCommitId(directory: string, tagName: string): Promise<string | undefined> {\n    try {\n      return await git.resolveRef({\n        fs,\n        dir: directory,\n        ref: tagName,\n      });\n    } catch {\n      return undefined;\n    }\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/inference/inferenceManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport {\n  containerEngine,\n  type ContainerInfo,\n  type ContainerInspectInfo,\n  type TelemetryLogger,\n} from '@podman-desktop/api';\nimport type { ContainerRegistry } from '../../registries/ContainerRegistry';\nimport type { PodmanConnection } from '../podmanConnection';\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport { InferenceManager } from './inferenceManager';\nimport type { ModelsManager } from '../modelsManager';\nimport { LABEL_INFERENCE_SERVER } from '../../utils/inferenceUtils';\nimport type { InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { InferenceProviderRegistry } from '../../registries/InferenceProviderRegistry';\nimport type { InferenceProvider } from '../../workers/provider/InferenceProvider';\nimport type { CatalogManager } from '../catalogManager';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { InferenceType } from '@shared/models/IInference';\nimport { VMType } from '@shared/models/IPodman';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { 
MSG_INFERENCE_SERVERS_UPDATE } from '@shared/Messages';\nimport * as randomUtils from '../../utils/randomUtils';\nimport type { Task } from '@shared/models/ITask';\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    containerEngine: {\n      startContainer: vi.fn(),\n      stopContainer: vi.fn(),\n      inspectContainer: vi.fn(),\n      deleteContainer: vi.fn(),\n      listContainers: vi.fn(),\n    },\n    Disposable: {\n      from: vi.fn(),\n      create: vi.fn(),\n    },\n  };\n});\n\nvi.mock('../../utils/randomUtils');\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nconst containerRegistryMock = {\n  onStartContainerEvent: vi.fn(),\n  subscribe: vi.fn(),\n} as unknown as ContainerRegistry;\n\nconst podmanConnectionMock = {\n  onPodmanConnectionEvent: vi.fn(),\n  findRunningContainerProviderConnection: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst modelsManager = {\n  getLocalModelPath: vi.fn(),\n  uploadModelToPodmanMachine: vi.fn(),\n} as unknown as ModelsManager;\n\nconst telemetryMock = {\n  logUsage: vi.fn(),\n  logError: vi.fn(),\n} as unknown as TelemetryLogger;\n\nconst taskRegistryMock = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n  getTasksByLabels: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst inferenceProviderRegistryMock = {\n  getAll: vi.fn(),\n  getByType: vi.fn(),\n  get: vi.fn(),\n} as unknown as InferenceProviderRegistry;\n\nconst catalogManager = {\n  onUpdate: vi.fn(),\n} as unknown as CatalogManager;\n\nconst getInitializedInferenceManager = async (): Promise<InferenceManager> => {\n  const manager = new InferenceManager(\n    rpcExtensionMock,\n    containerRegistryMock,\n    podmanConnectionMock,\n    modelsManager,\n    telemetryMock,\n    taskRegistryMock,\n    inferenceProviderRegistryMock,\n    catalogManager,\n  );\n  manager.init();\n  await vi.waitUntil(manager.isInitialize.bind(manager), {\n    interval: 200,\n    timeout: 2000,\n  });\n  return manager;\n};\n\nconst 
mockListContainers = (containers: Partial<ContainerInfo>[]): void => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue(containers as unknown as ContainerInfo[]);\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  // Default listContainers is empty\n  mockListContainers([]);\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n  vi.mocked(containerEngine.inspectContainer).mockResolvedValue({\n    State: {\n      Status: 'running',\n      Health: undefined,\n    },\n  } as unknown as ContainerInspectInfo);\n  vi.mocked(podmanConnectionMock.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(taskRegistryMock.getTasksByLabels).mockReturnValue([]);\n  vi.mocked(modelsManager.getLocalModelPath).mockReturnValue('/local/model.guff');\n  vi.mocked(modelsManager.uploadModelToPodmanMachine).mockResolvedValue('/mnt/path/model.guff');\n});\n\n/**\n * Testing the initialization of the manager\n */\ndescribe('init Inference Manager', () => {\n  test('should be initialized without catalog events', async () => {\n    const manager = new InferenceManager(\n      rpcExtensionMock,\n      containerRegistryMock,\n      podmanConnectionMock,\n      modelsManager,\n      telemetryMock,\n      taskRegistryMock,\n      inferenceProviderRegistryMock,\n      catalogManager,\n    );\n    manager.init();\n    await vi.waitUntil(manager.isInitialize.bind(manager), {\n      interval: 200,\n      timeout: 2000,\n    });\n  });\n\n  test('should have listed containers', async () => {\n    const inferenceManager = await getInitializedInferenceManager();\n\n    expect(inferenceManager.isInitialize()).toBeTruthy();\n    expect(containerEngine.listContainers).toHaveBeenCalled();\n  });\n\n  test('should ignore containers without the proper label', async () => {\n    
mockListContainers([\n      {\n        Id: 'dummyId',\n      },\n    ]);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    expect(inferenceManager.getServers().length).toBe(0);\n  });\n\n  test('should have adopted the existing container', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyContainerId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    expect(inferenceManager.getServers()).toStrictEqual([\n      {\n        connection: {\n          port: -1,\n        },\n        container: {\n          containerId: 'dummyContainerId',\n          engineId: 'dummyEngineId',\n        },\n        health: undefined,\n        models: [],\n        status: 'running',\n        type: expect.anything(),\n        labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n  });\n\n  test('should have adopted all existing container with proper label', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyContainerId-1',\n        engineId: 'dummyEngineId-1',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n      {\n        Id: 'dummyContainerId-2',\n        engineId: 'dummyEngineId-2',\n      },\n      {\n        Id: 'dummyContainerId-3',\n        engineId: 'dummyEngineId-3',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    const servers = inferenceManager.getServers();\n    expect(servers.length).toBe(2);\n    expect(servers.some(server => server.container.containerId === 'dummyContainerId-1')).toBeTruthy();\n    expect(servers.some(server => server.container.containerId === 'dummyContainerId-3')).toBeTruthy();\n  });\n});\n\n/**\n * Testing the creation logic\n */\ndescribe('Create 
Inference Server', () => {\n  test('no provider available should throw an error', async () => {\n    vi.mocked(inferenceProviderRegistryMock.getByType).mockReturnValue([]);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    await expect(\n      inferenceManager.createInferenceServer({\n        inferenceProvider: undefined,\n        labels: {},\n        modelsInfo: [],\n        port: 8888,\n      }),\n    ).rejects.toThrowError('no enabled provider could be found.');\n  });\n\n  test('inference provider provided should use get from InferenceProviderRegistry', async () => {\n    vi.mocked(inferenceProviderRegistryMock.get).mockReturnValue({\n      enabled: () => false,\n    } as unknown as InferenceProvider);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    await expect(\n      inferenceManager.createInferenceServer({\n        inferenceProvider: 'dummy-inference-provider',\n        labels: {},\n        modelsInfo: [],\n        port: 8888,\n      }),\n    ).rejects.toThrowError('provider requested is not enabled.');\n    expect(inferenceProviderRegistryMock.get).toHaveBeenCalledWith('dummy-inference-provider');\n  });\n\n  test('selected inference provider should receive config', async () => {\n    const provider: InferenceProvider = {\n      enabled: () => true,\n      name: 'dummy-inference-provider',\n      dispose: () => {},\n      prePerform: vi.fn().mockReturnValue(Promise.resolve()),\n      perform: vi.fn<() => InferenceServer>().mockResolvedValue({\n        container: {\n          containerId: 'dummy-container-id',\n          engineId: 'dummy-engine-id',\n        },\n        models: [],\n        status: 'running',\n        type: InferenceType.LLAMA_CPP,\n        connection: { port: 0 },\n        labels: {},\n      }),\n    } as unknown as InferenceProvider;\n    vi.mocked(inferenceProviderRegistryMock.get).mockReturnValue(provider);\n\n    const inferenceManager = await getInitializedInferenceManager();\n\n 
   const config: InferenceServerConfig = {\n      inferenceProvider: 'dummy-inference-provider',\n      labels: {},\n      modelsInfo: [],\n      port: 8888,\n    };\n    const result = await inferenceManager.createInferenceServer(config);\n\n    expect(provider.perform).toHaveBeenCalledWith(config);\n\n    expect(result).toBe('dummy-container-id');\n  });\n});\n\n/**\n * Testing the starting logic\n */\ndescribe('Start Inference Server', () => {\n  test('containerId unknown', async () => {\n    const inferenceManager = await getInitializedInferenceManager();\n    await expect(inferenceManager.startInferenceServer('unknownContainerId')).rejects.toThrowError(\n      'cannot find a corresponding server for container id unknownContainerId.',\n    );\n  });\n\n  test('valid containerId', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    const inferenceManager = await getInitializedInferenceManager();\n    await inferenceManager.startInferenceServer('dummyId');\n\n    expect(containerEngine.startContainer).toHaveBeenCalledWith('dummyEngineId', 'dummyId');\n\n    const servers = inferenceManager.getServers();\n    expect(servers.length).toBe(1);\n    expect(servers[0].status).toBe('running');\n  });\n});\n\n/**\n * Testing the stopping logic\n */\ndescribe('Stop Inference Server', () => {\n  test('containerId unknown', async () => {\n    const inferenceManager = await getInitializedInferenceManager();\n    await expect(inferenceManager.stopInferenceServer('unknownContainerId')).rejects.toThrowError(\n      'cannot find a corresponding server for container id unknownContainerId.',\n    );\n  });\n\n  test('valid containerId', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n  
  ]);\n    const inferenceManager = await getInitializedInferenceManager();\n    await inferenceManager.stopInferenceServer('dummyId');\n\n    expect(containerEngine.stopContainer).toHaveBeenCalledWith('dummyEngineId', 'dummyId');\n\n    const servers = inferenceManager.getServers();\n    expect(servers.length).toBe(1);\n    expect(servers[0].status).toBe('stopped');\n  });\n});\n\ndescribe('Delete Inference Server', () => {\n  test('containerId unknown', async () => {\n    const inferenceManager = await getInitializedInferenceManager();\n    await expect(inferenceManager.deleteInferenceServer('unknownContainerId')).rejects.toThrowError(\n      'cannot find a corresponding server for container id unknownContainerId.',\n    );\n  });\n\n  test('valid running containerId', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    const inferenceManager = await getInitializedInferenceManager();\n    await inferenceManager.deleteInferenceServer('dummyId');\n\n    expect(containerEngine.stopContainer).toHaveBeenCalledWith('dummyEngineId', 'dummyId');\n    expect(containerEngine.deleteContainer).toHaveBeenCalledWith('dummyEngineId', 'dummyId');\n\n    const servers = inferenceManager.getServers();\n    expect(servers.length).toBe(0);\n  });\n\n  test('valid stopped containerId', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    vi.mocked(containerEngine.inspectContainer).mockResolvedValue({\n      State: {\n        Status: 'stopped',\n        Health: undefined,\n      },\n    } as unknown as ContainerInspectInfo);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    await inferenceManager.deleteInferenceServer('dummyId');\n\n    
expect(containerEngine.stopContainer).not.toHaveBeenCalled();\n    expect(containerEngine.deleteContainer).toHaveBeenCalledWith('dummyEngineId', 'dummyId');\n\n    const servers = inferenceManager.getServers();\n    expect(servers.length).toBe(0);\n  });\n});\n\ndescribe('Request Create Inference Server', () => {\n  beforeEach(() => {\n    vi.mocked(randomUtils.getRandomString).mockReturnValue('random123');\n  });\n\n  test('Should return unique string identifier', async () => {\n    const inferenceManager = await getInitializedInferenceManager();\n    const identifier = inferenceManager.requestCreateInferenceServer({\n      port: 8888,\n      providerId: 'test@providerId',\n      image: 'quay.io/bootsy/playground:v0',\n      modelsInfo: [\n        {\n          id: 'dummyModelId',\n          file: {\n            file: 'dummyFile',\n            path: 'dummyPath',\n          },\n        },\n      ],\n    } as unknown as InferenceServerConfig);\n    expect(identifier).toBeDefined();\n    expect(typeof identifier).toBe('string');\n  });\n\n  test('Task registry should have tasks matching unique identifier provided', async () => {\n    const inferenceManager = await getInitializedInferenceManager();\n    const identifier = inferenceManager.requestCreateInferenceServer({\n      port: 8888,\n      providerId: 'test@providerId',\n      image: 'quay.io/bootsy/playground:v0',\n      modelsInfo: [\n        {\n          id: 'dummyModelId',\n          file: {\n            file: 'dummyFile',\n            path: 'dummyPath',\n          },\n        },\n      ],\n    } as unknown as InferenceServerConfig);\n\n    expect(taskRegistryMock.createTask).toHaveBeenNthCalledWith(1, 'Creating Inference server', 'loading', {\n      trackingId: identifier,\n    });\n  });\n\n  test('all children tasks should be set as error when one fails', async () => {\n    const inferenceManager = await getInitializedInferenceManager();\n    vi.mocked(taskRegistryMock.createTask).mockReturnValue({\n      
id: 'task1',\n      name: 'Task 1',\n      state: 'loading',\n    });\n    vi.spyOn(inferenceManager, 'createInferenceServer');\n    const otherTasks: Task[] = [\n      {\n        id: 'subtask1',\n        name: 'Sub task 1',\n        state: 'loading',\n      },\n      {\n        id: 'subtask2',\n        name: 'Sub task 2',\n        state: 'loading',\n      },\n      {\n        id: 'subtask3',\n        name: 'Sub task 3',\n        state: 'error',\n      },\n    ];\n    vi.mocked(taskRegistryMock.getTasksByLabels).mockReturnValue(otherTasks);\n    vi.mocked(inferenceManager.createInferenceServer).mockRejectedValue('an error');\n    inferenceManager.requestCreateInferenceServer({\n      port: 8888,\n      providerId: 'test@providerId',\n      image: 'quay.io/bootsy/playground:v0',\n      modelsInfo: [\n        {\n          id: 'dummyModelId',\n          file: {\n            file: 'dummyFile',\n            path: 'dummyPath',\n          },\n        },\n      ],\n    } as unknown as InferenceServerConfig);\n    await vi.waitFor(() => {\n      expect(taskRegistryMock.updateTask).toHaveBeenCalledTimes(3);\n    });\n    expect(taskRegistryMock.updateTask).toHaveBeenNthCalledWith(1, { ...otherTasks[0], state: 'error' });\n    expect(taskRegistryMock.updateTask).toHaveBeenNthCalledWith(2, { ...otherTasks[1], state: 'error' });\n    expect(taskRegistryMock.updateTask).toHaveBeenNthCalledWith(3, {\n      error: 'Something went wrong while trying to create an inference server an error.',\n      id: 'task1',\n      name: 'Task 1',\n      state: 'error',\n    });\n  });\n});\n\ndescribe('containerRegistry events', () => {\n  test('container die event', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    const disposableMock = vi.fn();\n    const deferred = new Promise<(status: string) => void>((resolve, reject) => {\n      
vi.mocked(containerRegistryMock.subscribe).mockImplementation((containerId, listener) => {\n        if (containerId !== 'dummyId') reject(new Error('invalid container id'));\n        else resolve(listener);\n        return {\n          dispose: disposableMock,\n        };\n      });\n    });\n\n    const inferenceManager = await getInitializedInferenceManager();\n    const listener = await deferred;\n\n    const server = inferenceManager.get('dummyId');\n    expect(server?.status).toBe('running');\n    expect(containerEngine.inspectContainer).toHaveBeenCalledOnce();\n\n    vi.mocked(containerEngine.inspectContainer).mockResolvedValue({\n      State: {\n        Status: 'stopped',\n        Health: undefined,\n      },\n    } as unknown as ContainerInspectInfo);\n\n    listener('die');\n\n    await vi.waitFor(() => {\n      expect(inferenceManager.get('dummyId')?.status).toBe('stopped');\n      expect(containerEngine.inspectContainer).toHaveBeenCalledTimes(2);\n    });\n\n    // we should not have disposed the subscriber, as the container is only stopped, not removed\n    expect(disposableMock).not.toHaveBeenCalled();\n  });\n\n  test('container remove event', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    const disposableMock = vi.fn();\n    const deferred = new Promise<(status: string) => void>((resolve, reject) => {\n      vi.mocked(containerRegistryMock.subscribe).mockImplementation((containerId, listener) => {\n        if (containerId !== 'dummyId') reject(new Error('invalid container id'));\n        else resolve(listener);\n        return {\n          dispose: disposableMock,\n        };\n      });\n    });\n\n    const inferenceManager = await getInitializedInferenceManager();\n    const listener = await deferred;\n\n    const server = inferenceManager.get('dummyId');\n    
expect(server?.status).toBe('running');\n\n    listener('remove');\n\n    await vi.waitFor(() => {\n      expect(inferenceManager.get('dummyId')).toBeUndefined();\n    });\n\n    // we should have disposed the subscriber, as the container is removed\n    expect(disposableMock).toHaveBeenCalled();\n  });\n});\n\ndescribe('transition statuses', () => {\n  test('stopping an inference server should first set status to stopping', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    vi.mocked(containerEngine.inspectContainer).mockResolvedValue({\n      State: {\n        Status: 'running',\n        Health: undefined,\n      },\n    } as unknown as ContainerInspectInfo);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    await inferenceManager.stopInferenceServer('dummyId');\n\n    // first called with stopping status\n    expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_INFERENCE_SERVERS_UPDATE, [\n      {\n        connection: expect.anything(),\n        container: expect.anything(),\n        models: expect.anything(),\n        health: undefined,\n        status: 'stopping',\n        type: expect.anything(),\n        labels: expect.anything(),\n      },\n    ]);\n\n    // finally have been called with status stopped\n    expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_INFERENCE_SERVERS_UPDATE, [\n      {\n        connection: expect.anything(),\n        container: expect.anything(),\n        models: expect.anything(),\n        health: undefined,\n        status: 'stopped',\n        type: expect.anything(),\n        labels: expect.anything(),\n      },\n    ]);\n  });\n\n  test('deleting an inference server should first set status to stopping', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          
[LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    vi.mocked(containerEngine.inspectContainer).mockResolvedValue({\n      State: {\n        Status: 'running',\n        Health: undefined,\n      },\n    } as unknown as ContainerInspectInfo);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    await inferenceManager.deleteInferenceServer('dummyId');\n\n    expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_INFERENCE_SERVERS_UPDATE, [\n      {\n        connection: expect.anything(),\n        container: expect.anything(),\n        models: expect.anything(),\n        health: undefined,\n        status: 'deleting',\n        type: expect.anything(),\n        labels: expect.anything(),\n      },\n    ]);\n  });\n\n  test('starting an inference server should first set status to stopping', async () => {\n    mockListContainers([\n      {\n        Id: 'dummyId',\n        engineId: 'dummyEngineId',\n        Labels: {\n          [LABEL_INFERENCE_SERVER]: '[]',\n        },\n      },\n    ]);\n    vi.mocked(containerEngine.inspectContainer).mockResolvedValue({\n      State: {\n        Status: 'stopped',\n        Health: undefined,\n      },\n    } as unknown as ContainerInspectInfo);\n\n    const inferenceManager = await getInitializedInferenceManager();\n    await inferenceManager.startInferenceServer('dummyId');\n\n    // first status must be set to starting\n    expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_INFERENCE_SERVERS_UPDATE, [\n      {\n        connection: expect.anything(),\n        container: expect.anything(),\n        models: expect.anything(),\n        health: undefined,\n        status: 'starting',\n        type: expect.anything(),\n        labels: expect.anything(),\n      },\n    ]);\n\n    // on success it should have been set to running\n    expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_INFERENCE_SERVERS_UPDATE, [\n      {\n        connection: expect.anything(),\n        container: 
expect.anything(),\n        models: expect.anything(),\n        health: undefined,\n        status: 'running',\n        type: expect.anything(),\n        labels: expect.anything(),\n      },\n    ]);\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/inference/inferenceManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { InferenceServer, InferenceServerStatus, InferenceType } from '@shared/models/IInference';\nimport type { PodmanConnection, PodmanConnectionEvent } from '../podmanConnection';\nimport { containerEngine, Disposable } from '@podman-desktop/api';\nimport type { ContainerInfo, TelemetryLogger, ContainerProviderConnection } from '@podman-desktop/api';\nimport type { ContainerRegistry, ContainerEvent } from '../../registries/ContainerRegistry';\nimport { getInferenceType, isTransitioning, LABEL_INFERENCE_SERVER } from '../../utils/inferenceUtils';\nimport { Publisher } from '../../utils/Publisher';\nimport { MSG_INFERENCE_SERVERS_UPDATE } from '@shared/Messages';\nimport type { InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport type { ModelsManager } from '../modelsManager';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { getRandomString } from '../../utils/randomUtils';\nimport { basename, dirname } from 'node:path';\nimport type { InferenceProviderRegistry } from '../../registries/InferenceProviderRegistry';\nimport type { InferenceProvider } from '../../workers/provider/InferenceProvider';\nimport 
type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { CatalogManager } from '../catalogManager';\nimport { getHash } from '../../utils/sha';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { TaskRunner } from '../TaskRunner';\n\nexport class InferenceManager extends Publisher<InferenceServer[]> implements Disposable {\n  // Inference server map (containerId -> InferenceServer)\n  #servers: Map<string, InferenceServer>;\n  // Is initialized\n  #initialized: boolean;\n  // Disposables\n  #disposables: Disposable[];\n  #taskRunner: TaskRunner;\n\n  constructor(\n    rpcExtension: RpcExtension,\n    private containerRegistry: ContainerRegistry,\n    private podmanConnection: PodmanConnection,\n    private modelsManager: ModelsManager,\n    private telemetry: TelemetryLogger,\n    private taskRegistry: TaskRegistry,\n    private inferenceProviderRegistry: InferenceProviderRegistry,\n    private catalogManager: CatalogManager,\n  ) {\n    super(rpcExtension, MSG_INFERENCE_SERVERS_UPDATE, () => this.getServers());\n    this.#servers = new Map<string, InferenceServer>();\n    this.#disposables = [];\n    this.#initialized = false;\n    this.#taskRunner = new TaskRunner(this.taskRegistry);\n  }\n\n  init(): void {\n    this.podmanConnection.onPodmanConnectionEvent(this.watchMachineEvent.bind(this));\n    this.containerRegistry.onStartContainerEvent(this.watchContainerStart.bind(this));\n    this.catalogManager.onUpdate(() => {\n      this.retryableRefresh(1);\n    });\n    this.retryableRefresh(3);\n  }\n\n  public isInitialize(): boolean {\n    return this.#initialized;\n  }\n\n  /**\n   * Cleanup the manager\n   */\n  dispose(): void {\n    this.cleanDisposables();\n    this.#servers.clear();\n    this.#initialized = false;\n  }\n\n  /**\n   * Clean class disposables\n   */\n  private cleanDisposables(): void {\n    this.#disposables.forEach(disposable => disposable.dispose());\n  }\n\n  /**\n   * Get the Inference servers\n   
*/\n  public getServers(): InferenceServer[] {\n    return Array.from(this.#servers.values());\n  }\n\n  /**\n   * Get the Unique registered Inference provider types\n   */\n\n  public getRegisteredProviders(): InferenceType[] {\n    const types: InferenceType[] = this.inferenceProviderRegistry.getAll().map(provider => provider.type);\n    return [...new Set(types)];\n  }\n\n  /**\n   * return an inference server\n   * @param containerId the containerId of the inference server\n   */\n  public get(containerId: string): InferenceServer | undefined {\n    return this.#servers.get(containerId);\n  }\n\n  /**\n   * return the first inference server which is using the specific model\n   * it throws if the model backend is not currently supported\n   */\n  public findServerByModel(model: ModelInfo): InferenceServer | undefined {\n    // check if model backend is supported\n    const backend: InferenceType = getInferenceType([model]);\n    const providers: InferenceProvider[] = this.inferenceProviderRegistry\n      .getByType(backend)\n      .filter(provider => provider.enabled());\n    if (providers.length === 0) {\n      throw new Error('no enabled provider could be found.');\n    }\n    return this.getServers().find(s => s.models.some(m => m.id === model.id));\n  }\n\n  /**\n   * Creating an inference server can be heavy task (pulling image, uploading model to WSL etc.)\n   * The frontend cannot wait endlessly, therefore we provide a method returning a tracking identifier\n   * that can be used to fetch the tasks\n   *\n   * @param config the config to use to create the inference server\n   *\n   * @return a unique tracking identifier to follow the creation request\n   */\n  requestCreateInferenceServer(config: InferenceServerConfig): string {\n    // create a tracking id to put in the labels\n    const trackingId: string = getRandomString();\n\n    config.labels = {\n      ...config.labels,\n      trackingId: trackingId,\n    };\n\n    this.#taskRunner\n      
.runAsTask(\n        {\n          trackingId: trackingId,\n        },\n        {\n          loadingLabel: 'Creating Inference server',\n          errorMsg: err => `Something went wrong while trying to create an inference server ${String(err)}.`,\n          failFastSubtasks: true,\n        },\n        async ({ updateLabels }) => {\n          const containerId = await this.createInferenceServer(config);\n          updateLabels(labels => ({ ...labels, containerId }));\n        },\n      )\n      .catch(() => {});\n\n    return trackingId;\n  }\n\n  /**\n   * Given an engineId, it will create an inference server using an InferenceProvider.\n   * @param config\n   *\n   * @return the containerId of the created inference server\n   */\n  async createInferenceServer(config: InferenceServerConfig): Promise<string> {\n    if (!this.isInitialize()) throw new Error('Cannot start the inference server: not initialized.');\n\n    // Get the backend for the model inference server {@link InferenceType}\n    const backend: InferenceType = getInferenceType(config.modelsInfo);\n\n    let provider: InferenceProvider;\n    if (config.inferenceProvider) {\n      provider = this.inferenceProviderRegistry.get(config.inferenceProvider);\n      if (!provider.enabled()) throw new Error('provider requested is not enabled.');\n    } else {\n      const providers: InferenceProvider[] = this.inferenceProviderRegistry\n        .getByType(backend)\n        .filter(provider => provider.enabled());\n      if (providers.length === 0) throw new Error('no enabled provider could be found.');\n      provider = providers[0];\n    }\n\n    let connection: ContainerProviderConnection | undefined = undefined;\n    if (config.connection) {\n      connection = this.podmanConnection.getContainerProviderConnection(config.connection);\n    } else {\n      connection = this.podmanConnection.findRunningContainerProviderConnection();\n    }\n\n    if (!connection) throw new Error('cannot find running container 
provider connection');\n\n    await provider.prePerform(config);\n\n    // upload models to podman machine if user system is supported\n    config.modelsInfo = await Promise.all(\n      config.modelsInfo.map(modelInfo =>\n        this.modelsManager.uploadModelToPodmanMachine(connection, modelInfo, config.labels).then(path => ({\n          ...modelInfo,\n          file: {\n            path: dirname(path),\n            file: basename(path),\n          },\n        })),\n      ),\n    );\n\n    // create the inference server using the selected inference provider\n    const inferenceServer = await provider.perform(config);\n\n    // Adding a new inference server\n    this.#servers.set(inferenceServer.container.containerId, inferenceServer);\n\n    // Watch for container changes\n    this.watchContainerStatus(inferenceServer.container.engineId, inferenceServer.container.containerId);\n\n    // Log usage\n    this.telemetry.logUsage('inference.start', {\n      models: config.modelsInfo.map(model => getHash(model.id)),\n    });\n\n    this.notify();\n    return inferenceServer.container.containerId;\n  }\n\n  /**\n   * Given an engineId and a containerId, inspect the container and update the servers\n   * @param engineId\n   * @param containerId\n   * @private\n   */\n  private updateServerStatus(engineId: string, containerId: string): void {\n    const server = this.#servers.get(containerId);\n    if (server === undefined)\n      throw new Error('Something went wrong while trying to get container status got undefined Inference Server.');\n\n    // we should not update the server while we are in a transition state.\n    if (isTransitioning(server)) return;\n\n    // Inspect container\n    containerEngine\n      .inspectContainer(engineId, containerId)\n      .then(result => {\n        // Update server\n        this.#servers.set(containerId, {\n          ...server,\n          status: result.State.Status === 'running' ? 
'running' : 'stopped',\n          health: result.State.Health,\n        });\n        this.notify();\n      })\n      .catch((err: unknown) => {\n        console.error(\n          `Something went wrong while trying to inspect container ${containerId}. Trying to refresh servers.`,\n          err,\n        );\n        this.retryableRefresh(2);\n      });\n  }\n\n  /**\n   * Watch for container status changes\n   * @param engineId\n   * @param containerId the container to watch out\n   */\n  private watchContainerStatus(engineId: string, containerId: string): void {\n    // Update now\n    this.updateServerStatus(engineId, containerId);\n\n    // Create a pulling update for container health check\n    const intervalId = setInterval(this.updateServerStatus.bind(this, engineId, containerId), 10000);\n\n    this.#disposables.push(\n      Disposable.create(() => {\n        clearInterval(intervalId);\n      }),\n    );\n    // Subscribe to container status update\n    const disposable = this.containerRegistry.subscribe(containerId, (status: string) => {\n      switch (status) {\n        case 'die':\n          this.updateServerStatus(engineId, containerId);\n          clearInterval(intervalId);\n          break;\n        case 'remove':\n          // Update the list of servers\n          this.removeInferenceServer(containerId);\n          disposable.dispose();\n          clearInterval(intervalId);\n          break;\n      }\n    });\n    // Allowing cleanup if extension is stopped\n    this.#disposables.push(disposable);\n  }\n\n  private watchMachineEvent(_event: PodmanConnectionEvent): void {\n    this.retryableRefresh(2);\n  }\n\n  /**\n   * Listener for container start events\n   * @param event the event containing the id of the container\n   */\n  private watchContainerStart(event: ContainerEvent): void {\n    // We might have a start event for an inference server we already know about\n    if (this.#servers.has(event.id)) return;\n\n    containerEngine\n      
.listContainers()\n      .then(containers => {\n        const container = containers.find(c => c.Id === event.id);\n        if (container === undefined) {\n          return;\n        }\n        if (container.Labels && LABEL_INFERENCE_SERVER in container.Labels) {\n          this.watchContainerStatus(container.engineId, container.Id);\n        }\n      })\n      .catch((err: unknown) => {\n        console.error(`Something went wrong in container start listener.`, err);\n      });\n  }\n\n  /**\n   * This non-async utility method is made to retry refreshing the inference server with some delay\n   * in case of error raised.\n   *\n   * @param retry the number of retry allowed\n   */\n  private retryableRefresh(retry: number = 3): void {\n    if (retry === 0) {\n      console.error('Cannot refresh inference servers: retry limit has been reached. Cleaning manager.');\n      this.cleanDisposables();\n      this.#servers.clear();\n      this.#initialized = false;\n      return;\n    }\n    this.refreshInferenceServers().catch((err: unknown): void => {\n      console.warn(`Something went wrong while trying to refresh inference server. 
(retry left ${retry})`, err);\n      setTimeout(\n        () => {\n          this.retryableRefresh(retry - 1);\n        },\n        // eslint-disable-next-line sonarjs/pseudo-random\n        2000 + Math.random() * 1000,\n      );\n    });\n  }\n\n  /**\n   * Refresh the inference servers by listing all containers.\n   *\n   * This method has an important impact as it (re-)create all inference servers\n   */\n  private async refreshInferenceServers(): Promise<void> {\n    const containers: ContainerInfo[] = await containerEngine.listContainers();\n    const filtered = containers.filter(c => c.Labels && LABEL_INFERENCE_SERVER in c.Labels);\n\n    // clean existing disposables\n    this.cleanDisposables();\n    this.#servers = new Map<string, InferenceServer>(\n      filtered.map(containerInfo => {\n        let modelInfos: ModelInfo[] = [];\n        try {\n          const modelIds: string[] = JSON.parse(containerInfo.Labels[LABEL_INFERENCE_SERVER]);\n          modelInfos = modelIds\n            .filter(id => this.modelsManager.isModelOnDisk(id))\n            .map(id => this.modelsManager.getModelInfo(id));\n        } catch (err: unknown) {\n          console.error('Something went wrong while getting the models ids from the label.', err);\n        }\n\n        return [\n          containerInfo.Id,\n          {\n            container: {\n              containerId: containerInfo.Id,\n              engineId: containerInfo.engineId,\n            },\n            connection: {\n              port: !!containerInfo.Ports && containerInfo.Ports.length > 0 ? containerInfo.Ports[0].PublicPort : -1,\n            },\n            status: containerInfo.Status === 'running' ? 
'running' : 'stopped',\n            models: modelInfos,\n            type: getInferenceType(modelInfos),\n            labels: containerInfo.Labels || {},\n          },\n        ];\n      }),\n    );\n\n    // (re-)create container watchers\n    this.#servers.forEach(server => this.watchContainerStatus(server.container.engineId, server.container.containerId));\n    this.#initialized = true;\n    // notify update\n    this.notify();\n  }\n\n  /**\n   * Remove the reference of the inference server\n   * /!\\ Does not delete the corresponding container\n   * @param containerId\n   */\n  private removeInferenceServer(containerId: string): void {\n    this.#servers.delete(containerId);\n    this.notify();\n  }\n\n  /**\n   * Delete the InferenceServer instance from #servers and matching container\n   * @param containerId the id of the container running the Inference Server\n   */\n  async deleteInferenceServer(containerId: string): Promise<void> {\n    const server = this.#servers.get(containerId);\n    if (!server) {\n      throw new Error(`cannot find a corresponding server for container id ${containerId}.`);\n    }\n\n    try {\n      // Set status a deleting\n      this.setInferenceServerStatus(server.container.containerId, 'deleting');\n\n      // If the server is running we need to stop it.\n      if (server.status === 'running') {\n        await containerEngine.stopContainer(server.container.engineId, server.container.containerId);\n      }\n\n      // Delete the container\n      await containerEngine.deleteContainer(server.container.engineId, server.container.containerId);\n\n      // Delete the reference\n      this.removeInferenceServer(containerId);\n    } catch (err: unknown) {\n      console.error('Something went wrong while trying to delete the inference server.', err);\n      this.setInferenceServerStatus(server.container.containerId, 'error');\n      this.retryableRefresh(2);\n    }\n  }\n\n  /**\n   * Start an inference server from the container id\n   * 
@param containerId the identifier of the container to start\n   */\n  async startInferenceServer(containerId: string): Promise<void> {\n    if (!this.isInitialize()) throw new Error('Cannot start the inference server.');\n\n    const server = this.#servers.get(containerId);\n    if (server === undefined) throw new Error(`cannot find a corresponding server for container id ${containerId}.`);\n\n    try {\n      // set status to starting\n      this.setInferenceServerStatus(server.container.containerId, 'starting');\n      await containerEngine.startContainer(server.container.engineId, server.container.containerId);\n\n      this.setInferenceServerStatus(server.container.containerId, 'running');\n      // start watch for container status update\n      this.watchContainerStatus(server.container.engineId, server.container.containerId);\n    } catch (error: unknown) {\n      console.error(error);\n      this.telemetry.logError('inference.start', {\n        message: 'error starting inference',\n        error: error,\n      });\n      this.setInferenceServerStatus(server.container.containerId, 'error');\n      this.retryableRefresh(1);\n    }\n  }\n\n  /**\n   * Stop an inference server from the container id\n   * @param containerId the identifier of the container to stop\n   */\n  async stopInferenceServer(containerId: string): Promise<void> {\n    if (!this.isInitialize()) throw new Error('Cannot stop the inference server.');\n\n    const server = this.#servers.get(containerId);\n    if (server === undefined) throw new Error(`cannot find a corresponding server for container id ${containerId}.`);\n\n    if (isTransitioning(server)) throw new Error(`cannot stop a transitioning server.`);\n\n    try {\n      // set server to stopping\n      this.setInferenceServerStatus(server.container.containerId, 'stopping');\n\n      await containerEngine.stopContainer(server.container.engineId, server.container.containerId);\n      // once stopped update the status\n      
this.setInferenceServerStatus(server.container.containerId, 'stopped');\n    } catch (error: unknown) {\n      console.error(error);\n      this.telemetry.logError('inference.stop', {\n        message: 'error stopping inference',\n        error: error,\n      });\n\n      this.setInferenceServerStatus(server.container.containerId, 'error');\n      this.retryableRefresh(1);\n    }\n  }\n\n  /**\n   * Given a containerId, set the status of the corresponding inference server\n   * @param containerId\n   * @param status\n   */\n  private setInferenceServerStatus(containerId: string, status: InferenceServerStatus): void {\n    const server = this.#servers.get(containerId);\n    if (server === undefined) throw new Error(`cannot find a corresponding server for container id ${containerId}.`);\n\n    this.#servers.set(server.container.containerId, {\n      ...server,\n      status: status,\n      health: undefined, // always reset health history when changing status\n    });\n    this.notify();\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/instructlab/instructlabManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { TaskRegistry } from '../../registries/TaskRegistry';\nimport { beforeAll, beforeEach, expect, test, vi } from 'vitest';\nimport type { ContainerCreateResult, ContainerInfo, ImageInfo, TelemetryLogger } from '@podman-desktop/api';\nimport { containerEngine, EventEmitter } from '@podman-desktop/api';\nimport type { PodmanConnection } from '../podmanConnection';\nimport { INSTRUCTLAB_CONTAINER_LABEL, InstructlabManager } from './instructlabManager';\nimport { ContainerRegistry } from '../../registries/ContainerRegistry';\nimport { TestEventEmitter } from '../../tests/utils';\nimport { VMType } from '@shared/models/IPodman';\nimport type { Task } from '@shared/models/ITask';\nimport instructlab_images from '../../assets/instructlab-images.json';\nimport { INSTRUCTLAB_CONTAINER_TRACKINGID } from '@shared/models/instructlab/IInstructlabContainerInfo';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    EventEmitter: vi.fn(),\n    containerEngine: {\n      listContainers: vi.fn(),\n      listImages: vi.fn(),\n      createContainer: vi.fn(),\n      onEvent: vi.fn(),\n    },\n  
};\n});\n\nconst taskRegistry = new TaskRegistry({ fire: vi.fn().mockResolvedValue(true) } as unknown as RpcExtension);\n\nconst podmanConnection: PodmanConnection = {\n  onPodmanConnectionEvent: vi.fn(),\n  findRunningContainerProviderConnection: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst telemetryMock = {\n  logUsage: vi.fn(),\n  logError: vi.fn(),\n} as unknown as TelemetryLogger;\n\nlet instructlabManager: InstructlabManager;\n\nbeforeAll(() => {\n  vi.mocked(EventEmitter).mockImplementation(() => new TestEventEmitter() as unknown as EventEmitter<unknown>);\n});\n\nbeforeEach(() => {\n  const containerRegistry = new ContainerRegistry();\n  containerRegistry.init();\n  instructlabManager = new InstructlabManager('', taskRegistry, podmanConnection, containerRegistry, telemetryMock);\n  instructlabManager.init();\n  taskRegistry.deleteByLabels({ trackingId: INSTRUCTLAB_CONTAINER_TRACKINGID });\n});\n\ntest('getInstructLabContainer should return undefined if no containers', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  const containerId = await instructlabManager.getInstructLabContainer();\n  expect(containerId).toBeUndefined();\n});\n\ntest('getInstructLabContainer should return undefined if no instructlab container', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([{ Id: 'dummyId' } as unknown as ContainerInfo]);\n  const containerId = await instructlabManager.getInstructLabContainer();\n  expect(containerId).toBeUndefined();\n});\n\ntest('getInstructLabContainer should return id if instructlab container', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([\n    {\n      Id: 'dummyId',\n      State: 'running',\n      Labels: { [`${INSTRUCTLAB_CONTAINER_LABEL}`]: 'dummyLabel' },\n    } as unknown as ContainerInfo,\n  ]);\n  const containerId = await instructlabManager.getInstructLabContainer();\n  
expect(containerId).toBe('dummyId');\n});\n\ntest('requestCreateInstructlabContainer throws error if no podman connection', async () => {\n  const containerIdPromise = instructlabManager.requestCreateInstructlabContainer({});\n  await expect(containerIdPromise).rejects.toBeInstanceOf(Error);\n});\n\nasync function waitTasks(id: string, nb: number): Promise<Task[]> {\n  return vi.waitFor(() => {\n    const tasks = taskRegistry.getTasksByLabels({ trackingId: id });\n    if (tasks.length !== nb) {\n      throw new Error('not completed');\n    }\n    return tasks;\n  });\n}\n\ntest('requestCreateInstructlabContainer returns id and error if listImage returns error', async () => {\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockRejectedValue(new Error());\n  await instructlabManager.requestCreateInstructlabContainer({});\n  const tasks = await waitTasks(INSTRUCTLAB_CONTAINER_TRACKINGID, 2);\n  expect(tasks.some(task => task.state === 'error')).toBeTruthy();\n});\n\ntest('requestCreateInstructlabContainer returns id and error if listImage returns image', async () => {\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockResolvedValue([\n    { RepoTags: [instructlab_images.default] } as unknown as ImageInfo,\n  ]);\n  await instructlabManager.requestCreateInstructlabContainer({});\n  const tasks = await waitTasks(INSTRUCTLAB_CONTAINER_TRACKINGID, 3);\n  expect(tasks.some(task => task.state === 'error')).toBeTruthy();\n});\n\ntest('requestCreateInstructlabContainer 
returns id and no error if createContainer returns id', async () => {\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockResolvedValue([\n    { RepoTags: [instructlab_images.default] } as unknown as ImageInfo,\n  ]);\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'containerId',\n  } as unknown as ContainerCreateResult);\n  await instructlabManager.requestCreateInstructlabContainer({});\n  const tasks = await waitTasks(INSTRUCTLAB_CONTAINER_TRACKINGID, 3);\n  expect(tasks.some(task => task.state === 'error')).toBeFalsy();\n});\n"
  },
  {
    "path": "packages/backend/src/managers/instructlab/instructlabManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\nimport type { InstructlabContainerConfiguration } from '@shared/models/instructlab/IInstructlabContainerConfiguration';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport {\n  type TelemetryLogger,\n  containerEngine,\n  type ContainerProviderConnection,\n  type ContainerCreateOptions,\n  type Disposable,\n} from '@podman-desktop/api';\nimport type { PodmanConnection, PodmanConnectionEvent } from '../podmanConnection';\nimport instructlab_images from '../../assets/instructlab-images.json';\nimport { getImageInfo } from '../../utils/inferenceUtils';\nimport path from 'node:path';\nimport fs from 'node:fs/promises';\nimport type { ContainerRegistry, ContainerEvent } from '../../registries/ContainerRegistry';\nimport { DISABLE_SELINUX_LABEL_SECURITY_OPTION } from '../../utils/utils';\nimport { INSTRUCTLAB_CONTAINER_TRACKINGID } from '@shared/models/instructlab/IInstructlabContainerInfo';\nimport { getRandomName } from '../../utils/randomUtils';\n\nexport const INSTRUCTLAB_CONTAINER_LABEL = 'ai-lab-instructlab-container';\n\nexport class 
InstructlabManager implements Disposable {\n  #initialized: boolean;\n  #containerId: string | undefined;\n  #disposables: Disposable[];\n\n  constructor(\n    private readonly appUserDirectory: string,\n    private taskRegistry: TaskRegistry,\n    private podmanConnection: PodmanConnection,\n    private containerRegistry: ContainerRegistry,\n    private telemetryLogger: TelemetryLogger,\n  ) {\n    this.#initialized = false;\n    this.#disposables = [];\n  }\n\n  init(): void {\n    this.#disposables.push(this.podmanConnection.onPodmanConnectionEvent(this.watchMachineEvent.bind(this)));\n    this.#disposables.push(this.containerRegistry.onStartContainerEvent(this.onStartContainerEvent.bind(this)));\n    this.#disposables.push(this.containerRegistry.onStopContainerEvent(this.onStopContainerEvent.bind(this)));\n  }\n\n  dispose(): void {\n    this.#disposables.forEach(disposable => disposable.dispose());\n    this.#disposables = [];\n  }\n\n  private async refreshInstructlabContainer(id?: string): Promise<void> {\n    const containers = await containerEngine.listContainers();\n    const containerId = (this.#containerId = containers\n      .filter(c => !id || c.Id === id)\n      .filter(c => c.State === 'running' && c.Labels && INSTRUCTLAB_CONTAINER_LABEL in c.Labels)\n      .map(c => c.Id)\n      .at(0));\n    if ((id && containerId) || !id) {\n      this.#containerId = containerId;\n    }\n  }\n\n  private async watchMachineEvent(event: PodmanConnectionEvent): Promise<void> {\n    if ((event.status === 'started' && !this.#containerId) || (event.status === 'stopped' && this.#containerId)) {\n      await this.refreshInstructlabContainer();\n    }\n  }\n\n  private async onStartContainerEvent(event: ContainerEvent): Promise<void> {\n    await this.refreshInstructlabContainer(event.id);\n  }\n\n  private onStopContainerEvent(event: ContainerEvent): void {\n    console.log('event id:', event.id, ' containerId: ', this.#containerId);\n    if (this.#containerId === 
event.id) {\n      this.#containerId = undefined;\n      this.taskRegistry.deleteByLabels({ trackingId: INSTRUCTLAB_CONTAINER_TRACKINGID });\n    }\n  }\n\n  public getSessions(): InstructlabSession[] {\n    return [\n      {\n        name: 'session 1',\n        modelId: 'hf.facebook.detr-resnet-101',\n        targetModel: 'hf.facebook.detr-resnet-101-target',\n        repository: '/a1',\n        status: 'fine-tuned',\n        createdTime: new Date(new Date().getTime() - 6 * 24 * 60 * 60 * 1000).getTime() / 1000, // 6 days ago\n      },\n      {\n        name: 'session 2',\n        modelId: 'hf.ibm-granite.granite-8b-code-instruct',\n        targetModel: 'hf.ibm-granite.granite-8b-code-instruct-target',\n        repository: '/a2',\n        status: 'generating-instructions',\n        createdTime: new Date(new Date().getTime() - 4 * 60 * 60 * 1000).getTime() / 1000, // 4 hours ago\n      },\n    ];\n  }\n\n  async getInstructLabContainer(): Promise<string | undefined> {\n    if (!this.#initialized) {\n      const containers = await containerEngine.listContainers();\n      this.#containerId = containers\n        .filter(c => c.State === 'running' && c.Labels && INSTRUCTLAB_CONTAINER_LABEL in c.Labels)\n        .map(c => c.Id)\n        .at(0);\n      this.#initialized = true;\n    }\n    return this.#containerId;\n  }\n\n  async requestCreateInstructlabContainer(config: InstructlabContainerConfiguration): Promise<void> {\n    // create a tracking id to put in the labels\n    const trackingId: string = INSTRUCTLAB_CONTAINER_TRACKINGID;\n\n    const labels = {\n      trackingId: trackingId,\n    };\n\n    const task = this.taskRegistry.createTask('Creating InstructLab container', 'loading', {\n      trackingId: trackingId,\n    });\n\n    let connection: ContainerProviderConnection | undefined;\n    if (config.connection) {\n      connection = this.podmanConnection.getContainerProviderConnection(config.connection);\n    } else {\n      connection = 
this.podmanConnection.findRunningContainerProviderConnection();\n    }\n\n    if (!connection) throw new Error('cannot find running container provider connection');\n\n    this.createInstructlabContainer(connection, labels)\n      .then((containerId: string) => {\n        this.#containerId = containerId;\n        this.taskRegistry.updateTask({\n          ...task,\n          state: 'success',\n          labels: {\n            ...task.labels,\n            containerId: containerId,\n          },\n        });\n        this.telemetryLogger.logUsage('instructlab.startContainer');\n      })\n      .catch((err: unknown) => {\n        // Get all tasks using the tracker\n        const tasks = this.taskRegistry.getTasksByLabels({\n          trackingId: trackingId,\n        });\n        // Mark the remaining tasks still in loading state as error\n        tasks\n          .filter(t => t.state === 'loading' && t.id !== task.id)\n          .forEach(t => {\n            this.taskRegistry.updateTask({\n              ...t,\n              state: 'error',\n            });\n          });\n        // Update the main task\n        this.taskRegistry.updateTask({\n          ...task,\n          state: 'error',\n          error: `Something went wrong while trying to create an inference server ${String(err)}.`,\n        });\n        this.telemetryLogger.logError('instructlab.startContainer', { error: err });\n      });\n  }\n\n  async createInstructlabContainer(\n    connection: ContainerProviderConnection,\n    labels: { [p: string]: string },\n  ): Promise<string> {\n    const image = instructlab_images.default;\n    const pullingTask = this.taskRegistry.createTask(`Pulling ${image}.`, 'loading', labels);\n    const imageInfo = await getImageInfo(connection, image, () => {})\n      .catch((err: unknown) => {\n        pullingTask.state = 'error';\n        pullingTask.progress = undefined;\n        pullingTask.error = `Something went wrong while pulling ${image}: ${String(err)}`;\n        throw err;\n      })\n     
 .then(imageInfo => {\n        pullingTask.state = 'success';\n        pullingTask.progress = undefined;\n        return imageInfo;\n      })\n      .finally(() => {\n        this.taskRegistry.updateTask(pullingTask);\n      });\n\n    const folder = await this.getInstructLabContainerFolder();\n\n    const containerTask = this.taskRegistry.createTask('Starting InstructLab container', 'loading', labels);\n    const createContainerOptions: ContainerCreateOptions = {\n      Image: imageInfo.Id,\n      name: getRandomName('instructlab'),\n      Labels: { [INSTRUCTLAB_CONTAINER_LABEL]: image },\n      HostConfig: {\n        AutoRemove: true,\n        SecurityOpt: [DISABLE_SELINUX_LABEL_SECURITY_OPTION],\n        Mounts: [\n          {\n            Target: '/instructlab/.cache/instructlab',\n            Source: path.join(folder, '.cache'),\n            Type: 'bind',\n          },\n          {\n            Target: '/instructlab/.config/instructlab',\n            Source: path.join(folder, '.config'),\n            Type: 'bind',\n          },\n          {\n            Target: '/instructlab/.local/share/instructlab',\n            Source: path.join(folder, '.local'),\n            Type: 'bind',\n          },\n        ],\n        UsernsMode: 'keep-id:uid=1000,gid=1000',\n      },\n      OpenStdin: true,\n      start: true,\n    };\n    try {\n      const { id } = await containerEngine.createContainer(imageInfo.engineId, createContainerOptions);\n      // update the task\n      containerTask.state = 'success';\n      containerTask.progress = undefined;\n      return id;\n    } catch (err: unknown) {\n      containerTask.state = 'error';\n      containerTask.progress = undefined;\n      containerTask.error = `Something went wrong while creating container: ${String(err)}`;\n      throw err;\n    } finally {\n      this.taskRegistry.updateTask(containerTask);\n    }\n  }\n\n  private async getInstructLabContainerFolder(): Promise<string> {\n    const instructlabPath = 
path.join(this.appUserDirectory, 'instructlab', 'container');\n    await fs.mkdir(instructlabPath, { recursive: true });\n    await fs.mkdir(path.join(instructlabPath, '.cache'), { recursive: true });\n    await fs.mkdir(path.join(instructlabPath, '.config'), { recursive: true });\n    await fs.mkdir(path.join(instructlabPath, '.local'), { recursive: true });\n    return instructlabPath;\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/llama-stack/llamaStackManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { TaskRegistry } from '../../registries/TaskRegistry';\nimport { assert, beforeEach, expect, test, vi } from 'vitest';\nimport type { ContainerCreateResult, ContainerInfo, Disposable, ImageInfo, TelemetryLogger } from '@podman-desktop/api';\nimport { containerEngine } from '@podman-desktop/api';\nimport type { PodmanConnection } from '../podmanConnection';\nimport type { ContainerRegistry } from '../../registries/ContainerRegistry';\nimport { VMType } from '@shared/models/IPodman';\nimport type { Task } from '@shared/models/ITask';\nimport llama_stack_images from '../../assets/llama-stack-images.json';\nimport llama_stack_playground_images from '../../assets/llama-stack-playground-images.json';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport {\n  LLAMA_STACK_API_PORT_LABEL,\n  LLAMA_STACK_CONTAINER_LABEL,\n  LLAMA_STACK_PLAYGROUND_PORT_LABEL,\n  LlamaStackManager,\n} from './llamaStackManager';\nimport {\n  LLAMA_STACK_CONTAINER_TRACKINGID,\n  type LlamaStackContainers,\n} from '@shared/models/llama-stack/LlamaStackContainerInfo';\nimport type { ConfigurationRegistry } from '../../registries/ConfigurationRegistry';\nimport type { 
ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport type { ModelsManager } from '../modelsManager';\nimport * as utilsPorts from '../../utils/ports';\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    EventEmitter: vi.fn(),\n    containerEngine: {\n      listContainers: vi.fn(),\n      listImages: vi.fn(),\n      createContainer: vi.fn(),\n      onEvent: vi.fn(),\n      pullImage: vi.fn(),\n      inspectContainer: vi.fn(),\n      startContainer: vi.fn(),\n      stopContainer: vi.fn(),\n      deleteContainer: vi.fn(),\n    },\n    env: {\n      isWindows: false,\n    },\n  };\n});\n\nvi.mock('../../utils/ports');\n\nclass TestLlamaStackManager extends LlamaStackManager {\n  public override async refreshLlamaStackContainers(): Promise<void> {\n    return super.refreshLlamaStackContainers();\n  }\n\n  public override getContainersInfo(): LlamaStackContainers | undefined {\n    return super.getContainersInfo();\n  }\n}\n\nconst podmanConnection: PodmanConnection = {\n  onPodmanConnectionEvent: vi.fn(),\n  findRunningContainerProviderConnection: vi.fn(),\n  execute: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst containerRegistry = {\n  onStartContainerEvent: vi.fn(),\n  onStopContainerEvent: vi.fn(),\n  onHealthyContainerEvent: vi.fn(),\n} as unknown as ContainerRegistry;\n\nconst configurationRegistry = {\n  getExtensionConfiguration: vi.fn(),\n} as unknown as ConfigurationRegistry;\n\nconst telemetryMock = {\n  logUsage: vi.fn(),\n  logError: vi.fn(),\n} as unknown as TelemetryLogger;\n\nconst modelsManagerMock = {\n  getModelsInfo: vi.fn(),\n} as unknown as ModelsManager;\n\nlet taskRegistry: TaskRegistry;\n\nlet llamaStackManager: TestLlamaStackManager;\n\nconst LLAMA_STACK_CONTAINER_RUNNING = {\n  Id: 'dummyId',\n  State: 'running',\n  Labels: {\n    [LLAMA_STACK_CONTAINER_LABEL]: 'dummyLabel',\n    [LLAMA_STACK_API_PORT_LABEL]: '50000',\n  },\n} as unknown as ContainerInfo;\n\nconst LLAMA_STACK_CONTAINER_STOPPED = {\n  
Id: 'dummyId',\n  State: 'stopped',\n} as unknown as ContainerInfo;\n\nconst NON_LLAMA_STACK_CONTAINER = { Id: 'dummyId' } as unknown as ContainerInfo;\n\nconst NO_OP_DISPOSABLE = {\n  dispose: (): void => {},\n} as Disposable;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  taskRegistry = new TaskRegistry({ fire: vi.fn().mockResolvedValue(true) } as unknown as RpcExtension);\n  llamaStackManager = new TestLlamaStackManager(\n    '',\n    taskRegistry,\n    podmanConnection,\n    containerRegistry,\n    configurationRegistry,\n    telemetryMock,\n    modelsManagerMock,\n  );\n});\n\ntest('getLlamaStackContainers should return undefined if no containers', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  const stack_containers = await llamaStackManager.getLlamaStackContainers();\n  expect(stack_containers).toEqual({ server: undefined, playground: undefined });\n});\n\ntest('getLlamaStackContainers should return undefined if no llama stack container', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([NON_LLAMA_STACK_CONTAINER]);\n  const stack_containers = await llamaStackManager.getLlamaStackContainers();\n  expect(stack_containers).toEqual({ server: undefined, playground: undefined });\n});\n\ntest('getLlamaStackContainers should return server info if llama stack server container', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([LLAMA_STACK_CONTAINER_RUNNING]);\n  const containerInfo = await llamaStackManager.getLlamaStackContainers();\n  expect(containerInfo).toEqual({\n    server: { containerId: 'dummyId', port: 50000, state: 'running' },\n    playground: undefined,\n  });\n});\n\ntest('requestcreateLlamaStackContainerss throws error if no podman connection', async () => {\n  const containerIdPromise = llamaStackManager.requestcreateLlamaStackContainerss({});\n  await expect(containerIdPromise).rejects.toBeInstanceOf(Error);\n});\n\nasync function waitTasks(id: string, 
nb: number): Promise<Task[]> {\n  return vi.waitFor(() => {\n    const tasks = taskRegistry.getTasksByLabels({ trackingId: id });\n    if (tasks.length < nb) {\n      throw new Error('not completed');\n    }\n    return tasks.slice(0, nb);\n  });\n}\n\ntest('requestcreateLlamaStackContainerss returns id and error if listImage returns error', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockRejectedValue(new Error());\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n  const tasks = await waitTasks(LLAMA_STACK_CONTAINER_TRACKINGID, 2);\n  expect(tasks.some(task => task.state === 'error')).toBeTruthy();\n});\n\ntest('requestcreateLlamaStackContainerss returns id and error if listImage returns image', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockResolvedValue([\n    { RepoTags: [llama_stack_images.default] } as unknown as ImageInfo,\n  ]);\n  vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n    apiPort: 10000,\n  } as ExtensionConfiguration);\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n  const tasks = await waitTasks(LLAMA_STACK_CONTAINER_TRACKINGID, 3);\n  expect(tasks.some(task => task.state === 'error')).toBeTruthy();\n});\n\ntest('requestcreateLlamaStackContainerss returns no error if createContainer returns id and 
container becomes healthy', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockResolvedValue([\n    {\n      RepoTags: [llama_stack_images.default, llama_stack_playground_images.default],\n      Id: 'imageId',\n      engineId: 'engine1',\n    } as unknown as ImageInfo,\n  ]);\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'containerId',\n  } as unknown as ContainerCreateResult);\n  vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n    apiPort: 10000,\n  } as ExtensionConfiguration);\n  vi.mocked(utilsPorts.getFreeRandomPort).mockResolvedValueOnce(1234).mockResolvedValueOnce(5678);\n  vi.mocked(containerEngine.pullImage).mockResolvedValue();\n  vi.mocked(modelsManagerMock.getModelsInfo).mockReturnValue([]);\n  vi.mocked(podmanConnection.execute).mockResolvedValue({ stdout: '', stderr: '', command: '' });\n  vi.mocked(containerRegistry.onHealthyContainerEvent).mockImplementation(cb => {\n    // Fire the callback immediately for testing\n    setTimeout(() => cb({ id: 'containerId' }), 100);\n    return NO_OP_DISPOSABLE;\n  });\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n  const tasks = await waitTasks(LLAMA_STACK_CONTAINER_TRACKINGID, 4);\n  expect(tasks.some(task => task.state === 'error')).toBeFalsy();\n});\n\ntest('requestcreateLlamaStackContainerss registers all local models', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n  
  endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockResolvedValue([\n    { RepoTags: [llama_stack_images.default, llama_stack_playground_images.default] } as unknown as ImageInfo,\n  ]);\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'containerId',\n  } as unknown as ContainerCreateResult);\n  vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n    apiPort: 10000,\n  } as ExtensionConfiguration);\n  vi.mocked(utilsPorts.getFreeRandomPort).mockResolvedValueOnce(1234).mockResolvedValueOnce(5678);\n  vi.mocked(containerEngine.pullImage).mockResolvedValue();\n  vi.mocked(podmanConnection.execute).mockResolvedValue({ stdout: '', stderr: '', command: '' });\n  vi.mocked(containerRegistry.onHealthyContainerEvent).mockImplementation(cb => {\n    setTimeout(() => cb({ id: 'containerId' }), 100);\n    return NO_OP_DISPOSABLE;\n  });\n  vi.mocked(modelsManagerMock.getModelsInfo).mockReturnValue([\n    {\n      id: 'model1',\n      name: 'Model 1',\n      description: '',\n      file: { file: 'model1', path: '/path/to' },\n    },\n    {\n      id: 'model2',\n      name: 'Model 2',\n      description: '',\n      file: { file: 'model2', path: '/path/to' },\n    },\n    {\n      id: 'model3',\n      name: 'Model 3',\n      description: '',\n    },\n  ]);\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n  const tasks = await waitTasks(LLAMA_STACK_CONTAINER_TRACKINGID, 6);\n  expect(tasks.some(task => task.state === 'error')).toBeFalsy();\n  await vi.waitFor(() => {\n    expect(podmanConnection.execute).toHaveBeenCalledTimes(2);\n  });\n  expect(podmanConnection.execute).toHaveBeenCalledWith(expect.anything(), [\n    'exec',\n    'containerId',\n    'llama-stack-client',\n    'models',\n    'register',\n    'Model 1',\n    '--provider-id',\n    'podman-ai-lab',\n  ]);\n  expect(podmanConnection.execute).toHaveBeenCalledWith(expect.anything(), [\n    
'exec',\n    'containerId',\n    'llama-stack-client',\n    'models',\n    'register',\n    'Model 2',\n    '--provider-id',\n    'podman-ai-lab',\n  ]);\n});\n\ntest('requestcreateLlamaStackContainerss creates playground container', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listImages).mockResolvedValue([\n    { RepoTags: [llama_stack_images.default, llama_stack_playground_images.default] } as unknown as ImageInfo,\n  ]);\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'containerId',\n  } as unknown as ContainerCreateResult);\n  vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n    apiPort: 10000,\n  } as ExtensionConfiguration);\n  vi.mocked(utilsPorts.getFreeRandomPort).mockResolvedValueOnce(1234).mockResolvedValueOnce(5678);\n  vi.mocked(containerEngine.pullImage).mockResolvedValue();\n  vi.mocked(podmanConnection.execute).mockResolvedValue({ stdout: '', stderr: '', command: '' });\n  vi.mocked(containerRegistry.onHealthyContainerEvent).mockImplementation(cb => {\n    setTimeout(() => cb({ id: 'containerId' }), 100);\n    return NO_OP_DISPOSABLE;\n  });\n  vi.mocked(modelsManagerMock.getModelsInfo).mockReturnValue([\n    {\n      id: 'model1',\n      name: 'Model 1',\n      description: '',\n      file: { file: 'model1', path: '/path/to' },\n    },\n    {\n      id: 'model2',\n      name: 'Model 2',\n      description: '',\n      file: { file: 'model2', path: '/path/to' },\n    },\n    {\n      id: 'model3',\n      name: 'Model 3',\n      description: '',\n    },\n  ]);\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n  const tasks = await 
waitTasks(LLAMA_STACK_CONTAINER_TRACKINGID, 7);\n  expect(tasks.some(task => task.state === 'error')).toBeFalsy();\n  expect(containerEngine.createContainer).toHaveBeenCalledTimes(2);\n  expect(containerEngine.createContainer).toHaveBeenNthCalledWith(\n    2,\n    undefined,\n    expect.objectContaining({\n      Env: ['LLAMA_STACK_ENDPOINT=http://host.containers.internal:1234'],\n      HostConfig: expect.objectContaining({\n        PortBindings: {\n          '8501/tcp': [\n            {\n              HostPort: '5678',\n            },\n          ],\n        },\n      }),\n    }),\n  );\n});\n\ntest('requestcreateLlamaStackContainerss starts both if server and playground exist', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  const server = { Id: 'serverId', Labels: { [LLAMA_STACK_API_PORT_LABEL]: '50000' } } as unknown as ContainerInfo;\n  const playground = {\n    Id: 'playgroundId',\n    Labels: { [LLAMA_STACK_PLAYGROUND_PORT_LABEL]: '60000' },\n  } as unknown as ContainerInfo;\n\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([server, playground]);\n  const startBothSpy = vi\n    .spyOn(llamaStackManager as unknown as { startBoth: () => Promise<void> }, 'startBoth')\n    .mockResolvedValue(undefined);\n\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n\n  expect(startBothSpy).toHaveBeenCalledWith(server, playground, expect.any(Object));\n});\n\ntest('requestcreateLlamaStackContainerss creates playground if server exists but playground missing', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman 
Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  const server = { Id: 'serverId', Labels: { [LLAMA_STACK_API_PORT_LABEL]: '50000' } } as unknown as ContainerInfo;\n\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([server]);\n  const createPlaygroundSpy = vi\n    .spyOn(\n      llamaStackManager as unknown as { createPlaygroundFromServer: () => Promise<void> },\n      'createPlaygroundFromServer',\n    )\n    .mockResolvedValue(undefined);\n\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n\n  expect(createPlaygroundSpy).toHaveBeenCalledWith(server, expect.any(Object), expect.anything());\n});\n\ntest('requestcreateLlamaStackContainerss deletes existing playground and creates both if server missing', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  const playground = {\n    Id: 'playgroundId',\n    Labels: { [LLAMA_STACK_PLAYGROUND_PORT_LABEL]: '60000' },\n  } as unknown as ContainerInfo;\n\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([playground]);\n  const createBothSpy = vi\n    .spyOn(llamaStackManager as unknown as { createBoth: () => Promise<void> }, 'createBoth')\n    .mockResolvedValue(undefined);\n\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n\n  expect(createBothSpy).toHaveBeenCalledWith(playground, expect.any(Object), expect.anything());\n});\n\ntest('requestcreateLlamaStackContainerss creates both if server and playground missing', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  
vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n    name: 'Podman Machine',\n    vmType: VMType.UNKNOWN,\n    type: 'podman',\n    status: () => 'started',\n    endpoint: {\n      socketPath: 'socket.sock',\n    },\n  });\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  const createBothSpy = vi\n    .spyOn(llamaStackManager as unknown as { createBoth: () => Promise<void> }, 'createBoth')\n    .mockResolvedValue(undefined);\n\n  await llamaStackManager.requestcreateLlamaStackContainerss({});\n\n  expect(createBothSpy).toHaveBeenCalledWith(undefined, expect.any(Object), expect.anything());\n});\n\ntest('onPodmanConnectionEvent start event should call refreshLlamaStackContainers and set containerInfo', async () => {\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.spyOn(llamaStackManager, 'refreshLlamaStackContainers');\n  vi.mocked(containerEngine.listContainers).mockResolvedValueOnce([LLAMA_STACK_CONTAINER_RUNNING]);\n  vi.mocked(podmanConnection.onPodmanConnectionEvent).mockImplementation(f => {\n    f({\n      status: 'started',\n    });\n    return NO_OP_DISPOSABLE;\n  });\n\n  llamaStackManager.init();\n\n  expect(llamaStackManager.refreshLlamaStackContainers).toHaveBeenCalledWith();\n\n  await vi.waitFor(() => {\n    expect(llamaStackManager.getContainersInfo()).toEqual({\n      server: { containerId: 'dummyId', port: 50000, state: 'running' },\n      playground: undefined,\n    });\n  });\n});\n\ntest('onPodmanConnectionEvent stop event should call refreshLlamaStackContainers and clear containerInfo', async () => {\n  vi.spyOn(llamaStackManager, 'refreshLlamaStackContainers');\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(containerEngine.listContainers).mockResolvedValueOnce([LLAMA_STACK_CONTAINER_RUNNING]);\n  vi.mocked(podmanConnection.onPodmanConnectionEvent).mockReturnValue(NO_OP_DISPOSABLE);\n\n  llamaStackManager.init();\n  const 
listener = vi.mocked(podmanConnection.onPodmanConnectionEvent).mock.calls[0][0];\n  assert(listener, 'onPodmanConnectionEvent should have been called');\n\n  listener({ status: 'started' });\n\n  expect(llamaStackManager.refreshLlamaStackContainers).toHaveBeenCalledWith();\n  await vi.waitFor(() => {\n    expect(llamaStackManager.getContainersInfo()).toEqual({\n      server: { containerId: 'dummyId', port: 50000, state: 'running' },\n      playground: undefined,\n    });\n  });\n\n  vi.mocked(llamaStackManager.refreshLlamaStackContainers).mockClear();\n  vi.mocked(containerEngine.listContainers).mockResolvedValueOnce([LLAMA_STACK_CONTAINER_STOPPED]);\n\n  listener({ status: 'stopped' });\n\n  expect(llamaStackManager.refreshLlamaStackContainers).toHaveBeenCalledWith();\n  await vi.waitFor(async () => {\n    expect(llamaStackManager.getContainersInfo()).toEqual({ server: undefined, playground: undefined });\n  });\n});\n\ntest('onStartContainerEvent event should call refreshLlamaStackContainers and set containerInfo', async () => {\n  vi.spyOn(llamaStackManager, 'refreshLlamaStackContainers');\n  vi.mocked(containerEngine.listContainers).mockResolvedValue([]);\n  vi.mocked(containerEngine.listContainers).mockResolvedValueOnce([LLAMA_STACK_CONTAINER_RUNNING]);\n  vi.mocked(containerRegistry.onStartContainerEvent).mockImplementation(f => {\n    f({\n      id: 'dummyId',\n    });\n    return NO_OP_DISPOSABLE;\n  });\n\n  llamaStackManager.init();\n\n  expect(llamaStackManager.refreshLlamaStackContainers).toHaveBeenCalledWith();\n\n  await vi.waitFor(() => {\n    expect(llamaStackManager.getContainersInfo()).toEqual({\n      server: { containerId: 'dummyId', port: 50000, state: 'running' },\n      playground: undefined,\n    });\n  });\n});\n\ntest('onStopContainerEvent event should call refreshLlamaStackContainers and clear containerInfo', async () => {\n  vi.spyOn(llamaStackManager, 'refreshLlamaStackContainers');\n  vi.spyOn(taskRegistry, 'deleteByLabels');\n  
vi.mocked(containerEngine.listContainers).mockResolvedValueOnce([LLAMA_STACK_CONTAINER_RUNNING]);\n  vi.mocked(containerRegistry.onStartContainerEvent).mockImplementation(f => {\n    f({\n      id: 'dummyId',\n    });\n    return NO_OP_DISPOSABLE;\n  });\n  vi.mocked(containerRegistry.onStopContainerEvent).mockReturnValue(NO_OP_DISPOSABLE);\n\n  llamaStackManager.init();\n\n  expect(llamaStackManager.refreshLlamaStackContainers).toHaveBeenCalledWith();\n\n  await vi.waitFor(() => {\n    expect(llamaStackManager.getContainersInfo()).toEqual({\n      server: { containerId: 'dummyId', port: 50000, state: 'running' },\n      playground: undefined,\n    });\n  });\n\n  vi.mocked(llamaStackManager.refreshLlamaStackContainers).mockClear();\n  vi.mocked(containerEngine.listContainers).mockResolvedValueOnce([LLAMA_STACK_CONTAINER_STOPPED]);\n\n  const listener = vi.mocked(containerRegistry.onStopContainerEvent).mock.calls[0][0];\n  assert(listener, 'onStopContainerEvent should have been called');\n\n  listener({ id: 'dummyId' });\n\n  expect(taskRegistry.deleteByLabels).toHaveBeenCalled();\n  await vi.waitFor(async () => {\n    expect(llamaStackManager.getContainersInfo()).toBeUndefined();\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/llama-stack/llamaStackManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport {\n  containerEngine,\n  env,\n  process,\n  type ContainerInfo,\n  type Disposable,\n  type TelemetryLogger,\n  type ContainerProviderConnection,\n  type ContainerCreateOptions,\n  type ImageInfo,\n} from '@podman-desktop/api';\nimport type { PodmanConnection, PodmanConnectionEvent } from '../podmanConnection';\nimport llama_stack_images from '../../assets/llama-stack-images.json';\nimport llama_stack_playground_images from '../../assets/llama-stack-playground-images.json';\nimport { getImageInfo } from '../../utils/inferenceUtils';\nimport type { ContainerRegistry, ContainerEvent, ContainerHealthy } from '../../registries/ContainerRegistry';\nimport { DISABLE_SELINUX_LABEL_SECURITY_OPTION } from '../../utils/utils';\nimport { getRandomName } from '../../utils/randomUtils';\nimport type { LlamaStackContainerInfo, LlamaStackContainers } from '@shared/models/llama-stack/LlamaStackContainerInfo';\nimport { LLAMA_STACK_CONTAINER_TRACKINGID } from '@shared/models/llama-stack/LlamaStackContainerInfo';\nimport type { LlamaStackContainerConfiguration } from 
'@shared/models/llama-stack/LlamaStackContainerConfiguration';\nimport path from 'node:path';\nimport fs from 'node:fs/promises';\nimport type { ConfigurationRegistry } from '../../registries/ConfigurationRegistry';\nimport { getFreeRandomPort } from '../../utils/ports';\nimport { TaskRunner } from '../TaskRunner';\nimport type { ModelsManager } from '../modelsManager';\nimport { getPodmanCli, getPodmanMachineName } from '../../utils/podman';\n\nexport const LLAMA_STACK_CONTAINER_LABEL = 'ai-lab-llama-stack-container';\nexport const LLAMA_STACK_API_PORT_LABEL = 'ai-lab-llama-stack-api-port';\nexport const LLAMA_STACK_PLAYGROUND_PORT_LABEL = 'ai-lab-llama-stack-playground-port';\nexport const SECOND: number = 1_000_000_000;\n\n/*\n * Get the local IP address of the Podman machine.\n * See https://learn.microsoft.com/en-us/windows/wsl/networking\n */\nasync function getLocalIPAddress(connection: ContainerProviderConnection): Promise<string> {\n  const cli = getPodmanCli();\n  const machineName = getPodmanMachineName(connection);\n  const result = await process.exec(cli, [\n    'machine',\n    'ssh',\n    machineName,\n    'ip',\n    'route',\n    'show',\n    '|',\n    'grep',\n    '-i',\n    'default',\n    '|',\n    'awk',\n    // eslint-disable-next-line quotes\n    \"'{print $3}'\",\n  ]);\n  return result.stdout.trim();\n}\n\nexport class LlamaStackManager implements Disposable {\n  #initialized: boolean;\n  #stack_containers: LlamaStackContainers | undefined;\n  #creationInProgress = false;\n  #disposables: Disposable[];\n  #taskRunner: TaskRunner;\n\n  constructor(\n    private readonly appUserDirectory: string,\n    private taskRegistry: TaskRegistry,\n    private podmanConnection: PodmanConnection,\n    private containerRegistry: ContainerRegistry,\n    private configurationRegistry: ConfigurationRegistry,\n    private telemetryLogger: TelemetryLogger,\n    private modelsManager: ModelsManager,\n  ) {\n    this.#initialized = false;\n    this.#disposables = 
[];\n    this.#taskRunner = new TaskRunner(this.taskRegistry);\n  }\n\n  init(): void {\n    this.#disposables.push(this.podmanConnection.onPodmanConnectionEvent(this.watchMachineEvent.bind(this)));\n    this.#disposables.push(this.containerRegistry.onStartContainerEvent(this.onStartContainerEvent.bind(this)));\n    this.#disposables.push(this.containerRegistry.onStopContainerEvent(this.onStopContainerEvent.bind(this)));\n  }\n\n  dispose(): void {\n    this.#disposables.forEach(disposable => disposable.dispose());\n    this.#disposables = [];\n  }\n\n  private async watchMachineEvent(event: PodmanConnectionEvent): Promise<void> {\n    if (\n      (event.status === 'started' && (!this.#stack_containers?.server || !this.#stack_containers?.playground)) ||\n      (event.status === 'stopped' && (this.#stack_containers?.server || this.#stack_containers?.playground))\n    ) {\n      await this.refreshLlamaStackContainers();\n    }\n  }\n\n  private async onStartContainerEvent(): Promise<void> {\n    await this.refreshLlamaStackContainers();\n  }\n\n  private async onStopContainerEvent(event: ContainerEvent): Promise<void> {\n    const serverId = this.#stack_containers?.server?.containerId;\n    const playgroundId = this.#stack_containers?.playground?.containerId;\n    if (this.#creationInProgress) return;\n\n    if (serverId === event.id || playgroundId === event.id) {\n      this.#stack_containers = undefined;\n      this.taskRegistry.deleteByLabels({ trackingId: LLAMA_STACK_CONTAINER_TRACKINGID });\n    }\n\n    await this.refreshLlamaStackContainers();\n  }\n\n  /**\n   * getLlamaStackContainers returns the first running container with a Llama Stack label.\n   * The container is searched only the first time and the result is cached for subsequent calls.\n   *\n   * Returns undefined if no container is found\n   */\n  async getLlamaStackContainers(): Promise<LlamaStackContainers | undefined> {\n    if (!this.#initialized) {\n      await 
this.refreshLlamaStackContainers();\n      this.#initialized = true;\n    }\n    return this.#stack_containers;\n  }\n\n  /**\n   * refreshLlamaStackContainers refreshes the container info.\n   * It is called when the machine is started or when a container is stopped.\n   */\n  protected async refreshLlamaStackContainers(): Promise<void> {\n    const containers = await containerEngine.listContainers();\n\n    const serverContainer = containers.find(c => c.Labels && LLAMA_STACK_API_PORT_LABEL in c.Labels);\n    let serverInfo: LlamaStackContainerInfo | undefined;\n\n    if (serverContainer) {\n      serverInfo = {\n        containerId: serverContainer.Id,\n        port: parseInt(serverContainer.Labels[LLAMA_STACK_API_PORT_LABEL], 10),\n        state: serverContainer.State,\n      };\n    }\n\n    const playgroundContainer = containers.find(c => c.Labels && LLAMA_STACK_PLAYGROUND_PORT_LABEL in c.Labels);\n    let playgroundInfo: LlamaStackContainerInfo | undefined;\n\n    if (playgroundContainer) {\n      playgroundInfo = {\n        containerId: playgroundContainer.Id,\n        port: parseInt(playgroundContainer.Labels[LLAMA_STACK_PLAYGROUND_PORT_LABEL], 10),\n        state: playgroundContainer.State,\n      };\n    }\n\n    this.#stack_containers = {\n      server: serverInfo,\n      playground: playgroundInfo,\n    };\n  }\n\n  /**\n   * requestcreateLlamaStackContainerss creates the Llama Stack containers.\n   * It is called when the user clicks the \"Start\" button.\n   *\n   * Flowchart for checking containers and handling them:\n   *\n   * Server exists\n   *   ├─ Playground exists\n   *   │    └─ Start both\n   *   └─ Playground doesn't exist\n   *        └─ Create new playground\n   *\n   * Server doesn't exist\n   *   ├─ Playground exists\n   *   │    └─ Delete playground and update state\n   *   └─ Playground doesn't exist\n   *        └─ Create both\n   */\n  async requestcreateLlamaStackContainerss(config: LlamaStackContainerConfiguration): Promise<void> 
{\n    const connection: ContainerProviderConnection | undefined = config.connection\n      ? this.podmanConnection.getContainerProviderConnection(config.connection)\n      : this.podmanConnection.findRunningContainerProviderConnection();\n\n    if (!connection) throw new Error('Cannot find running container provider connection');\n\n    const labels = { trackingId: LLAMA_STACK_CONTAINER_TRACKINGID };\n    const containers = await containerEngine.listContainers();\n    const server = containers.find(c => c.Labels && LLAMA_STACK_API_PORT_LABEL in c.Labels);\n    const playground = containers.find(c => c.Labels && LLAMA_STACK_PLAYGROUND_PORT_LABEL in c.Labels);\n\n    try {\n      if (server) {\n        if (playground) {\n          await this.startBoth(server, playground, labels);\n        } else {\n          await this.createPlaygroundFromServer(server, labels, connection);\n        }\n      } else {\n        this.#creationInProgress = true;\n        await this.createBoth(playground, labels, connection);\n        this.#creationInProgress = false;\n      }\n    } catch (err) {\n      this.telemetryLogger.logError('llamaStack.startContainer', { error: err });\n    }\n  }\n\n  /**\n   * Helper: Both server and playground exist → start both\n   */\n  private async startBoth(\n    server: ContainerInfo,\n    playground: ContainerInfo,\n    labels: { [p: string]: string },\n  ): Promise<void> {\n    await this.#taskRunner.runAsTask(\n      labels,\n      {\n        loadingLabel: 'Starting Server and/or Playground',\n        errorMsg: err => `Failed to start existing containers: ${String(err)}`,\n      },\n      async ({ updateLabels }) => {\n        if (server.State !== 'running') await containerEngine.startContainer(server.engineId, server.Id);\n        if (playground.State !== 'running') await containerEngine.startContainer(playground.engineId, playground.Id);\n\n        const serverInfo = await this.waitLlamaStackServerHealthy(\n          {\n            containerId: 
server.Id,\n            port: parseInt(server.Labels[LLAMA_STACK_API_PORT_LABEL], 10),\n            state: server.State,\n          },\n          labels,\n        );\n\n        this.#stack_containers = {\n          server: serverInfo,\n          playground: {\n            containerId: playground.Id,\n            port: parseInt(playground.Labels[LLAMA_STACK_PLAYGROUND_PORT_LABEL], 10),\n            state: 'running',\n          },\n        };\n\n        updateLabels(l => ({\n          ...l,\n          containerId: serverInfo.containerId,\n          port: `${serverInfo.port}`,\n          state: serverInfo.state,\n          playgroundId: playground.Id,\n          playgroundPort: `${parseInt(playground.Labels[LLAMA_STACK_PLAYGROUND_PORT_LABEL], 10)}`,\n          playgroundState: 'running',\n        }));\n\n        this.telemetryLogger.logUsage('llamaStack.startContainer');\n      },\n    );\n  }\n\n  /**\n   * Helper: Only server exists → create playground\n   */\n  private async createPlaygroundFromServer(\n    server: ContainerInfo,\n    labels: { [p: string]: string },\n    connection: ContainerProviderConnection,\n  ): Promise<void> {\n    await this.#taskRunner.runAsTask(\n      labels,\n      {\n        loadingLabel: 'Creating Playground container',\n        errorMsg: err => `Failed to create playground: ${String(err)}`,\n      },\n      async ({ updateLabels }) => {\n        if (server.State !== 'running') await containerEngine.startContainer(server.engineId, server.Id);\n\n        const serverInfo = await this.waitLlamaStackServerHealthy(\n          {\n            containerId: server.Id,\n            port: parseInt(server.Labels[LLAMA_STACK_API_PORT_LABEL], 10),\n            state: server.State,\n          },\n          labels,\n        );\n\n        const playgroundInfo = await this.createPlaygroundContainer(serverInfo, labels, connection);\n\n        this.#stack_containers = { server: serverInfo, playground: playgroundInfo };\n\n        updateLabels(l => ({\n  
        ...l,\n          containerId: serverInfo.containerId,\n          port: `${serverInfo.port}`,\n          state: serverInfo.state,\n          playgroundId: playgroundInfo.containerId,\n          playgroundPort: `${playgroundInfo.port}`,\n          playgroundState: playgroundInfo.state,\n        }));\n\n        this.telemetryLogger.logUsage('llamaStack.startContainer');\n      },\n    );\n  }\n\n  /**\n   * Helper: Only playground exists → delete it and create both containers\n   */\n  private async createBoth(\n    playground: ContainerInfo | undefined,\n    labels: { [p: string]: string },\n    connection: ContainerProviderConnection,\n  ): Promise<void> {\n    await this.#taskRunner.runAsTask(\n      labels,\n      {\n        loadingLabel: 'Creating Server and Playground',\n        errorMsg: err => `Failed to create Llama Stack containers: ${String(err)}`,\n        failFastSubtasks: true,\n      },\n      async ({ updateLabels }) => {\n        // If playground exists, stop & delete it\n        if (playground) {\n          if (playground.State === 'running') {\n            await containerEngine.stopContainer(playground.engineId, playground.Id);\n          }\n          await containerEngine.deleteContainer(playground.engineId, playground.Id);\n        }\n\n        // Create new server + playground\n        const stackInfo = await this.createLlamaStackContainers(connection, labels);\n        this.#stack_containers = stackInfo;\n\n        // Update task labels for UI\n        updateLabels(l => ({\n          ...l,\n          containerId: stackInfo.server?.containerId ?? '',\n          port: `${stackInfo.server?.port}`,\n          state: stackInfo.server?.state ?? '',\n          playgroundId: stackInfo.playground?.containerId ?? '',\n          playgroundPort: `${stackInfo.playground?.port}`,\n          playgroundState: stackInfo.playground?.state ?? 
'',\n        }));\n\n        this.telemetryLogger.logUsage('llamaStack.startContainer');\n      },\n    );\n  }\n  async createLlamaStackContainers(\n    connection: ContainerProviderConnection,\n    labels: { [p: string]: string },\n  ): Promise<LlamaStackContainers> {\n    const image = llama_stack_images.default;\n    const imageInfo = await this.#taskRunner.runAsTask<ImageInfo>(\n      labels,\n      {\n        loadingLabel: `Pulling ${image}.`,\n        errorMsg: err => `Something went wrong while pulling ${image}: ${String(err)}`,\n      },\n      () => getImageInfo(connection, image, () => {}),\n    );\n\n    // Create the server container\n    let serverInfo = await this.createServerContainer(connection, image, imageInfo, labels);\n    serverInfo = await this.waitLlamaStackServerHealthy(serverInfo, labels);\n    serverInfo = await this.registerModels(serverInfo, labels, connection);\n    const playgroundInfo = await this.createPlaygroundContainer(serverInfo, labels, connection);\n\n    // Return both in proper interface\n    return {\n      server: serverInfo,\n      playground: playgroundInfo,\n    };\n  }\n\n  private async createServerContainer(\n    connection: ContainerProviderConnection,\n    image: string,\n    imageInfo: ImageInfo,\n    labels: { [p: string]: string },\n  ): Promise<LlamaStackContainerInfo> {\n    const folder = await this.getLlamaStackContainersFolder();\n\n    const aiLabApiHost =\n      env.isWindows && connection.vmType === 'wsl' ? 
await getLocalIPAddress(connection) : 'host.docker.internal';\n    const aiLabApiPort = this.configurationRegistry.getExtensionConfiguration().apiPort;\n    const llamaStackApiPort = await getFreeRandomPort('0.0.0.0');\n\n    const createContainerOptions: ContainerCreateOptions = {\n      Image: imageInfo.Id,\n      name: getRandomName('llama-stack'),\n      Labels: {\n        [LLAMA_STACK_CONTAINER_LABEL]: image,\n        [LLAMA_STACK_API_PORT_LABEL]: `${llamaStackApiPort}`,\n      },\n      HostConfig: {\n        AutoRemove: false,\n        SecurityOpt: [DISABLE_SELINUX_LABEL_SECURITY_OPTION],\n        Mounts: [\n          {\n            Target: '/app/.llama',\n            Source: path.join(folder, '.llama'),\n            Type: 'bind',\n          },\n        ],\n        UsernsMode: 'keep-id:uid=0,gid=0',\n        PortBindings: { '8321/tcp': [{ HostPort: `${llamaStackApiPort}` }] },\n      },\n      Env: [`PODMAN_AI_LAB_URL=http://${aiLabApiHost}:${aiLabApiPort}`],\n      OpenStdin: true,\n      start: true,\n      HealthCheck: {\n        Test: ['CMD-SHELL', `curl -sSf localhost:8321/v1/models > /dev/null`],\n        Interval: SECOND * 5,\n        Retries: 20,\n      },\n    };\n\n    return this.#taskRunner.runAsTask<LlamaStackContainerInfo>(\n      labels,\n      {\n        loadingLabel: 'Starting Llama Stack server container',\n        errorMsg: err => `Something went wrong while creating server container: ${String(err)}`,\n      },\n      async () => {\n        const { id } = await containerEngine.createContainer(imageInfo.engineId, createContainerOptions);\n        return {\n          containerId: id,\n          port: llamaStackApiPort,\n          state: 'starting',\n        };\n      },\n    );\n  }\n\n  async waitLlamaStackServerHealthy(\n    serverInfo: LlamaStackContainerInfo,\n    labels: { [p: string]: string },\n  ): Promise<LlamaStackContainerInfo> {\n    return this.#taskRunner.runAsTask<LlamaStackContainerInfo>(\n      labels,\n      {\n        
loadingLabel: 'Waiting for Llama Stack server to be healthy',\n        errorMsg: err => `Something went wrong while checking server health: ${String(err)}`,\n      },\n      () =>\n        new Promise((resolve, _reject) => {\n          const disposable = this.containerRegistry.onHealthyContainerEvent((event: ContainerHealthy) => {\n            if (event.id !== serverInfo.containerId) return;\n\n            disposable.dispose();\n            serverInfo.state = 'running';\n            this.telemetryLogger.logUsage('llamaStack.startContainer');\n            resolve(serverInfo);\n          });\n        }),\n    );\n  }\n\n  async registerModels(\n    serverInfo: LlamaStackContainerInfo,\n    labels: { [p: string]: string },\n    connection: ContainerProviderConnection,\n  ): Promise<LlamaStackContainerInfo> {\n    for (const model of this.modelsManager.getModelsInfo().filter(model => model.file)) {\n      await this.#taskRunner.runAsTask(\n        labels,\n        {\n          loadingLabel: `Registering model ${model.name}`,\n          errorMsg: err => `Something went wrong while registering model: ${String(err)}`,\n        },\n        async () => {\n          await this.podmanConnection.execute(connection, [\n            'exec',\n            serverInfo.containerId,\n            'llama-stack-client',\n            'models',\n            'register',\n            model.name,\n            '--provider-id',\n            'podman-ai-lab',\n          ]);\n        },\n      );\n    }\n    return serverInfo;\n  }\n\n  private async createPlaygroundContainer(\n    serverInfo: LlamaStackContainerInfo,\n    labels: { [p: string]: string },\n    connection: ContainerProviderConnection,\n  ): Promise<LlamaStackContainerInfo> {\n    const image = llama_stack_playground_images.default;\n    const imageInfo = await this.#taskRunner.runAsTask<ImageInfo>(\n      labels,\n      {\n        loadingLabel: `Pulling ${image}.`,\n        errorMsg: err => `Something went wrong while pulling 
${image}: ${String(err)}`,\n      },\n      () => getImageInfo(connection, image, () => {}),\n    );\n\n    const playgroundPort = await getFreeRandomPort('0.0.0.0');\n\n    const createContainerOptions: ContainerCreateOptions = {\n      Image: imageInfo.Id,\n      name: getRandomName('llama-stack-playground'),\n      Labels: {\n        [LLAMA_STACK_CONTAINER_LABEL]: image,\n        [LLAMA_STACK_PLAYGROUND_PORT_LABEL]: `${playgroundPort}`,\n      },\n      HostConfig: {\n        AutoRemove: false,\n        PortBindings: { '8501/tcp': [{ HostPort: `${playgroundPort}` }] },\n      },\n      Env: [`LLAMA_STACK_ENDPOINT=http://host.containers.internal:${serverInfo.port}`],\n      OpenStdin: true,\n      start: true,\n    };\n\n    return this.#taskRunner.runAsTask<LlamaStackContainerInfo>(\n      labels,\n      {\n        loadingLabel: 'Starting Llama Stack Playground container',\n        errorMsg: err => `Something went wrong while creating playground container: ${String(err)}`,\n      },\n      async () => {\n        const { id } = await containerEngine.createContainer(imageInfo.engineId, createContainerOptions);\n        return {\n          containerId: id,\n          port: playgroundPort,\n          state: 'running',\n        };\n      },\n    );\n  }\n\n  private async getLlamaStackContainersFolder(): Promise<string> {\n    const llamaStackPath = path.join(this.appUserDirectory, 'llama-stack', 'container');\n    await fs.mkdir(path.join(llamaStackPath, '.llama'), { recursive: true });\n    return llamaStackPath;\n  }\n\n  // For tests only\n  protected getContainersInfo(): LlamaStackContainers | undefined {\n    return this.#stack_containers;\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/modelsManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { type MockInstance, beforeEach, describe, expect, test, vi } from 'vitest';\nimport os from 'node:os';\nimport fs, { type Stats, type PathLike } from 'node:fs';\nimport path from 'node:path';\nimport { ModelsManager } from './modelsManager';\nimport { env, process as coreProcess } from '@podman-desktop/api';\nimport type { RunResult, TelemetryLogger, ContainerProviderConnection } from '@podman-desktop/api';\nimport type { CatalogManager } from './catalogManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport * as utils from '../utils/utils';\nimport { TaskRegistry } from '../registries/TaskRegistry';\nimport type { CancellationTokenRegistry } from '../registries/CancellationTokenRegistry';\nimport * as sha from '../utils/sha';\nimport type { GGUFParseOutput } from '@huggingface/gguf';\nimport { gguf } from '@huggingface/gguf';\nimport type { PodmanConnection } from './podmanConnection';\nimport { VMType } from '@shared/models/IPodman';\nimport { getPodmanMachineName } from '../utils/podman';\nimport type { ConfigurationRegistry } from '../registries/ConfigurationRegistry';\nimport { Uploader } from '../utils/uploader';\nimport { 
ModelHandlerRegistry } from '../registries/ModelHandlerRegistry';\nimport { URLModelHandler } from '../models/URLModelHandler';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_NEW_MODELS_STATE } from '@shared/Messages';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    showErrorMessageMock: vi.fn(),\n    logUsageMock: vi.fn(),\n    logErrorMock: vi.fn(),\n    performDownloadMock: vi.fn(),\n    onEventDownloadMock: vi.fn(),\n    getTargetMock: vi.fn(),\n    getDownloaderCompleter: vi.fn(),\n    isCompletionEventMock: vi.fn(),\n    getPodmanCliMock: vi.fn(),\n  };\n});\n\nvi.mock('../utils/uploader', () => ({\n  Uploader: vi.fn(),\n}));\n\nvi.mock('@huggingface/gguf', () => ({\n  gguf: vi.fn(),\n}));\n\nvi.mock('../utils/podman', () => ({\n  getPodmanCli: mocks.getPodmanCliMock,\n  getPodmanMachineName: vi.fn(),\n}));\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    Disposable: {\n      create: vi.fn(),\n    },\n    env: {\n      isWindows: false,\n    },\n    process: {\n      exec: vi.fn(),\n    },\n    fs: {\n      createFileSystemWatcher: (): unknown => ({\n        onDidCreate: vi.fn(),\n        onDidDelete: vi.fn(),\n        onDidChange: vi.fn(),\n      }),\n    },\n    window: {\n      showErrorMessage: mocks.showErrorMessageMock,\n    },\n    EventEmitter: vi.fn(),\n  };\n});\n\nvi.mock('../utils/downloader', () => ({\n  isCompletionEvent: mocks.isCompletionEventMock,\n  Downloader: class {\n    get completed(): boolean {\n      return mocks.getDownloaderCompleter();\n    }\n    onEvent = mocks.onEventDownloadMock;\n    perform = mocks.performDownloadMock;\n    getTarget = mocks.getTargetMock;\n  },\n}));\n\nconst podmanConnectionMock = {\n  getContainerProviderConnections: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst cancellationTokenRegistryMock = {\n  createCancellationTokenSource: vi.fn(),\n} as unknown as CancellationTokenRegistry;\n\nlet taskRegistry: TaskRegistry;\n\nconst telemetryLogger = {\n  
logUsage: mocks.logUsageMock,\n  logError: mocks.logErrorMock,\n} as unknown as TelemetryLogger;\n\nconst configurationRegistryMock: ConfigurationRegistry = {\n  getExtensionConfiguration: vi.fn(),\n} as unknown as ConfigurationRegistry;\n\nlet modelHandlerRegistry: ModelHandlerRegistry;\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n  taskRegistry = new TaskRegistry(rpcExtensionMock);\n  modelHandlerRegistry = new ModelHandlerRegistry(rpcExtensionMock);\n\n  vi.mocked(configurationRegistryMock.getExtensionConfiguration).mockReturnValue({\n    modelUploadDisabled: false,\n    modelsPath: '~/downloads',\n    experimentalTuning: false,\n    apiPort: 0,\n    inferenceRuntime: 'llama-cpp',\n    experimentalGPU: false,\n    showGPUPromotion: false,\n    appearance: 'dark',\n  });\n\n  mocks.isCompletionEventMock.mockReturnValue(true);\n});\n\nconst dirent = [\n  {\n    isDirectory: (): boolean => true,\n    parentPath: '/home/user/appstudio-dir',\n    name: 'model-id-1',\n  },\n  {\n    isDirectory: (): boolean => true,\n    parentPath: '/home/user/appstudio-dir',\n    name: 'model-id-2',\n  },\n  {\n    isDirectory: (): boolean => false,\n    parentPath: '/home/user/appstudio-dir',\n    name: 'other-file-should-be-ignored.txt',\n  },\n] as fs.Dirent[];\n\nfunction mockFiles(now: Date): void {\n  vi.spyOn(os, 'homedir').mockReturnValue('/home/user');\n  const existsSyncSpy = vi.spyOn(fs, 'existsSync');\n  existsSyncSpy.mockImplementation((path: PathLike) => {\n    if (process.platform === 'win32') {\n      expect(path).toBe('C:\\\\home\\\\user\\\\aistudio\\\\models');\n    } else {\n      expect(path).toBe('/home/user/aistudio/models');\n    }\n    return true;\n  });\n  const statSpy = vi.spyOn(fs.promises, 'stat');\n  const info: Stats = {} as Stats;\n  info.size = 32000;\n  info.mtime = now;\n  statSpy.mockResolvedValue(info);\n  
const readdirMock = vi.spyOn(fs.promises, 'readdir') as unknown as MockInstance<\n    (path: string) => Promise<string[] | fs.Dirent[]>\n  >;\n  readdirMock.mockImplementation((dir: string) => {\n    if (dir.endsWith('model-id-1') || dir.endsWith('model-id-2')) {\n      const base = path.basename(dir);\n      return Promise.resolve([base + '-model']);\n    } else {\n      return Promise.resolve(dirent);\n    }\n  });\n}\n\ntest('getModelsInfo should get models in local directory', async () => {\n  const now = new Date();\n  mockFiles(now);\n  let modelsDir: string;\n  if (process.platform === 'win32') {\n    modelsDir = 'C:\\\\home\\\\user\\\\aistudio\\\\models';\n  } else {\n    modelsDir = '/home/user/aistudio/models';\n  }\n  const manager = new ModelsManager(\n    rpcExtensionMock,\n    {\n      getModels(): ModelInfo[] {\n        return [\n          { id: 'model-id-1', name: 'model-id-1-model' } as ModelInfo,\n          { id: 'model-id-2', name: 'model-id-2-model' } as ModelInfo,\n        ];\n      },\n      onUpdate: vi.fn(),\n    } as unknown as CatalogManager,\n    telemetryLogger,\n    taskRegistry,\n    cancellationTokenRegistryMock,\n    podmanConnectionMock,\n    configurationRegistryMock,\n    modelHandlerRegistry,\n  );\n  modelHandlerRegistry.register(new URLModelHandler(manager, modelsDir));\n  await manager.init();\n  await manager.loadLocalModels();\n  expect(manager.getModelsInfo()).toEqual([\n    {\n      id: 'model-id-1',\n      name: 'model-id-1-model',\n      file: {\n        size: 32000,\n        creation: now,\n        path: path.resolve(dirent[0].parentPath, dirent[0].name),\n        file: 'model-id-1-model',\n      },\n    },\n    {\n      id: 'model-id-2',\n      name: 'model-id-2-model',\n      file: {\n        size: 32000,\n        creation: now,\n        path: path.resolve(dirent[1].parentPath, dirent[1].name),\n        file: 'model-id-2-model',\n      },\n    },\n  ]);\n});\n\ntest('getModelsInfo should return an empty array if the 
models folder does not exist', async () => {\n  vi.spyOn(os, 'homedir').mockReturnValue('/home/user');\n  const existsSyncSpy = vi.spyOn(fs, 'existsSync');\n  existsSyncSpy.mockReturnValue(false);\n  let modelsDir: string;\n  if (process.platform === 'win32') {\n    modelsDir = 'C:\\\\home\\\\user\\\\aistudio\\\\models';\n  } else {\n    modelsDir = '/home/user/aistudio/models';\n  }\n  const manager = new ModelsManager(\n    rpcExtensionMock,\n    {\n      getModels(): ModelInfo[] {\n        return [];\n      },\n      onUpdate: vi.fn(),\n    } as unknown as CatalogManager,\n    telemetryLogger,\n    taskRegistry,\n    cancellationTokenRegistryMock,\n    podmanConnectionMock,\n    configurationRegistryMock,\n    modelHandlerRegistry,\n  );\n  modelHandlerRegistry.register(new URLModelHandler(manager, modelsDir));\n  await manager.init();\n  await manager.getLocalModelsFromDisk();\n  expect(manager.getModelsInfo()).toEqual([]);\n  if (process.platform === 'win32') {\n    expect(existsSyncSpy).toHaveBeenCalledWith('C:\\\\home\\\\user\\\\aistudio\\\\models');\n  } else {\n    expect(existsSyncSpy).toHaveBeenCalledWith('/home/user/aistudio/models');\n  }\n});\n\ntest('getLocalModelsFromDisk should return undefined Date and size when stat fail', async () => {\n  const now = new Date();\n  mockFiles(now);\n  const statSpy = vi.spyOn(fs.promises, 'stat') as unknown as MockInstance<(path: PathLike) => Promise<Stats>>;\n  statSpy.mockImplementation((path: PathLike) => {\n    if (`${path}`.endsWith('model-id-1')) throw new Error('random-error');\n    return Promise.resolve({ isDirectory: () => true } as Stats);\n  });\n\n  let modelsDir: string;\n  if (process.platform === 'win32') {\n    modelsDir = 'C:\\\\home\\\\user\\\\aistudio\\\\models';\n  } else {\n    modelsDir = '/home/user/aistudio/models';\n  }\n  const manager = new ModelsManager(\n    rpcExtensionMock,\n    {\n      getModels(): ModelInfo[] {\n        return [{ id: 'model-id-1', name: 'model-id-1-model' } as 
ModelInfo];\n      },\n      onUpdate: vi.fn(),\n    } as unknown as CatalogManager,\n    telemetryLogger,\n    taskRegistry,\n    cancellationTokenRegistryMock,\n    podmanConnectionMock,\n    configurationRegistryMock,\n    modelHandlerRegistry,\n  );\n  modelHandlerRegistry.register(new URLModelHandler(manager, modelsDir));\n  await manager.init();\n  await manager.loadLocalModels();\n  expect(manager.getModelsInfo()).toEqual([\n    {\n      id: 'model-id-1',\n      name: 'model-id-1-model',\n      file: {\n        size: undefined,\n        creation: undefined,\n        path: path.resolve(dirent[0].parentPath, dirent[0].name),\n        file: 'model-id-1-model',\n      },\n    },\n  ]);\n});\n\ntest('getLocalModelsFromDisk should skip folders containing tmp files', async () => {\n  const now = new Date();\n  mockFiles(now);\n  const statSpy = vi.spyOn(fs.promises, 'stat') as unknown as MockInstance<(path: PathLike) => Promise<Stats>>;\n  statSpy.mockImplementation((path: PathLike) => {\n    if (`${path}`.endsWith('model-id-1')) throw new Error('random-error');\n    return Promise.resolve({ isDirectory: () => true } as Stats);\n  });\n\n  const readdirMock = vi.spyOn(fs.promises, 'readdir') as unknown as MockInstance<\n    (path: string) => Promise<string[] | fs.Dirent[]>\n  >;\n  readdirMock.mockImplementation((dir: string) => {\n    if (dir.endsWith('model-id-1') || dir.endsWith('model-id-2')) {\n      const base = path.basename(dir);\n      return Promise.resolve([base + '-model.tmp']);\n    } else {\n      return Promise.resolve(dirent);\n    }\n  });\n\n  let modelsDir: string;\n  if (process.platform === 'win32') {\n    modelsDir = 'C:\\\\home\\\\user\\\\aistudio\\\\models';\n  } else {\n    modelsDir = '/home/user/aistudio/models';\n  }\n  const manager = new ModelsManager(\n    rpcExtensionMock,\n    {\n      getModels(): ModelInfo[] {\n        return [{ id: 'model-id-1', name: 'model-id-1-model' } as ModelInfo];\n      },\n      onUpdate: vi.fn(),\n    } 
as unknown as CatalogManager,\n    telemetryLogger,\n    taskRegistry,\n    cancellationTokenRegistryMock,\n    podmanConnectionMock,\n    configurationRegistryMock,\n    modelHandlerRegistry,\n  );\n  modelHandlerRegistry.register(new URLModelHandler(manager, modelsDir));\n  await manager.init();\n  await manager.loadLocalModels();\n  expect(manager.getModelsInfo()).toEqual([\n    {\n      id: 'model-id-1',\n      name: 'model-id-1-model',\n    },\n  ]);\n});\n\ntest('loadLocalModels should post a message with the message on disk and on catalog', async () => {\n  const now = new Date();\n  mockFiles(now);\n\n  let modelsDir: string;\n  if (process.platform === 'win32') {\n    modelsDir = 'C:\\\\home\\\\user\\\\aistudio\\\\models';\n  } else {\n    modelsDir = '/home/user/aistudio/models';\n  }\n  const manager = new ModelsManager(\n    rpcExtensionMock,\n    {\n      getModels: () => {\n        return [\n          {\n            id: 'model-id-1',\n          },\n        ] as ModelInfo[];\n      },\n      onUpdate: vi.fn(),\n    } as unknown as CatalogManager,\n    telemetryLogger,\n    taskRegistry,\n    cancellationTokenRegistryMock,\n    podmanConnectionMock,\n    configurationRegistryMock,\n    modelHandlerRegistry,\n  );\n  modelHandlerRegistry.register(new URLModelHandler(manager, modelsDir));\n  await manager.init();\n  await manager.loadLocalModels();\n  expect(rpcExtensionMock.fire).toHaveBeenNthCalledWith(2, MSG_NEW_MODELS_STATE, [\n    {\n      file: {\n        creation: now,\n        file: 'model-id-1-model',\n        size: 32000,\n        path: path.resolve(dirent[0].parentPath, dirent[0].name),\n      },\n      id: 'model-id-1',\n    },\n  ]);\n});\n\ntest('deleteModel deletes the model folder', async () => {\n  let modelsDir: string;\n  if (process.platform === 'win32') {\n    modelsDir = 'C:\\\\home\\\\user\\\\aistudio\\\\models';\n  } else {\n    modelsDir = '/home/user/aistudio/models';\n  }\n  const now = new Date();\n  mockFiles(now);\n  const 
rmSpy = vi.spyOn(fs.promises, 'rm');\n  rmSpy.mockResolvedValue();\n  const manager = new ModelsManager(\n    rpcExtensionMock,\n    {\n      getModels: () => {\n        return [\n          {\n            id: 'model-id-1',\n            url: 'https:///model-url',\n          },\n        ] as ModelInfo[];\n      },\n      onUpdate: vi.fn(),\n    } as unknown as CatalogManager,\n    telemetryLogger,\n    taskRegistry,\n    cancellationTokenRegistryMock,\n    podmanConnectionMock,\n    configurationRegistryMock,\n    modelHandlerRegistry,\n  );\n  modelHandlerRegistry.register(new URLModelHandler(manager, modelsDir));\n  await manager.init();\n  await manager.loadLocalModels();\n  await manager.deleteModel('model-id-1');\n  // check that the model's folder is removed from disk\n  if (process.platform === 'win32') {\n    expect(rmSpy).toBeCalledWith('C:\\\\home\\\\user\\\\aistudio\\\\models\\\\model-id-1', {\n      recursive: true,\n      force: true,\n      maxRetries: 3,\n    });\n  } else {\n    expect(rmSpy).toBeCalledWith('/home/user/aistudio/models/model-id-1', {\n      recursive: true,\n      force: true,\n      maxRetries: 3,\n    });\n  }\n  expect(rpcExtensionMock.fire).toHaveBeenCalledTimes(5);\n  // check that a new state is sent with the model removed\n  expect(rpcExtensionMock.fire).toHaveBeenNthCalledWith(4, MSG_NEW_MODELS_STATE, [\n    {\n      id: 'model-id-1',\n      url: 'https:///model-url',\n    },\n  ]);\n  expect(mocks.logUsageMock).toHaveBeenNthCalledWith(1, 'model.delete', { 'model.id': expect.any(String) });\n});\n\ndescribe('deleting models', () => {\n  test('deleteModel fails to delete the model folder', async () => {\n    let modelsDir: string;\n    if (process.platform === 'win32') {\n      modelsDir = 'C:\\\\home\\\\user\\\\aistudio\\\\models';\n    } else {\n      modelsDir = '/home/user/aistudio/models';\n    }\n    const now = new Date();\n    mockFiles(now);\n    const rmSpy = vi.spyOn(fs.promises, 'rm');\n    
rmSpy.mockRejectedValue(new Error('failed'));\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels: () => {\n          return [\n            {\n              id: 'model-id-1',\n              url: 'https://model-url',\n            },\n          ] as ModelInfo[];\n        },\n        onUpdate: vi.fn(),\n      } as unknown as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n    modelHandlerRegistry.register(new URLModelHandler(manager, modelsDir));\n    await manager.init();\n    await manager.loadLocalModels();\n    await manager.deleteModel('model-id-1');\n    // check that the model's folder is removed from disk\n    if (process.platform === 'win32') {\n      expect(rmSpy).toBeCalledWith('C:\\\\home\\\\user\\\\aistudio\\\\models\\\\model-id-1', {\n        recursive: true,\n        force: true,\n        maxRetries: 3,\n      });\n    } else {\n      expect(rmSpy).toBeCalledWith('/home/user/aistudio/models/model-id-1', {\n        recursive: true,\n        force: true,\n        maxRetries: 3,\n      });\n    }\n    expect(rpcExtensionMock.fire).toHaveBeenCalledTimes(5);\n    // check that a new state is sent with the model not removed\n    expect(rpcExtensionMock.fire).toHaveBeenNthCalledWith(4, MSG_NEW_MODELS_STATE, [\n      {\n        id: 'model-id-1',\n        url: 'https://model-url',\n        file: {\n          creation: now,\n          file: 'model-id-1-model',\n          size: 32000,\n          path: path.resolve(dirent[0].parentPath, dirent[0].name),\n        },\n      },\n    ]);\n    expect(mocks.showErrorMessageMock).toHaveBeenCalledOnce();\n    expect(mocks.logErrorMock).toHaveBeenCalled();\n  });\n\n  test('delete local model should call catalogManager', async () => {\n    vi.mocked(env).isWindows = false;\n    const removeUserModelMock = vi.fn();\n    const manager = 
new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels: () => {\n          return [\n            {\n              id: 'model-id-1',\n              file: {\n                file: 'model-id-1-model',\n                size: 32000,\n                path: path.resolve(dirent[0].parentPath, dirent[0].name),\n              },\n            },\n          ] as ModelInfo[];\n        },\n        removeUserModel: removeUserModelMock,\n      } as unknown as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n    await manager.loadLocalModels();\n    await manager.deleteModel('model-id-1');\n\n    expect(removeUserModelMock).toBeCalledWith('model-id-1');\n  });\n\n  test('deleting on windows should check for all connections', async () => {\n    vi.mocked(coreProcess.exec).mockResolvedValue({} as RunResult);\n    mocks.getPodmanCliMock.mockReturnValue('dummyCli');\n    vi.mocked(env).isWindows = true;\n    const connections: ContainerProviderConnection[] = [\n      {\n        name: 'Machine 1',\n        type: 'podman',\n        vmType: VMType.HYPERV,\n        endpoint: {\n          socketPath: '',\n        },\n        status: () => 'started',\n      },\n      {\n        name: 'Machine 2',\n        type: 'podman',\n        vmType: VMType.WSL,\n        endpoint: {\n          socketPath: '',\n        },\n        status: () => 'started',\n      },\n    ];\n    vi.mocked(podmanConnectionMock.getContainerProviderConnections).mockReturnValue(connections);\n    vi.mocked(getPodmanMachineName).mockReturnValue('machine-2');\n\n    const rmSpy = vi.spyOn(fs.promises, 'rm');\n    rmSpy.mockResolvedValue(undefined);\n\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels: () => {\n          return [\n            {\n              id: 'model-id-1',\n              url: 'model-url',\n              
file: {\n                file: 'dummyFile',\n                path: 'dummyPath',\n              },\n            },\n          ] as ModelInfo[];\n        },\n      } as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n\n    await manager.loadLocalModels();\n    // delete the model\n    await manager.deleteModel('model-id-1');\n\n    expect(podmanConnectionMock.getContainerProviderConnections).toHaveBeenCalledOnce();\n\n    expect(coreProcess.exec).toHaveBeenCalledWith('dummyCli', [\n      'machine',\n      'ssh',\n      'machine-2',\n      'rm',\n      '-f',\n      '/home/user/ai-lab/models/model-id-1',\n    ]);\n  });\n});\n\ndescribe('downloadModel', () => {\n  test('download model if not already on disk', async () => {\n    vi.mocked(cancellationTokenRegistryMock.createCancellationTokenSource).mockReturnValue(99);\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels(): ModelInfo[] {\n          return [];\n        },\n      } as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n    modelHandlerRegistry.register(new URLModelHandler(manager, 'appdir'));\n\n    vi.spyOn(manager, 'isModelOnDisk').mockReturnValue(false);\n    vi.spyOn(utils, 'getDurationSecondsSince').mockReturnValue(99);\n    const updateTaskMock = vi.spyOn(taskRegistry, 'updateTask');\n    await manager.requestDownloadModel({\n      id: 'id',\n      url: 'https:///url',\n      name: 'name',\n    } as ModelInfo);\n\n    expect(cancellationTokenRegistryMock.createCancellationTokenSource).toHaveBeenCalled();\n    expect(updateTaskMock).toHaveBeenLastCalledWith({\n      id: expect.any(String),\n      name: 'Downloading model name',\n      labels: {\n        
'model-pulling': 'id',\n      },\n      state: 'loading',\n      cancellationToken: 99,\n    });\n  });\n  test('retrieve model path if already on disk', async () => {\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels(): ModelInfo[] {\n          return [];\n        },\n      } as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n    const updateTaskMock = vi.spyOn(taskRegistry, 'updateTask');\n    vi.spyOn(manager, 'isModelOnDisk').mockReturnValue(true);\n    const getLocalModelPathMock = vi.spyOn(manager, 'getLocalModelPath').mockReturnValue('');\n    await manager.requestDownloadModel({\n      id: 'id',\n      url: 'url',\n      name: 'name',\n    } as ModelInfo);\n    expect(getLocalModelPathMock).toBeCalledWith('id');\n    expect(updateTaskMock).toHaveBeenLastCalledWith({\n      id: expect.any(String),\n      name: 'Model name already present on disk',\n      labels: {\n        'model-pulling': 'id',\n      },\n      state: 'success',\n    });\n  });\n  test('fail if model on disk has different sha of the expected value', async () => {\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels(): ModelInfo[] {\n          return [];\n        },\n      } as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n    vi.spyOn(taskRegistry, 'updateTask');\n    vi.spyOn(manager, 'isModelOnDisk').mockReturnValue(true);\n    vi.spyOn(manager, 'getLocalModelPath').mockReturnValue('path');\n    vi.spyOn(sha, 'hasValidSha').mockResolvedValue(false);\n    await expect(() =>\n      manager.requestDownloadModel({\n        id: 'id',\n        url: 'url',\n        name: 'name',\n        sha256: 'sha',\n      } as 
ModelInfo),\n    ).rejects.toThrowError(\n      'Model name is already present on disk at path but its security hash (SHA-256) does not match the expected value. This may indicate the file has been altered or corrupted. Please delete it and try again.',\n    );\n  });\n  test('multiple download request same model - second call after first completed', async () => {\n    mocks.getDownloaderCompleter.mockReturnValue(true);\n\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels(): ModelInfo[] {\n          return [];\n        },\n      } as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n    modelHandlerRegistry.register(new URLModelHandler(manager, 'appdir'));\n\n    vi.spyOn(manager, 'isModelOnDisk').mockReturnValue(false);\n    vi.spyOn(utils, 'getDurationSecondsSince').mockReturnValue(99);\n\n    await manager.requestDownloadModel({\n      id: 'id',\n      url: 'https:///url',\n      name: 'name',\n    } as ModelInfo);\n\n    await manager.requestDownloadModel({\n      id: 'id',\n      url: 'https:///url',\n      name: 'name',\n    } as ModelInfo);\n\n    // Only called once\n    expect(mocks.performDownloadMock).toHaveBeenCalledTimes(1);\n    expect(mocks.onEventDownloadMock).toHaveBeenCalledTimes(1);\n  });\n\n  test('multiple download request same model - second call before first completed', async () => {\n    mocks.getDownloaderCompleter.mockReturnValue(false);\n\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels(): ModelInfo[] {\n          return [];\n        },\n      } as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n    modelHandlerRegistry.register(new URLModelHandler(manager, 
'appdir'));\n\n    vi.spyOn(manager, 'isModelOnDisk').mockReturnValue(false);\n    vi.spyOn(utils, 'getDurationSecondsSince').mockReturnValue(99);\n\n    mocks.onEventDownloadMock.mockImplementation(listener => {\n      setTimeout(() => {\n        listener({\n          id: 'id',\n          status: 'completed',\n          duration: 1000,\n        });\n      }, 1000);\n      return {\n        dispose: vi.fn(),\n      };\n    });\n\n    await manager.requestDownloadModel({\n      id: 'id',\n      url: 'https:///url',\n      name: 'name',\n    } as ModelInfo);\n\n    await manager.requestDownloadModel({\n      id: 'id',\n      url: 'https:///url',\n      name: 'name',\n    } as ModelInfo);\n\n    // Only called once\n    expect(mocks.performDownloadMock).toHaveBeenCalledTimes(1);\n    expect(mocks.onEventDownloadMock).toHaveBeenCalledTimes(2);\n  });\n});\n\ndescribe('getModelMetadata', () => {\n  test('unknown model', async () => {\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels: (): ModelInfo[] => [],\n      } as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n\n    await expect(() => manager.getModelMetadata('unknown-model-id')).rejects.toThrowError(\n      'model with id unknown-model-id does not exists.',\n    );\n  });\n\n  test('remote model', async () => {\n    const manager = new ModelsManager(\n      {} as RpcExtension,\n      {\n        getModels: (): ModelInfo[] => [\n          {\n            id: 'test-model-id',\n            url: 'dummy-url',\n            file: undefined,\n          } as unknown as ModelInfo,\n        ],\n        onUpdate: vi.fn(),\n      } as unknown as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    
);\n\n    await manager.init();\n\n    const fakeMetadata: Record<string, string> = {\n      hello: 'world',\n    };\n\n    vi.mocked(gguf).mockResolvedValue({\n      metadata: fakeMetadata,\n    } as unknown as GGUFParseOutput & { parameterCount: number });\n\n    const result = await manager.getModelMetadata('test-model-id');\n    expect(result).toStrictEqual(fakeMetadata);\n\n    expect(gguf).toHaveBeenCalledWith('dummy-url');\n  });\n\n  test('local model', async () => {\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        getModels: (): ModelInfo[] => [\n          {\n            id: 'test-model-id',\n            url: 'dummy-url',\n            file: {\n              file: 'random',\n              path: 'dummy-path',\n            },\n          } as unknown as ModelInfo,\n        ],\n        onUpdate: vi.fn(),\n      } as unknown as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n\n    await manager.init();\n\n    const fakeMetadata: Record<string, string> = {\n      hello: 'world',\n    };\n\n    vi.mocked(gguf).mockResolvedValue({\n      metadata: fakeMetadata,\n    } as unknown as GGUFParseOutput & { parameterCount: number });\n\n    const result = await manager.getModelMetadata('test-model-id');\n    expect(result).toStrictEqual(fakeMetadata);\n\n    expect(gguf).toHaveBeenCalledWith(path.join('dummy-path', 'random'), {\n      allowLocalFile: true,\n    });\n  });\n});\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'dummy-connection',\n  type: 'podman',\n  vmType: undefined,\n} as unknown as ContainerProviderConnection;\n\nconst modelMock: ModelInfo = {\n  id: 'test-model-id',\n  url: 'dummy-url',\n  file: {\n    file: 'random',\n    path: 'dummy-path',\n  },\n} as unknown as ModelInfo;\n\ndescribe('uploadModelToPodmanMachine', () => {\n  test('uploader should 
be used', async () => {\n    const performMock = vi.fn().mockResolvedValue('uploader-result');\n    vi.mocked(Uploader).mockReturnValue({\n      onEvent: vi.fn(),\n      perform: performMock,\n    } as unknown as Uploader);\n\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        onUpdate: vi.fn(),\n        getModels: () => [],\n      } as unknown as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n\n    await manager.init();\n    const result = await manager.uploadModelToPodmanMachine(connectionMock, modelMock);\n    expect(result).toBe('uploader-result');\n    expect(performMock).toHaveBeenCalledWith(modelMock.id);\n  });\n\n  test('upload should be skipped when configuration disable it', async () => {\n    vi.mocked(configurationRegistryMock.getExtensionConfiguration).mockReturnValue({\n      // disable upload\n      modelUploadDisabled: true,\n      modelsPath: '~/downloads',\n      experimentalTuning: false,\n      apiPort: 0,\n      inferenceRuntime: 'llama-cpp',\n      experimentalGPU: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    const manager = new ModelsManager(\n      rpcExtensionMock,\n      {\n        onUpdate: vi.fn(),\n        getModels: () => [],\n      } as unknown as CatalogManager,\n      telemetryLogger,\n      taskRegistry,\n      cancellationTokenRegistryMock,\n      podmanConnectionMock,\n      configurationRegistryMock,\n      modelHandlerRegistry,\n    );\n\n    await manager.init();\n    await manager.uploadModelToPodmanMachine(connectionMock, modelMock);\n    expect(Uploader).not.toHaveBeenCalled();\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/modelsManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { LocalModelInfo } from '@shared/models/ILocalModelInfo';\nimport fs from 'node:fs';\nimport * as path from 'node:path';\nimport { type Disposable, env, type ContainerProviderConnection } from '@podman-desktop/api';\nimport { MSG_NEW_MODELS_STATE } from '@shared/Messages';\nimport type { CatalogManager } from './catalogManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport * as podmanDesktopApi from '@podman-desktop/api';\nimport type { Downloader } from '../utils/downloader';\nimport type { TaskRegistry } from '../registries/TaskRegistry';\nimport type { Task } from '@shared/models/ITask';\nimport type { BaseEvent } from '../models/baseEvent';\nimport { isCompletionEvent, isProgressEvent } from '../models/baseEvent';\nimport { Uploader } from '../utils/uploader';\nimport { deleteRemoteModel, getLocalModelFile, isModelUploaded } from '../utils/modelsUtils';\nimport { getPodmanMachineName } from '../utils/podman';\nimport type { CancellationTokenRegistry } from '../registries/CancellationTokenRegistry';\nimport { getHash, hasValidSha } from '../utils/sha';\nimport type { GGUFParseOutput } from '@huggingface/gguf';\nimport { 
gguf } from '@huggingface/gguf';\nimport type { PodmanConnection } from './podmanConnection';\nimport { VMType } from '@shared/models/IPodman';\nimport type { ConfigurationRegistry } from '../registries/ConfigurationRegistry';\nimport type { ModelHandlerRegistry } from '../registries/ModelHandlerRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nexport class ModelsManager implements Disposable {\n  #models: Map<string, ModelInfo>;\n  #disposables: Disposable[];\n\n  #downloaders: Map<string, Downloader> = new Map<string, Downloader>();\n\n  constructor(\n    private rpcExtension: RpcExtension,\n    private catalogManager: CatalogManager,\n    private telemetry: podmanDesktopApi.TelemetryLogger,\n    private taskRegistry: TaskRegistry,\n    private cancellationTokenRegistry: CancellationTokenRegistry,\n    private podmanConnection: PodmanConnection,\n    private configurationRegistry: ConfigurationRegistry,\n    private modelHandlerRegistry: ModelHandlerRegistry,\n  ) {\n    this.#models = new Map();\n    this.#disposables = [];\n    this.modelHandlerRegistry.getAll().forEach(handler => handler.onUpdate(this.loadLocalModels));\n  }\n\n  async init(): Promise<void> {\n    const disposable = this.catalogManager.onUpdate(() => {\n      this.loadLocalModels().catch((err: unknown) => {\n        console.error(`Something went wrong when loading local models`, err);\n      });\n    });\n    this.#disposables.push(disposable);\n\n    try {\n      await this.loadLocalModels();\n    } catch (err: unknown) {\n      console.error('Something went wrong while trying to load local models', err);\n    }\n  }\n\n  dispose(): void {\n    this.#models.clear();\n    this.#disposables.forEach(d => d.dispose());\n  }\n\n  async loadLocalModels(): Promise<void> {\n    this.#models.clear();\n    this.catalogManager.getModels().forEach(m => this.#models.set(m.id, m));\n    const reloadLocalModels = async (): Promise<void> => {\n      await 
this.getLocalModelsFromDisk();\n      await this.sendModelsInfo();\n    };\n    // Initialize the local models manually\n    await reloadLocalModels();\n  }\n\n  getModelsInfo(): ModelInfo[] {\n    return [...this.#models.values()];\n  }\n\n  async sendModelsInfo(): Promise<void> {\n    const models = this.getModelsInfo();\n    await this.rpcExtension.fire(MSG_NEW_MODELS_STATE, models);\n  }\n\n  async getLocalModelsFromDisk(): Promise<void> {\n    return Promise.all(this.modelHandlerRegistry.getAll().map(registry => registry.getLocalModelsFromDisk())).then(\n      () => void 0,\n    );\n  }\n\n  isModelOnDisk(modelId: string): boolean {\n    return this.#models.get(modelId)?.file !== undefined;\n  }\n\n  getLocalModelInfo(modelId: string): LocalModelInfo {\n    const model = this.#models.get(modelId);\n    if (!model?.file) {\n      throw new Error('model is not on disk');\n    }\n    return model.file;\n  }\n\n  getModelInfo(modelId: string): ModelInfo {\n    const model = this.#models.get(modelId);\n    if (!model) {\n      throw new Error('model is not loaded');\n    }\n    return model;\n  }\n\n  getLocalModelPath(modelId: string): string {\n    return getLocalModelFile(this.getModelInfo(modelId));\n  }\n\n  async deleteModel(modelId: string): Promise<void> {\n    const model = this.#models.get(modelId);\n    if (!model?.file) {\n      throw new Error('model cannot be found.');\n    }\n\n    model.state = 'deleting';\n    await this.sendModelsInfo();\n    try {\n      await this.deleteRemoteModel(model);\n      // if model does not have any url, it has been imported locally by the user\n      if (!model.url) {\n        const modelPath = path.join(model.file.path, model.file.file);\n        // remove it from the catalog as it cannot be downloaded anymore\n        await this.catalogManager.removeUserModel(modelId);\n        await fs.promises.rm(modelPath, { recursive: true, force: true, maxRetries: 3 });\n      } else {\n        const modelHandler = 
this.modelHandlerRegistry.findModelHandler(model.url);\n        if (!modelHandler) {\n          throw new Error(`no model registry found for model ${model.id} url ${model.url}`);\n        }\n        await modelHandler.deleteModel(model);\n      }\n\n      this.telemetry.logUsage('model.delete', { 'model.id': getHash(modelId) });\n      model.file = model.state = undefined;\n    } catch (err: unknown) {\n      this.telemetry.logError('model.delete', {\n        'model.id': modelId,\n        message: 'error deleting model from disk',\n        error: err,\n      });\n      await podmanDesktopApi.window.showErrorMessage(`Error deleting model ${modelId}. ${String(err)}`);\n\n      // Let's reload the models manually to avoid any issue\n      model.state = undefined;\n      await this.getLocalModelsFromDisk();\n    } finally {\n      await this.sendModelsInfo();\n    }\n  }\n\n  private async deleteRemoteModel(modelInfo: ModelInfo): Promise<void> {\n    // currently only Windows is supported\n    if (!env.isWindows) {\n      return;\n    }\n\n    // get all container provider connections\n    const connections = this.podmanConnection.getContainerProviderConnections();\n\n    // iterate over all connections\n    for (const connection of connections) {\n      // ignore non-wsl machines\n      if (connection.vmType !== VMType.WSL) continue;\n      // Get the corresponding machine name\n      const machineName = getPodmanMachineName(connection);\n\n      // check if model already loaded on the podman machine\n      const existsRemote = await isModelUploaded(machineName, modelInfo);\n      if (!existsRemote) return;\n\n      await deleteRemoteModel(machineName, modelInfo);\n    }\n  }\n\n  /**\n   * This method will resolve when the provided model has been downloaded.\n   *\n   * This method can be called multiple times for the same model; it will reuse the existing downloader and wait on\n   * its completion.\n   * @param model\n   * @param labels\n   */\n  async 
requestDownloadModel(model: ModelInfo, labels?: { [key: string]: string }): Promise<string> {\n    // Create a task to follow progress\n    const task: Task = this.createDownloadTask(model, labels);\n\n    // Check there is no existing downloader running\n    const existingDownloader = this.#downloaders.get(model.id);\n    if (!existingDownloader) {\n      return this.downloadModel(model, task);\n    }\n\n    if (existingDownloader.completed) {\n      task.state = 'success';\n      this.taskRegistry.updateTask(task);\n\n      return existingDownloader.getTarget();\n    }\n\n    // Propagate cancellation token from existing task to the new one\n    task.cancellationToken = this.taskRegistry.findTaskByLabels({ 'model-pulling': model.id })?.cancellationToken;\n    this.taskRegistry.updateTask(task);\n\n    // If we have an existing downloader running we subscribe on its events\n    return new Promise((resolve, reject) => {\n      const disposable = existingDownloader.onEvent(event => {\n        if (!isCompletionEvent(event)) return;\n\n        switch (event.status) {\n          case 'completed':\n            resolve(existingDownloader.getTarget());\n            break;\n          default:\n            reject(new Error(event.message));\n        }\n        disposable.dispose();\n      });\n    });\n  }\n\n  private async onDownloadUploadEvent(event: BaseEvent, action: 'download' | 'upload'): Promise<void> {\n    let taskLabel = 'model-pulling';\n    let eventName = 'model.download';\n    if (action === 'upload') {\n      taskLabel = 'model-uploading';\n      eventName = 'model.upload';\n    }\n    // Always use the task registry as source of truth for tasks\n    const tasks = this.taskRegistry.getTasksByLabels({ [taskLabel]: event.id });\n    if (tasks.length === 0) {\n      // tasks might have been cleared but still an error.\n      console.error(`received ${action} event but no task is associated.`);\n      return;\n    }\n\n    for (const task of tasks) {\n      if 
(isProgressEvent(event)) {\n        task.state = 'loading';\n        task.progress = event.value;\n      } else if (isCompletionEvent(event)) {\n        // status error or canceled\n        if (event.status === 'error' || event.status === 'canceled') {\n          task.state = 'error';\n          task.progress = undefined;\n          task.error = event.message;\n\n          // telemetry usage\n          this.telemetry.logError(eventName, {\n            'model.id': event.id,\n            message: `error ${action}ing model`,\n            error: event.message,\n            durationSeconds: event.duration,\n          });\n        } else {\n          task.state = 'success';\n          task.progress = 100;\n\n          // telemetry usage\n          this.telemetry.logUsage(eventName, { 'model.id': event.id, durationSeconds: event.duration });\n        }\n\n        // cleanup downloader\n        this.#downloaders.delete(event.id);\n      }\n      this.taskRegistry.updateTask(task); // update task\n    }\n  }\n\n  public createDownloader(model: ModelInfo, abortSignal: AbortSignal): Downloader {\n    if (!model.url) {\n      throw new Error(`model ${model.id} does not have url defined.`);\n    }\n\n    const modelHandler = this.modelHandlerRegistry.findModelHandler(model.url);\n    if (!modelHandler) {\n      throw new Error(`no model registry found for model ${model.id} url ${model.url}`);\n    }\n\n    // Create a downloader\n    const downloader = modelHandler.createDownloader(model, abortSignal);\n    this.#downloaders.set(model.id, downloader);\n\n    return downloader;\n  }\n\n  private createDownloadTask(model: ModelInfo, labels?: { [key: string]: string }): Task {\n    // it may happen that the taskRegistry contains old entries representing an old failing download, we delete them as we are starting a new download\n    const failedPullingTaskIds = this.taskRegistry\n      .getTasksByLabels({\n        'model-pulling': model.id,\n      })\n      .filter(t => t.state === 
'error')\n      .map(t => t.id);\n    if (failedPullingTaskIds.length > 0) {\n      this.taskRegistry.deleteAll(failedPullingTaskIds);\n    }\n    return this.taskRegistry.createTask(`Downloading model ${model.name}`, 'loading', {\n      ...labels,\n      'model-pulling': model.id,\n    });\n  }\n\n  private async downloadModel(model: ModelInfo, task: Task): Promise<string> {\n    // Check if the model is already on disk.\n    if (this.isModelOnDisk(model.id)) {\n      task.name = `Model ${model.name} already present on disk`;\n\n      const modelPath = this.getLocalModelPath(model.id);\n      if (model.sha256) {\n        const isValid = await hasValidSha(modelPath, model.sha256);\n        if (!isValid) {\n          task.state = 'error';\n          task.error = `Model ${model.name} is already present on disk at ${modelPath} but its security hash (SHA-256) does not match the expected value. This may indicate the file has been altered or corrupted. Please delete it and try again.`;\n          this.taskRegistry.updateTask(task); // update task\n          throw new Error(\n            `Model ${model.name} is already present on disk at ${modelPath} but its security hash (SHA-256) does not match the expected value. This may indicate the file has been altered or corrupted. 
Please delete it and try again.`,\n          );\n        }\n      }\n\n      task.state = 'success';\n      this.taskRegistry.updateTask(task); // update task\n\n      // return model path\n      return modelPath;\n    }\n\n    const abortController = new AbortController();\n    task.cancellationToken = this.cancellationTokenRegistry.createCancellationTokenSource(() => {\n      abortController.abort('Cancel');\n    });\n\n    // update task to loading state\n    this.taskRegistry.updateTask(task);\n\n    const downloader = this.createDownloader(model, abortController.signal);\n\n    // Capture downloader events\n    downloader.onEvent(event => this.onDownloadUploadEvent(event, 'download'), this);\n\n    // perform download\n    await downloader.perform(model.id);\n    await this.updateModelInfos();\n\n    return downloader.getTarget();\n  }\n\n  async uploadModelToPodmanMachine(\n    connection: ContainerProviderConnection,\n    model: ModelInfo,\n    labels?: { [key: string]: string },\n  ): Promise<string> {\n    // ensure the model upload is not disabled\n    if (this.configurationRegistry.getExtensionConfiguration().modelUploadDisabled) {\n      console.warn('The model upload is disabled, this may cause the inference server to take a few minutes to start.');\n      return getLocalModelFile(model);\n    }\n\n    this.taskRegistry.createTask(`Copying model ${model.name} to ${connection.name}`, 'loading', {\n      ...labels,\n      'model-uploading': model.id,\n      connection: connection.name,\n    });\n\n    const uploader = new Uploader(connection, model);\n    uploader.onEvent(event => this.onDownloadUploadEvent(event, 'upload'), this);\n\n    // perform upload\n    const path = uploader.perform(model.id);\n    await this.updateModelInfos();\n\n    return path;\n  }\n\n  private async updateModelInfos(): Promise<void> {\n    // refresh model lists on event completion\n    await this.getLocalModelsFromDisk();\n    this.sendModelsInfo().catch((err: unknown) 
=> {\n      console.error('Something went wrong while sending models info.', err);\n    });\n  }\n\n  async getModelMetadata(modelId: string): Promise<Record<string, unknown>> {\n    const model = this.#models.get(modelId);\n    if (!model) throw new Error(`model with id ${modelId} does not exists.`);\n\n    const before = performance.now();\n    const data: Record<string, unknown> = {\n      'model-id': getHash(modelId),\n    };\n\n    try {\n      let result: GGUFParseOutput<{ strict: false }>;\n      if (this.isModelOnDisk(modelId)) {\n        const modelPath = path.normalize(getLocalModelFile(model));\n        result = await gguf(modelPath, { allowLocalFile: true });\n      } else if (model.url) {\n        result = await gguf(model.url);\n      } else {\n        throw new Error('cannot get model metadata');\n      }\n      return result.metadata;\n    } catch (err: unknown) {\n      data['error'] = err;\n      console.error(err);\n      throw err;\n    } finally {\n      data['duration'] = performance.now() - before;\n      this.telemetry.logUsage('get-metadata', data);\n    }\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/monitoringManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, afterEach, test, vi } from 'vitest';\nimport { MonitoringManager } from './monitoringManager';\nimport { containerEngine, type ContainerStatsInfo, type Disposable } from '@podman-desktop/api';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_MONITORING_UPDATE } from '@shared/Messages';\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    containerEngine: {\n      statsContainer: vi.fn(),\n    },\n  };\n});\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n  vi.mocked(containerEngine.statsContainer).mockResolvedValue({} as unknown as Disposable);\n\n  vi.useFakeTimers();\n});\n\nafterEach(() => {\n  vi.useRealTimers();\n});\n\nfunction simplifiedCallback(callback: (arg: ContainerStatsInfo) => void, cpu: number, ram: number): void {\n  callback({\n    cpu_stats: {\n      cpu_usage: {\n        total_usage: cpu,\n      },\n    },\n    memory_stats: {\n      usage: ram,\n    },\n  } as unknown as ContainerStatsInfo);\n}\n\ntest('expect constructor to do 
nothing', () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  expect(containerEngine.statsContainer).not.toHaveBeenCalled();\n  expect(manager.getStats().length).toBe(0);\n  expect(rpcExtensionMock.fire).not.toHaveBeenCalled();\n});\n\ntest('expect monitor method to start stats container', async () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  await manager.monitor('randomContainerId', 'dummyEngineId');\n\n  expect(containerEngine.statsContainer).toHaveBeenCalledWith('dummyEngineId', 'randomContainerId', expect.anything());\n});\n\ntest('expect monitor method to start stats container', async () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  await manager.monitor('randomContainerId', 'dummyEngineId');\n\n  expect(containerEngine.statsContainer).toHaveBeenCalledWith('dummyEngineId', 'randomContainerId', expect.anything());\n});\n\ntest('expect dispose to dispose stats container', async () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  const fakeDisposable = vi.fn();\n  vi.mocked(containerEngine.statsContainer).mockResolvedValue({\n    dispose: fakeDisposable,\n  });\n\n  await manager.monitor('randomContainerId', 'dummyEngineId');\n\n  manager.dispose();\n  expect(fakeDisposable).toHaveBeenCalled();\n});\n\ntest('expect webview to be notified when statsContainer call back', async () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  let mCallback: ((stats: ContainerStatsInfo) => void) | undefined;\n  vi.mocked(containerEngine.statsContainer).mockImplementation(async (_engineId, _id, callback) => {\n    mCallback = callback;\n    return { dispose: (): void => {} };\n  });\n\n  await manager.monitor('randomContainerId', 'dummyEngineId');\n  await vi.waitFor(() => {\n    expect(mCallback).toBeDefined();\n  });\n\n  if (!mCallback) throw new Error('undefined mCallback');\n\n  const date = new Date(2000, 1, 1, 13);\n  vi.setSystemTime(date);\n\n  
simplifiedCallback(mCallback, 123, 99);\n\n  expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_MONITORING_UPDATE, [\n    {\n      containerId: 'randomContainerId',\n      stats: [\n        {\n          timestamp: Date.now(),\n          cpu_usage: 123,\n          memory_usage: 99,\n        },\n      ],\n    },\n  ]);\n});\n\ntest('expect stats to cumulate', async () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  let mCallback: ((stats: ContainerStatsInfo) => void) | undefined;\n  vi.mocked(containerEngine.statsContainer).mockImplementation(async (_engineId, _id, callback) => {\n    mCallback = callback;\n    return { dispose: (): void => {} };\n  });\n\n  await manager.monitor('randomContainerId', 'dummyEngineId');\n  await vi.waitFor(() => {\n    expect(mCallback).toBeDefined();\n  });\n\n  if (!mCallback) throw new Error('undefined mCallback');\n\n  simplifiedCallback(mCallback, 0, 0);\n  simplifiedCallback(mCallback, 1, 1);\n  simplifiedCallback(mCallback, 2, 2);\n  simplifiedCallback(mCallback, 3, 3);\n\n  const stats = manager.getStats();\n  expect(stats.length).toBe(1);\n  expect(stats[0].stats.length).toBe(4);\n});\n\ntest('expect old stats to be removed', async () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  let mCallback: ((stats: ContainerStatsInfo) => void) | undefined;\n  vi.mocked(containerEngine.statsContainer).mockImplementation(async (_engineId, _id, callback) => {\n    mCallback = callback;\n    return { dispose: (): void => {} };\n  });\n\n  await manager.monitor('randomContainerId', 'dummyEngineId');\n  await vi.waitFor(() => {\n    expect(mCallback).toBeDefined();\n  });\n\n  if (!mCallback) throw new Error('undefined mCallback');\n\n  vi.setSystemTime(new Date(2000, 1, 1, 13));\n\n  simplifiedCallback(mCallback, 0, 0);\n\n  vi.setSystemTime(new Date(2005, 1, 1, 13));\n\n  simplifiedCallback(mCallback, 1, 1);\n  simplifiedCallback(mCallback, 2, 2);\n  simplifiedCallback(mCallback, 3, 3);\n\n  const 
stats = manager.getStats();\n  expect(stats.length).toBe(1);\n  expect(stats[0].stats.length).toBe(3);\n});\n\ntest('expect stats to be disposed if stats result is an error', async () => {\n  const manager = new MonitoringManager(rpcExtensionMock);\n  let mCallback: ((stats: ContainerStatsInfo) => void) | undefined;\n  const fakeDisposable = vi.fn();\n  vi.mocked(containerEngine.statsContainer).mockImplementation(async (_engineId, _id, callback) => {\n    mCallback = callback;\n    return { dispose: fakeDisposable };\n  });\n\n  await manager.monitor('randomContainerId', 'dummyEngineId');\n  await vi.waitFor(() => {\n    expect(mCallback).toBeDefined();\n  });\n\n  if (!mCallback) throw new Error('undefined mCallback');\n\n  mCallback({ cause: 'container is stopped' } as unknown as ContainerStatsInfo);\n\n  const stats = manager.getStats();\n  expect(stats.length).toBe(0);\n  expect(fakeDisposable).toHaveBeenCalled();\n});\n"
  },
  {
    "path": "packages/backend/src/managers/monitoringManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { type Disposable, containerEngine, type ContainerStatsInfo } from '@podman-desktop/api';\nimport { Publisher } from '../utils/Publisher';\nimport { MSG_MONITORING_UPDATE } from '@shared/Messages';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nexport interface StatsInfo {\n  timestamp: number;\n  cpu_usage: number;\n  memory_usage: number;\n}\n\nexport interface StatsHistory {\n  containerId: string;\n  stats: StatsInfo[];\n}\n\nexport const MAX_AGE: number = 5 * 60 * 1000; // 5 minutes\n\nexport class MonitoringManager extends Publisher<StatsHistory[]> implements Disposable {\n  #containerStats: Map<string, StatsHistory>;\n  #disposables: Disposable[];\n\n  constructor(rpcExtension: RpcExtension) {\n    super(rpcExtension, MSG_MONITORING_UPDATE, () => this.getStats());\n    this.#containerStats = new Map<string, StatsHistory>();\n    this.#disposables = [];\n  }\n\n  async monitor(containerId: string, engineId: string): Promise<Disposable> {\n    const disposable = await containerEngine.statsContainer(engineId, containerId, statsInfo => {\n      if ('cause' in statsInfo) {\n        console.error('Cannot stats container', 
statsInfo.cause);\n        disposable.dispose();\n      } else {\n        this.push(containerId, statsInfo);\n      }\n    });\n    this.#disposables.push(disposable);\n    return disposable;\n  }\n\n  private push(containerId: string, statsInfo: ContainerStatsInfo): void {\n    let stats: StatsInfo[] = [];\n    const statsHistory = this.#containerStats.get(containerId);\n    if (statsHistory) {\n      const limit = Date.now() - MAX_AGE;\n      stats = statsHistory.stats.filter(stats => stats.timestamp > limit);\n    }\n\n    this.#containerStats.set(containerId, {\n      containerId: containerId,\n      stats: [\n        ...stats,\n        {\n          timestamp: Date.now(),\n          cpu_usage: statsInfo.cpu_stats.cpu_usage.total_usage,\n          memory_usage: statsInfo.memory_stats.usage,\n        },\n      ],\n    });\n    this.notify();\n  }\n\n  clear(containerId: string): void {\n    this.#containerStats.delete(containerId);\n  }\n\n  getStats(): StatsHistory[] {\n    return Array.from(this.#containerStats.values());\n  }\n\n  dispose(): void {\n    this.#disposables.forEach(disposable => disposable.dispose());\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/playground/McpServerManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport path from 'node:path';\nimport { type RpcExtension } from '@shared/messages/MessageProxy';\nimport { type McpClient, type McpServer, McpServerType, type McpSettings } from '@shared/models/McpSettings';\nimport { McpServerManager } from './McpServerManager';\nimport { JsonWatcher } from '../../utils/JsonWatcher';\nimport { toMcpClients } from '../../utils/mcpUtils';\n\nvi.mock('../../utils/JsonWatcher');\nvi.mock('../../utils/mcpUtils');\n\nconst mockJsonWatcher = {\n  init: vi.fn(),\n  dispose: vi.fn(),\n  onContentUpdated: vi.fn((fn: (mcpSettings: McpSettings) => void) => (update = fn)),\n} as unknown as JsonWatcher<McpSettings>;\nconst rpcExtension = { fire: vi.fn(() => Promise.resolve(true)) } as unknown as RpcExtension;\nlet update: (mcpSettings: McpSettings) => void;\nlet appUserDirectory: string;\nlet mcpServerManager: McpServerManager;\nbeforeEach(async () => {\n  vi.resetAllMocks();\n  vi.mocked(JsonWatcher).mockReturnValue(mockJsonWatcher);\n  vi.mocked(toMcpClients).mockImplementation(async (...mcpServers) =>\n    mcpServers.map(s => ({ name: s.name }) as unknown as McpClient),\n  
);\n  appUserDirectory = path.join('/', 'tmp', 'mcp-server-manager-test-');\n  mcpServerManager = new McpServerManager(rpcExtension, appUserDirectory);\n});\ntest('provides an empty default value', () => {\n  expect(mcpServerManager.getMcpSettings()).toEqual({ servers: {} });\n});\ntest('init initializes the watcher', () => {\n  mcpServerManager.init();\n  expect(mockJsonWatcher.init).toHaveBeenCalled();\n});\ntest('dispose disposes the watcher', () => {\n  mcpServerManager.dispose();\n  expect(mockJsonWatcher.dispose).toHaveBeenCalled();\n});\ndescribe('when loading mcp-settings.json', () => {\n  beforeEach(() => {\n    const mcpSettings = {\n      servers: {\n        'stdio-ok': {\n          enabled: true,\n          type: 'stdio',\n          command: 'npx',\n          args: ['-y', 'kubernetes-mcp-server'],\n        },\n        'sse-ok': {\n          enabled: true,\n          type: 'sse',\n          url: 'https://echo.example.com/sse',\n          headers: {\n            foo: 'bar',\n          },\n        },\n        'invalid-type': {\n          enabled: true,\n          type: 'invalid',\n          url: 'https://echo.example.com/sse',\n        },\n      },\n    } as unknown as McpSettings;\n    update(mcpSettings);\n  });\n  test('loads valid servers', () => {\n    expect(mcpServerManager.getMcpSettings().servers).toEqual(\n      expect.objectContaining({\n        'stdio-ok': {\n          enabled: true,\n          name: 'stdio-ok',\n          type: McpServerType.STDIO,\n          command: 'npx',\n          args: ['-y', 'kubernetes-mcp-server'],\n        },\n        'sse-ok': {\n          enabled: true,\n          name: 'sse-ok',\n          type: McpServerType.SSE,\n          url: 'https://echo.example.com/sse',\n          headers: { foo: 'bar' },\n        },\n      }),\n    );\n  });\n  test('ignores invalid servers', () => {\n    expect(mcpServerManager.getMcpSettings().servers['invalid-type']).toBeUndefined();\n  });\n});\ntest('toMcpClients returns the enabled 
servers', async () => {\n  mcpServerManager.init();\n  update({\n    servers: {\n      enabled: { enabled: true, type: McpServerType.STDIO } as unknown as McpServer,\n      disabled: { enabled: false, type: McpServerType.STDIO } as unknown as McpServer,\n    },\n  });\n  const mcpClients = await mcpServerManager.toMcpClients();\n  expect(mcpClients).toEqual([{ name: 'enabled' }]);\n});\n"
  },
  {
    "path": "packages/backend/src/managers/playground/McpServerManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport path from 'node:path';\nimport { type Disposable } from '@podman-desktop/api';\nimport { MSG_MCP_SERVERS_UPDATE } from '@shared/Messages';\nimport { type McpSettings, McpServerType, type McpClient } from '@shared/models/McpSettings';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { JsonWatcher } from '../../utils/JsonWatcher';\nimport { Publisher } from '../../utils/Publisher';\nimport { toMcpClients } from '../../utils/mcpUtils';\n\n// TODO: Agree on the name of the file and its location\nconst MCP_SETTINGS = 'mcp-settings.json';\n\nexport class McpServerManager extends Publisher<McpSettings> implements Disposable {\n  private readonly settingsFile: string;\n  private mcpSettings: McpSettings;\n  readonly #jsonWatcher: JsonWatcher<McpSettings>;\n\n  constructor(\n    rpcExtension: RpcExtension,\n    private appUserDirectory: string,\n  ) {\n    super(rpcExtension, MSG_MCP_SERVERS_UPDATE, () => this.getMcpSettings());\n    this.settingsFile = path.join(this.appUserDirectory, MCP_SETTINGS);\n    this.mcpSettings = {\n      servers: {},\n    };\n    this.#jsonWatcher = new JsonWatcher<McpSettings>(this.settingsFile, { 
...this.mcpSettings });\n    this.#jsonWatcher.onContentUpdated(this.onMcpSettingsUpdated.bind(this));\n  }\n\n  /**\n   * Lazily initialize the MCP server manager dependencies.\n   */\n  init(): void {\n    this.#jsonWatcher.init();\n  }\n\n  private onMcpSettingsUpdated(mcpSettings: McpSettings): void {\n    this.mcpSettings = { servers: {} };\n    for (const [name, mcpServer] of Object.entries(mcpSettings.servers ?? {})) {\n      mcpServer.name = name;\n      if (!Object.values(McpServerType).includes(mcpServer.type)) {\n        console.warn(`McpServerManager: Invalid MCP server type ${mcpServer.type} for server ${mcpServer.name}.`);\n        continue;\n      }\n      this.mcpSettings.servers[name] = mcpServer;\n    }\n    this.notify();\n  }\n\n  getMcpSettings(): McpSettings {\n    return this.mcpSettings;\n  }\n\n  async toMcpClients(): Promise<McpClient[]> {\n    const enabledServers = Object.values(this.mcpSettings.servers).filter(server => server.enabled);\n    return toMcpClients(...enabledServers);\n  }\n\n  dispose(): void {\n    this.#jsonWatcher.dispose();\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/playground/aiSdk.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { describe, test, expect, beforeEach, vi } from 'vitest';\nimport * as ai from 'ai';\nimport { MockLanguageModelV3 } from 'ai/test';\nimport { AiStreamProcessor, toCoreMessage } from './aiSdk';\nimport type {\n  AssistantChat,\n  ChatMessage,\n  Conversation,\n  ErrorMessage,\n  Message,\n  PendingChat,\n  UserChat,\n} from '@shared/models/IPlaygroundMessage';\nimport type {\n  LanguageModelV3,\n  LanguageModelV2CallWarning,\n  LanguageModelV3StreamPart,\n  LanguageModelV3GenerateResult,\n} from '@ai-sdk/provider';\nimport { ConversationRegistry } from '../../registries/ConversationRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport type { ModelOptions } from '@shared/models/IModelOptions';\nimport type { ToolSet } from 'ai';\nimport { jsonSchema, simulateStreamingMiddleware, tool, wrapLanguageModel } from 'ai';\n\nvi.mock('ai', async original => {\n  const mod = (await original()) as object;\n  return { ...mod };\n});\n\n/* eslint-disable sonarjs/no-nested-functions */\ndescribe('aiSdk', () => {\n  beforeEach(() => {\n    vi.resetAllMocks();\n  });\n  describe('toCoreMessage', () => {\n    test('with no fields', () => {\n 
     const result = toCoreMessage({} as Message);\n      expect(result).toEqual([]);\n    });\n    test('with no role', () => {\n      const result = toCoreMessage({ content: 'alex' } as ChatMessage);\n      expect(result).toEqual([]);\n    });\n    test('with no content', () => {\n      const result = toCoreMessage({ role: 'user' } as ChatMessage);\n      expect(result).toEqual([{ role: 'user', content: '' }]);\n    });\n    test('with all fields', () => {\n      const result = toCoreMessage({ role: 'user', content: 'alex' } as ChatMessage);\n      expect(result).toEqual([{ role: 'user', content: 'alex' }]);\n    });\n    test('with multiple messages', () => {\n      const result = toCoreMessage(\n        { role: 'user', content: 'alex' } as ChatMessage,\n        { role: 'assistant', content: 'bob' } as ChatMessage,\n      );\n      expect(result).toEqual([\n        { role: 'user', content: 'alex' },\n        { role: 'assistant', content: 'bob' },\n      ]);\n    });\n    test('with tool call messages', () => {\n      const result = toCoreMessage(\n        { role: 'user', content: 'alex' } as ChatMessage,\n        {\n          role: 'assistant',\n          content: {\n            type: 'tool-call',\n            toolCallId: 'call-001',\n            toolName: 'tool-1',\n            args: {},\n            result: {\n              content: [{ type: 'text', text: 'Success!!!' }],\n            },\n          },\n        } as AssistantChat,\n        { role: 'assistant', content: 'The call to the tool was a success!' 
} as AssistantChat,\n      );\n      expect(result).toEqual([\n        { role: 'user', content: 'alex' },\n        {\n          role: 'assistant',\n          content: [\n            {\n              type: 'tool-call',\n              toolCallId: 'call-001',\n              toolName: 'tool-1',\n              input: {},\n            },\n          ],\n        },\n        {\n          role: 'tool',\n          content: [\n            {\n              type: 'tool-result',\n              toolCallId: 'call-001',\n              toolName: 'tool-1',\n              output: {\n                content: [{ type: 'text', text: 'Success!!!' }],\n              },\n            },\n          ],\n        },\n        { role: 'assistant', content: 'The call to the tool was a success!' },\n      ]);\n    });\n  });\n  describe('AiStreamProcessor', () => {\n    let conversationRegistry: ConversationRegistry;\n    let conversationId: string;\n    beforeEach(() => {\n      const rpcExtension = {\n        fire: vi.fn().mockResolvedValue(true),\n      } as unknown as RpcExtension;\n      conversationRegistry = new ConversationRegistry(rpcExtension);\n      conversationId = conversationRegistry.createConversation('test-conversation', 'test-model');\n      conversationRegistry.submit(conversationId, {\n        content: 'Aitana, please proceed with the test',\n        role: 'user',\n        id: conversationRegistry.getUniqueId(),\n        timestamp: Date.now(),\n      } as UserChat);\n    });\n    test('sends model options', async () => {\n      const streamTextSpy = vi.spyOn(ai, 'streamText');\n      const streamProcessor = new AiStreamProcessor(conversationId, conversationRegistry);\n      const streamResult = streamProcessor.stream(createTestModel(), undefined, {\n        temperature: 42,\n        top_p: 13,\n        max_tokens: 37,\n        stream_options: { include_usage: true },\n      } as ModelOptions);\n      await streamResult.consumeStream();\n      
expect(streamTextSpy).toHaveBeenCalledWith(\n        expect.objectContaining({\n          model: expect.anything(),\n          temperature: 42,\n          maxOutputTokens: 37,\n          topP: 13,\n          abortSignal: expect.any(AbortSignal),\n          messages: expect.any(Array),\n          onStepFinish: expect.any(Function),\n          onError: expect.any(Function),\n          onChunk: expect.any(Function),\n        }),\n      );\n    });\n    test('abort, completes the last assistant message', async () => {\n      const incompleteMessageId = 'incomplete-message-id';\n      conversationRegistry.submit(conversationId, {\n        id: incompleteMessageId,\n        role: 'assistant',\n        timestamp: Date.now(),\n        choices: [],\n        completed: undefined,\n      } as PendingChat);\n      const streamProcessor = new AiStreamProcessor(conversationId, conversationRegistry);\n      streamProcessor['currentMessageId'] = incompleteMessageId;\n      streamProcessor.abortController.abort('cancel');\n      expect(conversationRegistry.get(conversationId).messages).toHaveLength(2);\n      expect((conversationRegistry.get(conversationId).messages[1] as AssistantChat).completed).not.toBeUndefined();\n    });\n    describe('with stream error', () => {\n      beforeEach(async () => {\n        // eslint-disable-next-line sonarjs/no-nested-functions\n        const doStream: LanguageModelV3['doStream'] = async () => {\n          throw new Error('The stream is kaput.');\n        };\n        const model = new MockLanguageModelV3({ doStream });\n        await new AiStreamProcessor(conversationId, conversationRegistry).stream(model).consumeStream();\n      });\n      test('appends a single message', () => {\n        expect(conversationRegistry.get(conversationId).messages).toHaveLength(2);\n      });\n      test('appended message is error', () => {\n        expect((conversationRegistry.get(conversationId).messages[1] as ErrorMessage).error).toEqual(\n          'The stream 
is kaput.',\n        );\n      });\n    });\n    describe('with single message stream', () => {\n      let model: LanguageModelV3;\n      beforeEach(async () => {\n        model = createTestModel({\n          stream: ai.simulateReadableStream({\n            chunks: [\n              {\n                type: 'response-metadata',\n                id: 'id-0',\n                modelId: 'mock-model-id',\n                timestamp: new Date(0),\n              },\n              { type: 'text-delta', id: 'id-1', delta: 'Greetings' },\n              { type: 'text-delta', id: 'id-2', delta: ' professor ' },\n              { type: 'text-delta', id: 'id-3', delta: `Falken` },\n              {\n                type: 'finish',\n                finishReason: { unified: 'stop', raw: undefined },\n                usage: {\n                  outputTokens: { total: 133, text: undefined, reasoning: undefined },\n                  inputTokens: { total: 7, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },\n                  totalTokens: 140,\n                },\n              },\n            ],\n          }),\n        });\n        await new AiStreamProcessor(conversationId, conversationRegistry).stream(model).consumeStream();\n      });\n      test('appends a single message', () => {\n        expect(conversationRegistry.get(conversationId).messages).toHaveLength(2);\n      });\n      test('appended message is from assistant', () => {\n        expect((conversationRegistry.get(conversationId).messages[1] as ChatMessage).role).toEqual('assistant');\n      });\n      test('concatenates message content', () => {\n        expect((conversationRegistry.get(conversationId).messages[1] as ChatMessage).content).toEqual(\n          'Greetings professor Falken',\n        );\n      });\n      test('setsUsage', async () => {\n        const conversation = conversationRegistry.get(conversationId) as Conversation;\n        expect(conversation?.usage?.completion_tokens).toEqual(133);\n     
   expect(conversation?.usage?.prompt_tokens).toEqual(7);\n      });\n    });\n    describe('with wrapped generated multiple messages as stream', () => {\n      let model: LanguageModelV3;\n      let tools: ToolSet;\n      let generateStep: number;\n\n      beforeEach(async () => {\n        generateStep = 0;\n        model = wrapLanguageModel({\n          model: new MockLanguageModelV3({\n            doGenerate: async (): Promise<LanguageModelV3GenerateResult> => {\n              if (generateStep++ === 0) {\n                return {\n                  content: [\n                    {\n                      type: 'tool-call',\n                      toolCallId: 'call-001',\n                      toolName: 'tool-1',\n                      input: '{}',\n                    },\n                    {\n                      type: 'tool-call',\n                      toolCallId: 'call-002',\n                      toolName: 'tool-1',\n                      input: '{}',\n                    },\n                  ],\n                  finishReason: { unified: 'tool-calls', raw: undefined },\n                  usage: {\n                    inputTokens: { total: 1, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },\n                    outputTokens: { total: 1, text: undefined, reasoning: undefined },\n                  },\n                  warnings: [],\n                };\n              }\n              return {\n                content: [\n                  {\n                    type: 'text',\n                    text: 'These are the results of you functions: huge success!',\n                  },\n                ],\n                finishReason: { unified: 'stop', raw: undefined },\n                usage: {\n                  inputTokens: { total: 133, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },\n                  outputTokens: { total: 7, text: undefined, reasoning: undefined },\n                },\n                warnings: [],\n   
           };\n            },\n          }),\n          middleware: simulateStreamingMiddleware(),\n        });\n        tools = {\n          'tool-1': tool({\n            inputSchema: jsonSchema({ type: 'object' }),\n            execute: async () => 'successful result!',\n          }),\n        };\n        await new AiStreamProcessor(conversationId, conversationRegistry).stream(model, tools).consumeStream();\n      });\n      test('appends multiple messages', () => {\n        expect(conversationRegistry.get(conversationId).messages).toHaveLength(4);\n      });\n      test.each<{ index: number; toolCallId: string }>([\n        { index: 1, toolCallId: 'call-001' },\n        { index: 2, toolCallId: 'call-002' },\n      ])(`appends tool call (to tool-1) message at $index`, ({ index, toolCallId }) => {\n        const message = conversationRegistry.get(conversationId).messages[index] as AssistantChat;\n        expect(message.role).toEqual('assistant');\n        expect(message.content).toMatchObject({\n          type: 'tool-call',\n          toolCallId,\n          toolName: 'tool-1',\n          args: {},\n        });\n      });\n      test.each<{ index: number; id: string; toolCallId: string }>([\n        { index: 1, id: '3', toolCallId: 'call-001' },\n        { index: 2, id: '4', toolCallId: 'call-002' },\n      ])(`sets tool result message at $index for $toolCallId`, ({ index, id, toolCallId }) => {\n        const message = conversationRegistry.get(conversationId).messages[index] as AssistantChat;\n        expect(message.id).toEqual(id);\n        expect(message.timestamp).toBeDefined();\n        expect(message.role).toEqual('assistant');\n        expect(message.content).toMatchObject({\n          type: 'tool-call',\n          toolCallId,\n          toolName: 'tool-1',\n          args: {},\n        });\n        if (message.content && typeof message.content === 'object' && 'result' in message.content) {\n          expect(message.content.result).toEqual('successful 
result!');\n          expect(message.completed).toBeDefined();\n        }\n      });\n      test('appends final assistant message', () => {\n        const message = conversationRegistry.get(conversationId).messages[3] as AssistantChat;\n        expect(message.role).toEqual('assistant');\n        expect(message.content).toEqual('These are the results of you functions: huge success!');\n      });\n      test('setsUsage', async () => {\n        const conversation = conversationRegistry.get(conversationId) as Conversation;\n        expect(conversation?.usage?.completion_tokens).toEqual(7);\n        expect(conversation?.usage?.prompt_tokens).toEqual(133);\n      });\n    });\n  });\n});\n\nexport function createTestModel({\n  stream = ai.simulateReadableStream({ chunks: [] }),\n  rawCall = { rawPrompt: 'prompt', rawSettings: {} },\n  rawResponse = undefined,\n  request = undefined,\n  warnings,\n}: {\n  stream?: ReadableStream<LanguageModelV3StreamPart>;\n  rawResponse?: { headers: Record<string, string> };\n  rawCall?: { rawPrompt: string; rawSettings: Record<string, unknown> };\n  request?: { body: string };\n  warnings?: LanguageModelV2CallWarning[];\n} = {}): LanguageModelV3 {\n  return new MockLanguageModelV3({\n    doStream: async () => ({ stream, rawCall, rawResponse, request, warnings }),\n  });\n}\n"
  },
  {
    "path": "packages/backend/src/managers/playground/aiSdk.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { streamText, stepCountIs } from 'ai';\nimport type {\n  LanguageModel,\n  ModelMessage,\n  StepResult,\n  StreamTextResult,\n  StreamTextOnFinishCallback,\n  TextStreamPart,\n  ToolCallPart,\n  ToolResultPart,\n  ToolSet,\n} from 'ai';\nimport type { ModelOptions } from '@shared/models/IModelOptions';\nimport {\n  type AssistantChat,\n  type ErrorMessage,\n  isAssistantToolCall,\n  type Message,\n  type ModelUsage,\n  type PendingChat,\n  type ToolCall,\n} from '@shared/models/IPlaygroundMessage';\nimport { isChatMessage } from '@shared/models/IPlaygroundMessage';\nimport type { ConversationRegistry } from '../../registries/ConversationRegistry';\n\nexport function toCoreMessage(...messages: Message[]): ModelMessage[] {\n  const ret: ModelMessage[] = [];\n  for (const message of messages) {\n    if (isAssistantToolCall(message)) {\n      const toolCall = message.content as ToolCall;\n      ret.push({\n        role: 'assistant',\n        content: [\n          {\n            type: 'tool-call',\n            toolCallId: toolCall.toolCallId,\n            toolName: toolCall.toolName,\n            input: toolCall.args,\n          } as ToolCallPart,\n        ] as 
ToolCallPart[],\n      } as ModelMessage);\n      if (toolCall.result) {\n        ret.push({\n          role: 'tool',\n          content: [\n            {\n              type: 'tool-result',\n              toolCallId: toolCall.toolCallId,\n              toolName: toolCall.toolName,\n              output: toolCall.result,\n            } as ToolResultPart,\n          ] as ToolResultPart[],\n        } as ModelMessage);\n      }\n    } else if (isChatMessage(message)) {\n      ret.push({\n        role: message.role,\n        content: message.content ?? '',\n      } as ModelMessage);\n    }\n  }\n  return ret;\n}\n\nexport class AiStreamProcessor<TOOLS extends ToolSet> {\n  private stepStartTime: number | undefined;\n  private currentMessageId: string | undefined;\n  public readonly abortController: AbortController;\n\n  constructor(\n    private conversationId: string,\n    private conversationRegistry: ConversationRegistry,\n  ) {\n    this.abortController = new AbortController();\n    this.abortController.signal.addEventListener('abort', this.onAbort);\n  }\n\n  private onStepFinish = (stepResult: StepResult<TOOLS>): void => {\n    this.conversationRegistry.setUsage(this.conversationId, {\n      completion_tokens: stepResult.usage.outputTokens,\n      prompt_tokens: stepResult.usage.inputTokens,\n    } as ModelUsage);\n    if (this.currentMessageId !== undefined) {\n      this.conversationRegistry.completeMessage(this.conversationId, this.currentMessageId);\n    }\n    if (stepResult.toolCalls?.length > 0) {\n      for (const toolCall of stepResult.toolCalls) {\n        this.conversationRegistry.submit(this.conversationId, {\n          id: this.conversationRegistry.getUniqueId(),\n          role: 'assistant',\n          timestamp: this.stepStartTime,\n          content: {\n            type: 'tool-call',\n            toolCallId: toolCall.toolCallId,\n            toolName: toolCall.toolName,\n            args: toolCall.input,\n          } as ToolCall,\n        } as 
AssistantChat);\n      }\n    }\n    if (stepResult.toolResults?.length > 0) {\n      for (const toolResult of stepResult.toolResults) {\n        this.conversationRegistry.toolResult(\n          this.conversationId,\n          toolResult.toolCallId,\n          toolResult.output as string | object,\n        );\n      }\n    }\n    this.currentMessageId = undefined;\n    this.stepStartTime = Date.now();\n  };\n\n  private onChunk = ({ chunk }: { chunk: TextStreamPart<TOOLS> }): void => {\n    if (chunk.type !== 'text-delta') {\n      return;\n    }\n    if (this.currentMessageId === undefined) {\n      this.currentMessageId = this.conversationRegistry.getUniqueId();\n      this.conversationRegistry.submit(this.conversationId, {\n        id: this.currentMessageId,\n        role: 'assistant',\n        timestamp: this.stepStartTime,\n        choices: [],\n        completed: undefined,\n      } as PendingChat);\n    }\n    this.conversationRegistry.textDelta(this.conversationId, this.currentMessageId, chunk.text);\n  };\n\n  private onError = (error: unknown): void => {\n    if (error instanceof Object && 'error' in error) {\n      error = error.error;\n    }\n    if (error instanceof Error) {\n      error = error.message;\n    }\n    let errorMessage = String(error);\n    if (errorMessage.endsWith('Please reduce the length of the messages or completion.')) {\n      errorMessage += ' Note: You should start a new playground.';\n    }\n    console.error('Something went wrong while creating model response', errorMessage);\n    this.conversationRegistry.submit(this.conversationId, {\n      id: this.conversationRegistry.getUniqueId(),\n      timestamp: Date.now(),\n      error: errorMessage,\n    } as ErrorMessage);\n  };\n\n  private onAbort = (): void => {\n    // Ensure the last message is marked as complete to allow the user to resume the conversation\n    if (this.currentMessageId !== undefined) {\n      this.conversationRegistry.completeMessage(this.conversationId, 
this.currentMessageId);\n    }\n  };\n\n  private onFinish: StreamTextOnFinishCallback<TOOLS> = stepResult => {\n    this.conversationRegistry.setUsage(this.conversationId, {\n      completion_tokens: stepResult.usage.outputTokens,\n      prompt_tokens: stepResult.usage.inputTokens,\n    } as ModelUsage);\n  };\n\n  stream = (model: LanguageModel, tools?: TOOLS, options?: ModelOptions): StreamTextResult<TOOLS, never> => {\n    this.stepStartTime = Date.now();\n    return streamText({\n      model,\n      tools,\n      stopWhen: stepCountIs(10),\n      temperature: options?.temperature,\n      maxOutputTokens: (options?.max_tokens ?? -1) < 1 ? undefined : options?.max_tokens,\n      topP: options?.top_p,\n      abortSignal: this.abortController.signal,\n      messages: toCoreMessage(...this.conversationRegistry.get(this.conversationId).messages),\n      onStepFinish: this.onStepFinish,\n      onError: this.onError,\n      onChunk: this.onChunk,\n      onFinish: this.onFinish,\n    });\n  };\n}\n"
  },
  {
    "path": "packages/backend/src/managers/playgroundV2Manager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, vi, beforeEach, afterEach, describe } from 'vitest';\nimport { createOpenAICompatible } from '@ai-sdk/openai-compatible';\nimport { PlaygroundV2Manager } from './playgroundV2Manager';\nimport type { TelemetryLogger } from '@podman-desktop/api';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport type { InferenceManager } from './inference/inferenceManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { TaskRegistry } from '../registries/TaskRegistry';\nimport type { Task, TaskState } from '@shared/models/ITask';\nimport type { ChatMessage, ErrorMessage } from '@shared/models/IPlaygroundMessage';\nimport type { CancellationTokenRegistry } from '../registries/CancellationTokenRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_CONVERSATIONS_UPDATE } from '@shared/Messages';\nimport type { LanguageModelV2CallWarning, LanguageModelV3, LanguageModelV3StreamPart } from '@ai-sdk/provider';\nimport { type McpServerManager } from './playground/McpServerManager';\nimport { MockLanguageModelV3 } from 'ai/test';\nimport { simulateReadableStream } from 
'ai';\n\nvi.mock('@ai-sdk/openai-compatible', () => ({\n  createOpenAICompatible: vi.fn(),\n}));\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nconst inferenceManagerMock = {\n  get: vi.fn(),\n  getServers: vi.fn(),\n  createInferenceServer: vi.fn(),\n  startInferenceServer: vi.fn(),\n} as unknown as InferenceManager;\n\nconst taskRegistryMock = {\n  createTask: vi.fn(),\n  getTasksByLabels: vi.fn(),\n  updateTask: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst telemetryMock = {\n  logUsage: vi.fn(),\n  logError: vi.fn(),\n} as unknown as TelemetryLogger;\n\nconst cancellationTokenRegistryMock = {\n  createCancellationTokenSource: vi.fn(),\n  delete: vi.fn(),\n} as unknown as CancellationTokenRegistry;\n\nlet mcpServerManager: McpServerManager;\nlet createTestModel: (options: {\n  stream?: ReadableStream<LanguageModelV3StreamPart>;\n  rawResponse?: { headers: Record<string, string> };\n  rawCall?: { rawPrompt: string; rawSettings: Record<string, unknown> };\n  request?: { body: string };\n  warnings?: LanguageModelV2CallWarning[];\n}) => LanguageModelV3;\n\nbeforeEach(async () => {\n  vi.resetAllMocks();\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n  vi.useFakeTimers();\n  mcpServerManager = {\n    getMcpSettings: vi.fn(() => {}),\n    toMcpClients: vi.fn(() => []),\n  } as unknown as McpServerManager;\n  createTestModel = (await import('./playground/aiSdk.spec')).createTestModel;\n});\n\nafterEach(async () => {\n  vi.useRealTimers();\n});\n\ntest('manager should be properly initialized', () => {\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  expect(manager.getConversations().length).toBe(0);\n});\n\ntest('submit should throw an error if the server is stopped', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      
status: 'running',\n      models: [\n        {\n          id: 'model1',\n        },\n      ],\n    } as unknown as InferenceServer,\n  ]);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground('playground 1', { id: 'model1' } as ModelInfo, 'tracking-1');\n\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      status: 'stopped',\n      models: [\n        {\n          id: 'model1',\n        },\n      ],\n    } as unknown as InferenceServer,\n  ]);\n\n  await expect(manager.submit(manager.getConversations()[0].id, 'dummyUserInput')).rejects.toThrowError(\n    'Inference server is not running.',\n  );\n});\n\ntest('submit should throw an error if the server is unhealthy', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      status: 'running',\n      health: {\n        Status: 'unhealthy',\n      },\n      models: [\n        {\n          id: 'model1',\n        },\n      ],\n    } as unknown as InferenceServer,\n  ]);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground('p1', { id: 'model1' } as ModelInfo, 'tracking-1');\n  const playgroundId = manager.getConversations()[0].id;\n  await expect(manager.submit(playgroundId, 'dummyUserInput')).rejects.toThrowError(\n    'Inference server is not healthy, currently status: unhealthy.',\n  );\n});\n\ntest('create playground should create conversation.', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      status: 'running',\n      health: {\n        Status: 'healthy',\n      },\n      models: [\n        {\n          id: 'dummyModelId',\n          file: {\n            
file: 'dummyModelFile',\n          },\n        },\n      ],\n    } as unknown as InferenceServer,\n  ]);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  expect(manager.getConversations().length).toBe(0);\n  await manager.createPlayground('playground 1', { id: 'model-1' } as ModelInfo, 'tracking-1');\n\n  const conversations = manager.getConversations();\n  expect(conversations.length).toBe(1);\n});\n\ntest('valid submit should create IPlaygroundMessage and notify the webview', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      status: 'running',\n      health: {\n        Status: 'healthy',\n      },\n      models: [\n        {\n          id: 'dummyModelId',\n          file: {\n            path: '.',\n            file: 'dummyModelFile',\n          },\n        },\n      ],\n      connection: {\n        port: 8888,\n      },\n      labels: [],\n    } as unknown as InferenceServer,\n  ]);\n  // @ts-expect-error - Mock return type for testing\n  vi.mocked(createOpenAICompatible).mockReturnValue(() =>\n    createTestModel({\n      stream: simulateReadableStream({\n        chunks: [\n          { type: 'text-delta', id: 'id-1', delta: 'The message from the model' },\n          {\n            type: 'finish',\n            finishReason: { unified: 'stop', raw: undefined },\n            usage: {\n              outputTokens: { total: 133, text: undefined, reasoning: undefined },\n              inputTokens: { total: 7, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },\n            },\n          },\n        ],\n      }),\n    }),\n  );\n\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await 
manager.createPlayground('playground 1', { id: 'dummyModelId' } as ModelInfo, 'tracking-1');\n\n  const date = new Date(2000, 1, 1, 13);\n  vi.setSystemTime(date);\n\n  const playgrounds = manager.getConversations();\n  await manager.submit(playgrounds[0].id, 'dummyUserInput');\n\n  // Wait for assistant message to be completed\n  await vi.waitFor(() => {\n    expect(manager.getConversations()[0].usage?.completion_tokens).toBeGreaterThan(0);\n  });\n\n  const conversations = manager.getConversations();\n\n  expect(conversations.length).toBe(1);\n  expect(conversations[0].messages.length).toBe(2);\n  expect(conversations[0].messages[0]).toStrictEqual({\n    content: 'dummyUserInput',\n    id: expect.anything(),\n    options: undefined,\n    role: 'user',\n    timestamp: expect.any(Number),\n  });\n  expect(conversations[0].messages[1]).toStrictEqual({\n    choices: undefined,\n    completed: expect.any(Number),\n    content: 'The message from the model',\n    id: expect.anything(),\n    role: 'assistant',\n    timestamp: expect.any(Number),\n  });\n  expect(conversations[0].usage).toStrictEqual({\n    completion_tokens: 133,\n    prompt_tokens: 7,\n  });\n\n  expect(rpcExtensionMock.fire).toHaveBeenLastCalledWith(MSG_CONVERSATIONS_UPDATE, conversations);\n});\n\ntest('error', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      status: 'running',\n      health: {\n        Status: 'healthy',\n      },\n      models: [\n        {\n          id: 'dummyModelId',\n          file: {\n            path: '.',\n            file: 'dummyModelFile',\n          },\n        },\n      ],\n      connection: {\n        port: 8888,\n      },\n      labels: [],\n    } as unknown as InferenceServer,\n  ]);\n  const doStream: LanguageModelV3['doStream'] = async () => {\n    throw new Error('Please reduce the length of the messages or completion.');\n  };\n  vi.mocked(createOpenAICompatible).mockReturnValue(\n    // @ts-expect-error 
MockLanguageModelV2 test mock\n    // eslint-disable-next-line sonarjs/new-operator-misuse\n    () =>\n      new (MockLanguageModelV3 as unknown as new (options: {\n        doStream: LanguageModelV3['doStream'];\n      }) => LanguageModelV3)({ doStream }),\n  );\n\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground('playground 1', { id: 'dummyModelId' } as ModelInfo, 'tracking-1');\n\n  const date = new Date(2000, 1, 1, 13);\n  vi.setSystemTime(date);\n\n  const playgrounds = manager.getConversations();\n  await manager.submit(playgrounds[0].id, 'dummyUserInput');\n\n  // Wait for error message\n  await vi.waitFor(() => {\n    expect((manager.getConversations()[0].messages[1] as ErrorMessage).error).toBeDefined();\n  });\n\n  const conversations = manager.getConversations();\n\n  expect(conversations.length).toBe(1);\n  expect(conversations[0].messages.length).toBe(2);\n  expect(conversations[0].messages[0]).toStrictEqual({\n    content: 'dummyUserInput',\n    id: expect.anything(),\n    options: undefined,\n    role: 'user',\n    timestamp: expect.any(Number),\n  });\n  expect(conversations[0].messages[1]).toStrictEqual({\n    error: 'Please reduce the length of the messages or completion. 
Note: You should start a new playground.',\n    id: expect.anything(),\n    timestamp: expect.any(Number),\n  });\n\n  expect(rpcExtensionMock.fire).toHaveBeenLastCalledWith(MSG_CONVERSATIONS_UPDATE, conversations);\n});\n\ntest('creating a new playground should send new playground to frontend', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([]);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground(\n    'a name',\n    {\n      id: 'model-1',\n      name: 'Model 1',\n    } as unknown as ModelInfo,\n    'tracking-1',\n  );\n  expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_CONVERSATIONS_UPDATE, [\n    {\n      id: expect.anything(),\n      modelId: 'model-1',\n      name: 'a name',\n      messages: [],\n      usage: {\n        completion_tokens: 0,\n        prompt_tokens: 0,\n      },\n    },\n  ]);\n});\n\ntest('creating a new playground with no name should send new playground to frontend with generated name', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([]);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground(\n    '',\n    {\n      id: 'model-1',\n      name: 'Model 1',\n    } as unknown as ModelInfo,\n    'tracking-1',\n  );\n  expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_CONVERSATIONS_UPDATE, [\n    {\n      id: expect.anything(),\n      modelId: 'model-1',\n      name: 'playground 1',\n      messages: [],\n      usage: {\n        completion_tokens: 0,\n        prompt_tokens: 0,\n      },\n    },\n  ]);\n});\n\ntest('creating a new playground with no model served should start an inference server', async () => {\n  
vi.mocked(inferenceManagerMock.getServers).mockReturnValue([]);\n  const createInferenceServerMock = vi.mocked(inferenceManagerMock.createInferenceServer);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground(\n    'a name',\n    {\n      id: 'model-1',\n      name: 'Model 1',\n    } as unknown as ModelInfo,\n    'tracking-1',\n  );\n  expect(createInferenceServerMock).toHaveBeenCalledWith({\n    gpuLayers: expect.any(Number),\n    image: undefined,\n    providerId: undefined,\n    inferenceProvider: undefined,\n    labels: {\n      trackingId: 'tracking-1',\n    },\n    modelsInfo: [\n      {\n        id: 'model-1',\n        name: 'Model 1',\n      },\n    ],\n    port: expect.anything(),\n  });\n});\n\ntest('creating a new playground with the model already served should not start an inference server', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      models: [\n        {\n          id: 'model-1',\n        },\n      ],\n    },\n  ] as InferenceServer[]);\n  const createInferenceServerMock = vi.mocked(inferenceManagerMock.createInferenceServer);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground(\n    'a name',\n    {\n      id: 'model-1',\n      name: 'Model 1',\n    } as unknown as ModelInfo,\n    'tracking-1',\n  );\n  expect(createInferenceServerMock).not.toHaveBeenCalled();\n});\n\ntest('creating a new playground with the model server stopped should start the inference server', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n    {\n      models: [\n        {\n          id: 'model-1',\n        },\n      ],\n      status: 
'stopped',\n      container: {\n        containerId: 'container-1',\n      },\n    },\n  ] as InferenceServer[]);\n  const createInferenceServerMock = vi.mocked(inferenceManagerMock.createInferenceServer);\n  const startInferenceServerMock = vi.mocked(inferenceManagerMock.startInferenceServer);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await manager.createPlayground(\n    'a name',\n    {\n      id: 'model-1',\n      name: 'Model 1',\n    } as unknown as ModelInfo,\n    'tracking-1',\n  );\n  expect(createInferenceServerMock).not.toHaveBeenCalled();\n  expect(startInferenceServerMock).toHaveBeenCalledWith('container-1');\n});\n\ntest('delete conversation should delete the conversation', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([]);\n\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  expect(manager.getConversations().length).toBe(0);\n  await manager.createPlayground(\n    'a name',\n    {\n      id: 'model-1',\n      name: 'Model 1',\n    } as unknown as ModelInfo,\n    'tracking-1',\n  );\n\n  const conversations = manager.getConversations();\n  expect(conversations.length).toBe(1);\n  manager.deleteConversation(conversations[0].id);\n  expect(manager.getConversations().length).toBe(0);\n  expect(rpcExtensionMock.fire).toHaveBeenCalled();\n});\n\ntest('creating a new playground with an existing name should fail', async () => {\n  vi.mocked(inferenceManagerMock.getServers).mockReturnValue([]);\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  await 
manager.createPlayground(\n    'a name',\n    {\n      id: 'model-1',\n      name: 'Model 1',\n    } as unknown as ModelInfo,\n    'tracking-1',\n  );\n  await expect(\n    manager.createPlayground(\n      'a name',\n      {\n        id: 'model-2',\n        name: 'Model 2',\n      } as unknown as ModelInfo,\n      'tracking-2',\n    ),\n  ).rejects.toThrowError('a playground with the name a name already exists');\n});\n\ntest('requestCreatePlayground should call createPlayground and createTask, then updateTask', async () => {\n  vi.useRealTimers();\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n    telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  const createTaskMock = vi.mocked(taskRegistryMock).createTask;\n  const updateTaskMock = vi.mocked(taskRegistryMock).updateTask;\n  createTaskMock.mockImplementation((_name: string, _state: TaskState, labels?: { [id: string]: string }) => {\n    return {\n      labels,\n    } as Task;\n  });\n  const createPlaygroundSpy = vi.spyOn(manager, 'createPlayground').mockResolvedValue('playground-1');\n\n  const id = await manager.requestCreatePlayground('a name', { id: 'model-1' } as ModelInfo);\n\n  expect(createPlaygroundSpy).toHaveBeenCalledWith('a name', { id: 'model-1' } as ModelInfo, expect.any(String));\n  expect(createTaskMock).toHaveBeenCalledWith('Creating Playground environment', 'loading', {\n    trackingId: id,\n  });\n  await new Promise(resolve => setTimeout(resolve, 0));\n  expect(updateTaskMock).toHaveBeenCalledWith({\n    labels: {\n      trackingId: id,\n      playgroundId: 'playground-1',\n    },\n    state: 'success',\n  });\n});\n\ntest('requestCreatePlayground should call createPlayground and createTask, then updateTask when createPlayground fails', async () => {\n  vi.useRealTimers();\n  const manager = new PlaygroundV2Manager(\n    rpcExtensionMock,\n    inferenceManagerMock,\n    taskRegistryMock,\n   
 telemetryMock,\n    cancellationTokenRegistryMock,\n    mcpServerManager,\n  );\n  const createTaskMock = vi.mocked(taskRegistryMock).createTask;\n  const updateTaskMock = vi.mocked(taskRegistryMock).updateTask;\n  const getTasksByLabelsMock = vi.mocked(taskRegistryMock).getTasksByLabels;\n  createTaskMock.mockImplementation((_name: string, _state: TaskState, labels?: { [id: string]: string }) => {\n    return {\n      labels,\n    } as Task;\n  });\n  const createPlaygroundSpy = vi.spyOn(manager, 'createPlayground').mockRejectedValue(new Error('an error'));\n\n  const id = await manager.requestCreatePlayground('a name', { id: 'model-1' } as ModelInfo);\n\n  expect(createPlaygroundSpy).toHaveBeenCalledWith('a name', { id: 'model-1' } as ModelInfo, expect.any(String));\n  expect(createTaskMock).toHaveBeenCalledWith('Creating Playground environment', 'loading', {\n    trackingId: id,\n  });\n\n  getTasksByLabelsMock.mockReturnValue([\n    {\n      labels: {\n        trackingId: id,\n      },\n    } as unknown as Task,\n  ]);\n\n  await new Promise(resolve => setTimeout(resolve, 0));\n  expect(updateTaskMock).toHaveBeenCalledWith({\n    error: 'Something went wrong while trying to create a playground environment Error: an error.',\n    labels: {\n      trackingId: id,\n    },\n    state: 'error',\n  });\n});\n\ndescribe('system prompt', () => {\n  test('set system prompt on non existing conversation should throw an error', async () => {\n    vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n      {\n        status: 'running',\n        models: [\n          {\n            id: 'model1',\n          },\n        ],\n      } as unknown as InferenceServer,\n    ]);\n    const manager = new PlaygroundV2Manager(\n      rpcExtensionMock,\n      inferenceManagerMock,\n      taskRegistryMock,\n      telemetryMock,\n      cancellationTokenRegistryMock,\n      mcpServerManager,\n    );\n\n    expect(() => {\n      manager.setSystemPrompt('invalid', 'content');\n    
}).toThrowError('conversation with id invalid does not exist.');\n  });\n\n  test('set system prompt should throw an error if user already submit message', async () => {\n    vi.mocked(inferenceManagerMock.getServers).mockReturnValue([\n      {\n        status: 'running',\n        health: {\n          Status: 'healthy',\n        },\n        models: [\n          {\n            id: 'dummyModelId',\n            file: {\n              path: '.',\n              file: 'dummyModelFile',\n            },\n          },\n        ],\n        connection: {\n          port: 8888,\n        },\n        labels: [],\n      } as unknown as InferenceServer,\n    ]);\n    // @ts-expect-error - Mock return type for testing\n    vi.mocked(createOpenAICompatible).mockReturnValue(() =>\n      createTestModel({\n        stream: simulateReadableStream({\n          chunks: [\n            { type: 'text-delta', id: 'id-1', delta: 'The message from the model' },\n            {\n              type: 'finish',\n              finishReason: { unified: 'stop', raw: undefined },\n              usage: {\n                outputTokens: { total: 133, text: undefined, reasoning: undefined },\n                inputTokens: { total: 7, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },\n              },\n            },\n          ],\n        }),\n      }),\n    );\n\n    const manager = new PlaygroundV2Manager(\n      rpcExtensionMock,\n      inferenceManagerMock,\n      taskRegistryMock,\n      telemetryMock,\n      cancellationTokenRegistryMock,\n      mcpServerManager,\n    );\n    await manager.createPlayground('playground 1', { id: 'dummyModelId' } as ModelInfo, 'tracking-1');\n\n    const date = new Date(2000, 1, 1, 13);\n    vi.setSystemTime(date);\n\n    const conversations = manager.getConversations();\n    await manager.submit(conversations[0].id, 'dummyUserInput');\n\n    // Wait for assistant message to be completed\n    await vi.waitFor(() => {\n      
expect((manager.getConversations()[0].messages[1] as ChatMessage).content).toBeDefined();\n    });\n\n    expect(() => {\n      manager.setSystemPrompt(manager.getConversations()[0].id, 'newSystemPrompt');\n    }).toThrowError('Cannot change system prompt on started conversation.');\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/playgroundV2Manager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { Disposable, TelemetryLogger } from '@podman-desktop/api';\nimport type { InferenceManager } from './inference/inferenceManager';\nimport type { ModelOptions } from '@shared/models/IModelOptions';\nimport { ConversationRegistry } from '../registries/ConversationRegistry';\nimport type { Conversation, SystemPrompt, UserChat } from '@shared/models/IPlaygroundMessage';\nimport { isSystemPrompt } from '@shared/models/IPlaygroundMessage';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { withDefaultConfiguration } from '../utils/inferenceUtils';\nimport { getRandomString } from '../utils/randomUtils';\nimport type { TaskRegistry } from '../registries/TaskRegistry';\nimport type { CancellationTokenRegistry } from '../registries/CancellationTokenRegistry';\nimport { getHash } from '../utils/sha';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { createOpenAICompatible } from '@ai-sdk/openai-compatible';\nimport { AiStreamProcessor } from './playground/aiSdk';\nimport { type McpServerManager } from './playground/McpServerManager';\nimport type { ToolSet } from 'ai';\nimport { simulateStreamingMiddleware, 
wrapLanguageModel } from 'ai';\n\nexport class PlaygroundV2Manager implements Disposable {\n  readonly #conversationRegistry: ConversationRegistry;\n\n  constructor(\n    rpcExtension: RpcExtension,\n    private inferenceManager: InferenceManager,\n    private taskRegistry: TaskRegistry,\n    private telemetry: TelemetryLogger,\n    private cancellationTokenRegistry: CancellationTokenRegistry,\n    private mcpServerManager: McpServerManager,\n  ) {\n    this.#conversationRegistry = new ConversationRegistry(rpcExtension);\n  }\n\n  deleteConversation(conversationId: string): void {\n    const conversation = this.#conversationRegistry.get(conversationId);\n    this.telemetry.logUsage('playground.delete', {\n      totalMessages: conversation.messages.length,\n      modelId: getHash(conversation.modelId),\n    });\n    this.#conversationRegistry.deleteConversation(conversationId);\n  }\n\n  async requestCreatePlayground(name: string, model: ModelInfo): Promise<string> {\n    const trackingId: string = getRandomString();\n    const task = this.taskRegistry.createTask('Creating Playground environment', 'loading', {\n      trackingId: trackingId,\n    });\n\n    const telemetry: Record<string, unknown> = {\n      hasName: !!name,\n      modelId: getHash(model.id),\n    };\n    this.createPlayground(name, model, trackingId)\n      .then((playgroundId: string) => {\n        this.taskRegistry.updateTask({\n          ...task,\n          state: 'success',\n          labels: {\n            ...task.labels,\n            playgroundId,\n          },\n        });\n      })\n      .catch((err: unknown) => {\n        telemetry['errorMessage'] = `${String(err)}`;\n\n        const tasks = this.taskRegistry.getTasksByLabels({\n          trackingId: trackingId,\n        });\n        // Filter the one no in loading state\n        tasks\n          .filter(t => t.state === 'loading' && t.id !== task.id)\n          .forEach(t => {\n            this.taskRegistry.updateTask({\n              
...t,\n              state: 'error',\n            });\n          });\n        // Update the main task\n        this.taskRegistry.updateTask({\n          ...task,\n          state: 'error',\n          error: `Something went wrong while trying to create a playground environment ${String(err)}.`,\n        });\n      })\n      .finally(() => {\n        this.telemetry.logUsage('playground.create', telemetry);\n      });\n    return trackingId;\n  }\n\n  async createPlayground(name: string, model: ModelInfo, trackingId: string): Promise<string> {\n    if (!name) {\n      name = this.getFreeName();\n    }\n    if (!this.isNameFree(name)) {\n      throw new Error(`a playground with the name ${name} already exists`);\n    }\n\n    // Create conversation\n    const conversationId = this.#conversationRegistry.createConversation(name, model.id);\n\n    // create/start inference server if necessary\n    const servers = this.inferenceManager.getServers();\n    const server = servers.find(s => s.models.map(mi => mi.id).includes(model.id));\n    if (!server) {\n      await this.inferenceManager.createInferenceServer(\n        await withDefaultConfiguration({\n          modelsInfo: [model],\n          labels: {\n            trackingId: trackingId,\n          },\n        }),\n      );\n    } else if (server.status === 'stopped') {\n      await this.inferenceManager.startInferenceServer(server.container.containerId);\n    }\n\n    return conversationId;\n  }\n\n  /**\n   * Add a system prompt to an existing conversation.\n   * @param conversationId the conversation to append the system prompt to.\n   * @param content the content of the system prompt\n   */\n  private submitSystemPrompt(conversationId: string, content: string): void {\n    this.#conversationRegistry.submit(conversationId, {\n      content: content,\n      role: 'system',\n      id: this.#conversationRegistry.getUniqueId(),\n      timestamp: Date.now(),\n    } as SystemPrompt);\n    
this.telemetry.logUsage('playground.system-prompt.create', {\n      modelId: getHash(this.#conversationRegistry.get(conversationId).modelId),\n    });\n  }\n\n  /**\n   * Given a conversation, update the system prompt.\n   * If none exists, it will create one, otherwise it will replace the content with the new one\n   * @param conversationId the conversation id to set the system id\n   * @param content the new system prompt to use\n   */\n  setSystemPrompt(conversationId: string, content: string | undefined): void {\n    const conversation = this.#conversationRegistry.get(conversationId);\n\n    if (content === undefined || content.length === 0) {\n      this.#conversationRegistry.removeMessage(conversationId, conversation.messages[0].id);\n      this.telemetry.logUsage('playground.system-prompt.delete', {\n        modelId: getHash(conversation.modelId),\n      });\n      return;\n    }\n\n    if (conversation.messages.length === 0) {\n      this.submitSystemPrompt(conversationId, content);\n    } else if (conversation.messages.length === 1 && isSystemPrompt(conversation.messages[0])) {\n      this.#conversationRegistry.update(conversationId, conversation.messages[0].id, {\n        content,\n      });\n      this.telemetry.logUsage('playground.system-prompt.update', {\n        modelId: getHash(conversation.modelId),\n      });\n    } else {\n      throw new Error('Cannot change system prompt on started conversation.');\n    }\n  }\n\n  /**\n   * @param conversationId\n   * @param userInput the user input\n   * @param options the model configuration\n   */\n  async submit(conversationId: string, userInput: string, options?: ModelOptions): Promise<number> {\n    const conversation = this.#conversationRegistry.get(conversationId);\n\n    const servers = this.inferenceManager.getServers();\n    const server = servers.find(s => s.models.map(mi => mi.id).includes(conversation.modelId));\n    if (server === undefined) throw new Error('Inference server not found.');\n\n    
if (server.status !== 'running') throw new Error('Inference server is not running.');\n\n    if (server.health?.Status !== 'healthy')\n      throw new Error(`Inference server is not healthy, currently status: ${server.health?.Status ?? 'unknown'}.`);\n\n    const modelInfo = server.models.find(model => model.id === conversation.modelId);\n    if (modelInfo === undefined)\n      throw new Error(\n        `modelId '${conversation.modelId}' is not available on the inference server, valid model ids are: ${server.models.map(model => model.id).join(', ')}.`,\n      );\n\n    this.#conversationRegistry.submit(conversation.id, {\n      content: userInput,\n      options: options,\n      role: 'user',\n      id: this.#conversationRegistry.getUniqueId(),\n      timestamp: Date.now(),\n    } as UserChat);\n\n    if (!modelInfo.file?.path) throw new Error('model info has undefined file.');\n\n    const telemetry: Record<string, unknown> = {\n      conversationId: conversationId,\n      ...options,\n      promptLength: userInput.length,\n      modelId: getHash(modelInfo.id),\n    };\n\n    const streamProcessor = new AiStreamProcessor(conversationId, this.#conversationRegistry);\n    const cancelTokenId = this.cancellationTokenRegistry.createCancellationTokenSource(() => {\n      streamProcessor.abortController.abort('cancel');\n    });\n\n    const tools: ToolSet = {};\n    const mcpClients = await this.mcpServerManager.toMcpClients();\n    for (const client of mcpClients) {\n      const clientTools = await client.tools();\n      for (const entry of Object.entries(clientTools)) {\n        tools[entry[0]] = entry[1];\n      }\n    }\n\n    const openAiClient = createOpenAICompatible({\n      name: modelInfo.name,\n      baseURL: server.labels['api'] ?? 
`http://localhost:${server.connection.port}/v1`,\n    });\n    let model = openAiClient(modelInfo.name);\n    // Tool calling in OpenAI doesn't support streaming yet\n    if (Object.keys(tools).length > 0) {\n      model = wrapLanguageModel({ model, middleware: simulateStreamingMiddleware() });\n    }\n\n    const start = Date.now();\n    const finalBlock = (): void => {\n      this.telemetry.logUsage('playground.submit', telemetry);\n      this.cancellationTokenRegistry.delete(cancelTokenId);\n      Promise.all(mcpClients.map(client => client.close())).catch((e: unknown) =>\n        console.error(`Error closing MCP client`, e),\n      );\n    };\n    streamProcessor\n      .stream(model, tools, options)\n      .consumeStream()\n      .then(\n        () => {\n          this.telemetry.logUsage('playground.message.complete', {\n            duration: Date.now() - start,\n            modelId: getHash(conversation.modelId),\n          });\n          finalBlock();\n        },\n        (err: unknown) => {\n          console.error('Something went wrong while processing stream', err);\n          finalBlock();\n        },\n      );\n    return cancelTokenId;\n  }\n\n  getConversations(): Conversation[] {\n    return this.#conversationRegistry.getAll();\n  }\n\n  private getFreeName(): string {\n    const names = new Set(this.getConversations().map(c => c.name));\n    let i = 0;\n    let name: string;\n    do {\n      name = `playground ${++i}`;\n    } while (names.has(name));\n    return name;\n  }\n\n  private isNameFree(name: string): boolean {\n    return !this.getConversations().some(c => c.name === name);\n  }\n\n  dispose(): void {\n    this.#conversationRegistry.dispose();\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/podmanConnection.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport { PodmanConnection } from './podmanConnection';\nimport type {\n  ContainerProviderConnection,\n  Extension,\n  ProviderConnectionStatus,\n  ProviderContainerConnection,\n  ProviderEvent,\n  RegisterContainerConnectionEvent,\n  RunResult,\n  UnregisterContainerConnectionEvent,\n  UpdateContainerConnectionEvent,\n} from '@podman-desktop/api';\nimport { containerEngine, extensions, process, provider, EventEmitter, env } from '@podman-desktop/api';\nimport { VMType } from '@shared/models/IPodman';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { getPodmanCli, getPodmanMachineName } from '../utils/podman';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_PODMAN_CONNECTION_UPDATE } from '@shared/Messages';\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    EventEmitter: vi.fn(),\n    provider: {\n      onDidUnregisterContainerConnection: vi.fn(),\n      onDidRegisterContainerConnection: vi.fn(),\n      onDidUpdateContainerConnection: vi.fn(),\n    
  onDidUpdateProvider: vi.fn(),\n      getContainerConnections: vi.fn(),\n    },\n    process: {\n      exec: vi.fn(),\n    },\n    extensions: {\n      getExtension: vi.fn(),\n    },\n    containerEngine: {\n      listInfos: vi.fn(),\n    },\n    env: {\n      isLinux: vi.fn(),\n    },\n    navigation: {},\n  };\n});\n\nvi.mock('../utils/podman', () => {\n  return {\n    getPodmanCli: vi.fn(),\n    getPodmanMachineName: vi.fn(),\n    MIN_CPUS_VALUE: 4,\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n  vi.mocked(provider.getContainerConnections).mockReturnValue([]);\n  vi.mocked(getPodmanCli).mockReturnValue('podman-executable');\n  vi.mocked(getPodmanMachineName).mockImplementation(connection => connection.name);\n\n  const listeners: ((value: unknown) => void)[] = [];\n\n  vi.mocked(EventEmitter).mockReturnValue({\n    event: vi.fn().mockImplementation(callback => {\n      listeners.push(callback);\n    }),\n    fire: vi.fn().mockImplementation((content: unknown) => {\n      listeners.forEach(listener => listener(content));\n    }),\n  } as unknown as EventEmitter<unknown>);\n});\n\nconst providerContainerConnectionMock: ProviderContainerConnection = {\n  connection: {\n    type: 'podman',\n    status: () => 'started',\n    name: 'Podman Machine',\n    endpoint: {\n      socketPath: './socket-path',\n    },\n  },\n  providerId: 'podman',\n};\n\ndescribe('execute', () => {\n  test('execute should get the podman extension from api', async () => {\n    vi.mocked(extensions.getExtension).mockReturnValue(undefined);\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.execute(providerContainerConnectionMock.connection, ['ls']);\n    expect(extensions.getExtension).toHaveBeenCalledWith('podman-desktop.podman');\n  });\n\n  test('execute should call getPodmanCli if extension not available', async () => {\n    vi.mocked(extensions.getExtension).mockReturnValue(undefined);\n 
   const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.execute(providerContainerConnectionMock.connection, ['ls']);\n\n    expect(getPodmanCli).toHaveBeenCalledOnce();\n    expect(process.exec).toHaveBeenCalledWith('podman-executable', ['ls'], undefined);\n  });\n\n  test('options should be propagated to process execution when provided', async () => {\n    vi.mocked(extensions.getExtension).mockReturnValue(undefined);\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.execute(providerContainerConnectionMock.connection, ['ls'], {\n      isAdmin: true,\n    });\n\n    expect(getPodmanCli).toHaveBeenCalledOnce();\n    expect(process.exec).toHaveBeenCalledWith('podman-executable', ['ls'], {\n      isAdmin: true,\n    });\n  });\n\n  test('execute should use extension exec if available', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([providerContainerConnectionMock]);\n    const podmanAPI = {\n      exec: vi.fn(),\n    };\n    vi.mocked(extensions.getExtension).mockReturnValue({ exports: podmanAPI } as unknown as Extension<unknown>);\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.execute(providerContainerConnectionMock.connection, ['ls']);\n\n    expect(getPodmanCli).not.toHaveBeenCalledOnce();\n    expect(podmanAPI.exec).toHaveBeenCalledWith(['ls'], {\n      connection: providerContainerConnectionMock,\n    });\n  });\n\n  test('an error should be throw if the provided container connection do not exists', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([]);\n    const podmanAPI = {\n      exec: vi.fn(),\n    };\n    vi.mocked(extensions.getExtension).mockReturnValue({ exports: podmanAPI } as unknown as Extension<unknown>);\n    const manager = new PodmanConnection(rpcExtensionMock);\n\n    await expect(async () => {\n      await manager.execute(providerContainerConnectionMock.connection, ['ls'], {\n        isAdmin: true,\n 
     });\n    }).rejects.toThrowError('cannot find podman provider with connection name Podman Machine');\n  });\n\n  test('execute should propagate options to extension exec if available', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([providerContainerConnectionMock]);\n    const podmanAPI = {\n      exec: vi.fn(),\n    };\n    vi.mocked(extensions.getExtension).mockReturnValue({ exports: podmanAPI } as unknown as Extension<unknown>);\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.execute(providerContainerConnectionMock.connection, ['ls'], {\n      isAdmin: true,\n    });\n\n    expect(getPodmanCli).not.toHaveBeenCalledOnce();\n    expect(podmanAPI.exec).toHaveBeenCalledWith(['ls'], {\n      isAdmin: true,\n      connection: providerContainerConnectionMock,\n    });\n  });\n});\n\ndescribe('executeSSH', () => {\n  test('executeSSH should call getPodmanCli if extension not available', async () => {\n    vi.mocked(extensions.getExtension).mockReturnValue(undefined);\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.executeSSH(providerContainerConnectionMock.connection, ['ls']);\n\n    expect(getPodmanCli).toHaveBeenCalledOnce();\n    expect(process.exec).toHaveBeenCalledWith(\n      'podman-executable',\n      ['machine', 'ssh', providerContainerConnectionMock.connection.name, 'ls'],\n      undefined,\n    );\n  });\n\n  test('executeSSH should use extension exec if available', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([providerContainerConnectionMock]);\n    const podmanAPI = {\n      exec: vi.fn(),\n    };\n    vi.mocked(extensions.getExtension).mockReturnValue({ exports: podmanAPI } as unknown as Extension<unknown>);\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.executeSSH(providerContainerConnectionMock.connection, ['ls']);\n\n    expect(getPodmanCli).not.toHaveBeenCalledOnce();\n    
expect(podmanAPI.exec).toHaveBeenCalledWith(\n      ['machine', 'ssh', providerContainerConnectionMock.connection.name, 'ls'],\n      {\n        connection: providerContainerConnectionMock,\n      },\n    );\n  });\n\n  test('executeSSH should propagate options to extension exec if available', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([providerContainerConnectionMock]);\n    const podmanAPI = {\n      exec: vi.fn(),\n    };\n    vi.mocked(extensions.getExtension).mockReturnValue({ exports: podmanAPI } as unknown as Extension<unknown>);\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await manager.executeSSH(providerContainerConnectionMock.connection, ['ls'], {\n      isAdmin: true,\n    });\n\n    expect(getPodmanCli).not.toHaveBeenCalledOnce();\n    expect(podmanAPI.exec).toHaveBeenCalledWith(\n      ['machine', 'ssh', providerContainerConnectionMock.connection.name, 'ls'],\n      {\n        isAdmin: true,\n        connection: providerContainerConnectionMock,\n      },\n    );\n  });\n});\n\ndescribe('podman connection initialization', () => {\n  test('init should notify publisher', () => {\n    const manager = new PodmanConnection(rpcExtensionMock);\n    manager.init();\n\n    expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_PODMAN_CONNECTION_UPDATE, []);\n  });\n\n  test('init should register all provider events', () => {\n    const manager = new PodmanConnection(rpcExtensionMock);\n    manager.init();\n\n    expect(provider.onDidUnregisterContainerConnection).toHaveBeenCalledWith(expect.any(Function));\n    expect(provider.onDidRegisterContainerConnection).toHaveBeenCalledWith(expect.any(Function));\n    expect(provider.onDidUpdateContainerConnection).toHaveBeenCalledWith(expect.any(Function));\n    expect(provider.onDidUpdateProvider).toHaveBeenCalledWith(expect.any(Function));\n  });\n\n  test('init should fetch all container connections', () => {\n    const statusMock = 
vi.fn().mockReturnValue('started');\n    const providerContainerConnection: ProviderContainerConnection = {\n      connection: {\n        type: 'podman',\n        status: statusMock,\n        name: 'Podman Machine',\n        endpoint: {\n          socketPath: './socket-path',\n        },\n      },\n      providerId: 'podman',\n    };\n    vi.mocked(provider.getContainerConnections).mockReturnValue([providerContainerConnection]);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    manager.init();\n\n    expect(manager.getContainerProviderConnectionInfo()).toStrictEqual([\n      {\n        name: 'Podman Machine',\n        providerId: 'podman',\n        status: 'started',\n        type: 'podman',\n        vmType: VMType.UNKNOWN,\n      },\n    ]);\n\n    expect(manager.getContainerProviderConnections()).toStrictEqual([providerContainerConnection.connection]);\n    expect(statusMock).toHaveBeenCalled();\n  });\n});\n\nasync function getListeners(): Promise<{\n  onDidUnregisterContainerConnection: (e: UnregisterContainerConnectionEvent) => void;\n  onDidRegisterContainerConnection: (e: RegisterContainerConnectionEvent) => void;\n  onDidUpdateContainerConnection: (e: UpdateContainerConnectionEvent) => void;\n  onDidUpdateProvider: (e: ProviderEvent) => void;\n  podmanConnection: PodmanConnection;\n}> {\n  const onDidUnregisterContainerConnectionPromise: Promise<(e: UnregisterContainerConnectionEvent) => void> =\n    new Promise(resolve => {\n      vi.mocked(provider.onDidUnregisterContainerConnection).mockImplementation(\n        (fn: (e: UnregisterContainerConnectionEvent) => void) => {\n          resolve(fn);\n          return {\n            dispose: vi.fn(),\n          };\n        },\n      );\n    });\n\n  const onDidRegisterContainerConnectionPromise: Promise<(e: RegisterContainerConnectionEvent) => void> = new Promise(\n    resolve => {\n      vi.mocked(provider.onDidRegisterContainerConnection).mockImplementation(\n        (fn: (e: 
RegisterContainerConnectionEvent) => void) => {\n          resolve(fn);\n          return {\n            dispose: vi.fn(),\n          };\n        },\n      );\n    },\n  );\n\n  const onDidUpdateContainerConnectionPromise: Promise<(e: UpdateContainerConnectionEvent) => void> = new Promise(\n    resolve => {\n      vi.mocked(provider.onDidUpdateContainerConnection).mockImplementation(\n        (fn: (e: UpdateContainerConnectionEvent) => void) => {\n          resolve(fn);\n          return {\n            dispose: vi.fn(),\n          };\n        },\n      );\n    },\n  );\n\n  const onDidUpdateProviderPromise: Promise<(e: ProviderEvent) => void> = new Promise(resolve => {\n    vi.mocked(provider.onDidUpdateProvider).mockImplementation((fn: (e: ProviderEvent) => void) => {\n      resolve(fn);\n      return {\n        dispose: vi.fn(),\n      };\n    });\n  });\n\n  const manager = new PodmanConnection(rpcExtensionMock);\n  manager.init();\n\n  return {\n    onDidUnregisterContainerConnection: await onDidUnregisterContainerConnectionPromise,\n    onDidRegisterContainerConnection: await onDidRegisterContainerConnectionPromise,\n    onDidUpdateContainerConnection: await onDidUpdateContainerConnectionPromise,\n    onDidUpdateProvider: await onDidUpdateProviderPromise,\n    podmanConnection: manager,\n  };\n}\n\ndescribe('container connection event', () => {\n  test('onDidUnregisterContainerConnection should refresh and notify webview', async () => {\n    const { onDidUnregisterContainerConnection } = await getListeners();\n\n    // simulate onDidUnregisterContainerConnection event\n    onDidUnregisterContainerConnection({ providerId: 'podman' });\n\n    // ensure the webview has been notified\n    await vi.waitFor(() => {\n      expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_PODMAN_CONNECTION_UPDATE, []);\n    });\n  });\n\n  test('onDidUnregisterContainerConnection should fire PodmanConnectionEvent', async () => {\n    const { onDidUnregisterContainerConnection, 
podmanConnection } = await getListeners();\n\n    // register event listener\n    const onPodmanConnectionEventListenerMock = vi.fn();\n    podmanConnection.onPodmanConnectionEvent(onPodmanConnectionEventListenerMock);\n\n    // simulate onDidUnregisterContainerConnection event\n    onDidUnregisterContainerConnection({ providerId: 'podman' });\n\n    expect(onPodmanConnectionEventListenerMock).toHaveBeenCalledWith({\n      status: 'unregister',\n    });\n  });\n\n  test('onDidRegisterContainerConnection should notify webview', async () => {\n    const { onDidRegisterContainerConnection, podmanConnection } = await getListeners();\n\n    // simulate a onDidRegisterContainerConnection event\n    onDidRegisterContainerConnection({\n      providerId: 'podman',\n      connection: {\n        type: 'podman',\n        name: 'Podman Machine',\n        status: () => 'started',\n        endpoint: {\n          socketPath: './socket-path',\n        },\n      },\n    });\n\n    // ensure the webview has been notified\n    await vi.waitFor(() => {\n      expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_PODMAN_CONNECTION_UPDATE, [\n        {\n          providerId: 'podman',\n          name: 'Podman Machine',\n          status: 'started',\n          type: 'podman',\n          vmType: VMType.UNKNOWN,\n        },\n      ]);\n    });\n\n    // ensure it has properly been added\n    expect(podmanConnection.getContainerProviderConnectionInfo().length).toBe(1);\n  });\n\n  test('onDidRegisterContainerConnection should fire PodmanConnectionEvent', async () => {\n    const { onDidRegisterContainerConnection, podmanConnection } = await getListeners();\n\n    // register event listener\n    const onPodmanConnectionEventListenerMock = vi.fn();\n    podmanConnection.onPodmanConnectionEvent(onPodmanConnectionEventListenerMock);\n\n    // simulate a onDidRegisterContainerConnection event\n    onDidRegisterContainerConnection({\n      providerId: 'podman',\n      connection: {\n        type: 
'podman',\n        name: 'Podman Machine',\n        status: () => 'started',\n        endpoint: {\n          socketPath: './socket-path',\n        },\n      },\n    });\n\n    expect(onPodmanConnectionEventListenerMock).toHaveBeenCalledWith({\n      status: 'register',\n    });\n  });\n\n  test('onDidUpdateProvider should refresh and notify webview', async () => {\n    const { onDidUpdateProvider } = await getListeners();\n\n    // simulate onDidUnregisterContainerConnection event\n    onDidUpdateProvider({ name: 'podman', status: 'unknown', id: 'podman' });\n\n    // ensure the webview has been notified\n    await vi.waitFor(() => {\n      expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_PODMAN_CONNECTION_UPDATE, []);\n    });\n  });\n\n  test('onDidUpdateContainerConnection should refresh and notify webview', async () => {\n    const { onDidUpdateContainerConnection } = await getListeners();\n\n    // simulate onDidUnregisterContainerConnection event\n    onDidUpdateContainerConnection({\n      status: 'started',\n      providerId: 'podman',\n      connection: {\n        type: 'podman',\n        name: 'Podman Machine',\n        status: () => 'started',\n        endpoint: {\n          socketPath: './socket-path',\n        },\n      },\n    });\n\n    // ensure the webview has been notified\n    await vi.waitFor(() => {\n      expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_PODMAN_CONNECTION_UPDATE, []);\n    });\n  });\n});\n\ndescribe('getVMType', () => {\n  test('empty response should throw an error', async () => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: '[]',\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await expect(() => manager.getVMType('machine')).rejects.toThrowError(\n      'podman machine list provided an empty array',\n    );\n  });\n\n  test('empty array should return UNKNOWN when no name is provided', async () => {\n    
vi.mocked(process.exec).mockResolvedValue({\n      stdout: '[]',\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    expect(await manager.getVMType()).toBe(VMType.UNKNOWN);\n  });\n\n  test('malformed response should throw an error', async () => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: '{}',\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await expect(() => manager.getVMType()).rejects.toThrowError('podman machine list provided a malformed response');\n  });\n\n  test('array with length greater than one require name', async () => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: '[{}, {}]',\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await expect(() => manager.getVMType()).rejects.toThrowError(\n      'name need to be provided when more than one podman machine is configured.',\n    );\n  });\n\n  test('argument name should be used to filter the machine', async () => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: JSON.stringify([\n        {\n          Name: 'machine-1',\n          VMType: VMType.QEMU,\n        },\n        {\n          Name: 'machine-2',\n          VMType: VMType.APPLEHV,\n        },\n      ]),\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    expect(await manager.getVMType('machine-2')).toBe(VMType.APPLEHV);\n  });\n\n  test('invalid name should throw an error', async () => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: JSON.stringify([\n        {\n          Name: 'machine-1',\n        },\n        {\n          Name: 'machine-2',\n        },\n      ]),\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    await expect(() => manager.getVMType('potatoes')).rejects.toThrowError(\n      'cannot find matching podman machine 
with name potatoes',\n    );\n  });\n\n  test('single machine should return its VMType', async () => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: JSON.stringify([\n        {\n          Name: 'machine-1',\n          VMType: VMType.WSL,\n        },\n      ]),\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    expect(await manager.getVMType()).toBe(VMType.WSL);\n  });\n\n  test('unknown string should return UNKNOWN', async () => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: JSON.stringify([\n        {\n          Name: 'machine-1',\n          VMType: 'fake-content',\n        },\n      ]),\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    expect(await manager.getVMType()).toBe(VMType.UNKNOWN);\n  });\n\n  test.each(Object.values(VMType) as string[])('%s type should be the expected result', async vmtype => {\n    vi.mocked(process.exec).mockResolvedValue({\n      stdout: JSON.stringify([\n        {\n          VMType: vmtype,\n        },\n      ]),\n    } as unknown as RunResult);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    expect(await manager.getVMType()).toBe(vmtype);\n  });\n});\n\nconst modelMock: ModelInfo & { memory: number } = {\n  name: 'dummy',\n  memory: 10,\n  description: '',\n  id: 'dummy-id',\n  properties: {},\n};\n\ndescribe('checkContainerConnectionStatusAndResources', () => {\n  test('return native on Linux', async () => {\n    const manager = new PodmanConnection(rpcExtensionMock);\n    vi.mocked(env).isLinux = true;\n\n    const result = await manager.checkContainerConnectionStatusAndResources({\n      model: modelMock,\n      context: 'inference',\n    });\n    expect(result).toStrictEqual({\n      status: 'native',\n      canRedirect: expect.any(Boolean),\n    });\n  });\n\n  test('return noMachineInfo if there is no running podman connection', async () => {\n    const manager = new 
PodmanConnection(rpcExtensionMock);\n    vi.mocked(env).isLinux = false;\n\n    const result = await manager.checkContainerConnectionStatusAndResources({\n      model: modelMock,\n      context: 'inference',\n    });\n    expect(result).toStrictEqual({\n      status: 'no-machine',\n      canRedirect: expect.any(Boolean),\n    });\n  });\n\n  test('return noMachineInfo if we are not able to retrieve any info about the podman connection', async () => {\n    const manager = new PodmanConnection(rpcExtensionMock);\n    vi.mocked(env).isLinux = false;\n\n    vi.mocked(containerEngine.listInfos).mockResolvedValue([]);\n    const result = await manager.checkContainerConnectionStatusAndResources({\n      model: modelMock,\n      context: 'inference',\n    });\n    expect(result).toStrictEqual({\n      status: 'no-machine',\n      canRedirect: expect.any(Boolean),\n    });\n  });\n\n  test('return lowResourceMachineInfo if the podman connection has not enough cpus', async () => {\n    const manager = new PodmanConnection(rpcExtensionMock);\n    vi.mocked(env).isLinux = false;\n\n    vi.mocked(provider.getContainerConnections).mockReturnValue([\n      {\n        connection: {\n          type: 'podman',\n          status: (): ProviderConnectionStatus => 'started',\n          name: 'Podman Machine',\n          endpoint: {\n            socketPath: './socket-path',\n          },\n        },\n        providerId: 'podman',\n      },\n    ]);\n\n    vi.mocked(containerEngine.listInfos).mockResolvedValue([\n      {\n        engineId: 'engineId',\n        engineName: 'enginerName',\n        engineType: 'podman',\n        cpus: 3,\n        memory: 20,\n        memoryUsed: 0,\n      },\n    ]);\n\n    manager.init();\n\n    const result = await manager.checkContainerConnectionStatusAndResources({\n      model: modelMock,\n      context: 'inference',\n    });\n    expect(result).toStrictEqual({\n      status: 'low-resources',\n      canRedirect: expect.any(Boolean),\n      name: 'Podman 
Machine',\n      canEdit: false,\n      cpus: 3,\n      memoryIdle: 20,\n      cpusExpected: 4,\n      memoryExpected: 11,\n    });\n  });\n\n  test('return runningMachineInfo if the podman connection has enough resources', async () => {\n    const manager = new PodmanConnection(rpcExtensionMock);\n    vi.mocked(env).isLinux = false;\n\n    vi.mocked(provider.getContainerConnections).mockReturnValue([\n      {\n        connection: {\n          type: 'podman',\n          status: (): ProviderConnectionStatus => 'started',\n          name: 'Podman Machine',\n          endpoint: {\n            socketPath: './socket-path',\n          },\n        },\n        providerId: 'podman',\n      },\n    ]);\n\n    vi.mocked(containerEngine.listInfos).mockResolvedValue([\n      {\n        engineId: 'engineId',\n        engineName: 'enginerName',\n        engineType: 'podman',\n        cpus: 12,\n        memory: 20,\n        memoryUsed: 0,\n      },\n    ]);\n\n    manager.init();\n\n    const result = await manager.checkContainerConnectionStatusAndResources({\n      model: modelMock,\n      context: 'inference',\n    });\n    expect(result).toStrictEqual({\n      name: 'Podman Machine',\n      status: 'running',\n      canRedirect: expect.any(Boolean),\n    });\n  });\n});\n\ndescribe('getConnectionByEngineId', () => {\n  test('no provider should raise an error', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([]);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    manager.init();\n\n    await expect(() => manager.getConnectionByEngineId('fake engine')).rejects.toThrowError('connection not found');\n\n    expect(containerEngine.listInfos).not.toHaveBeenCalled();\n  });\n\n  test('empty listInfos response should raise an error', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([\n      {\n        connection: {\n          type: 'podman',\n          status: (): ProviderConnectionStatus => 'started',\n       
   name: 'Podman Machine',\n          endpoint: {\n            socketPath: './socket-path',\n          },\n        },\n        providerId: 'podman',\n      },\n    ]);\n\n    vi.mocked(containerEngine.listInfos).mockResolvedValue([]);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    manager.init();\n\n    await expect(() => manager.getConnectionByEngineId('fake engine')).rejects.toThrowError('connection not found');\n\n    expect(containerEngine.listInfos).toHaveBeenCalled();\n  });\n\n  test('invalid engineId should raise an error', async () => {\n    vi.mocked(provider.getContainerConnections).mockReturnValue([\n      {\n        connection: {\n          type: 'podman',\n          status: (): ProviderConnectionStatus => 'started',\n          name: 'Podman Machine',\n          endpoint: {\n            socketPath: './socket-path',\n          },\n        },\n        providerId: 'podman',\n      },\n    ]);\n\n    vi.mocked(containerEngine.listInfos).mockResolvedValue([\n      {\n        engineId: 'engineId',\n        engineName: 'enginerName',\n        engineType: 'podman',\n        cpus: 12,\n        memory: 20,\n        memoryUsed: 0,\n      },\n    ]);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    manager.init();\n\n    await expect(() => manager.getConnectionByEngineId('fake engine')).rejects.toThrowError('connection not found');\n\n    expect(containerEngine.listInfos).toHaveBeenCalled();\n  });\n\n  test('valid engineId should return matching connection', async () => {\n    const connectionMock: ContainerProviderConnection = {\n      type: 'podman',\n      status: () => 'started',\n      name: 'Podman Machine',\n      endpoint: {\n        socketPath: './socket-path',\n      },\n    };\n    vi.mocked(provider.getContainerConnections).mockReturnValue([\n      {\n        connection: connectionMock,\n        providerId: 'podman',\n      },\n    ]);\n\n    vi.mocked(containerEngine.listInfos).mockResolvedValue([\n      {\n    
    engineId: 'engineId',\n        engineName: 'enginerName',\n        engineType: 'podman',\n        cpus: 12,\n        memory: 20,\n        memoryUsed: 0,\n      },\n    ]);\n\n    const manager = new PodmanConnection(rpcExtensionMock);\n    manager.init();\n\n    const connection = await manager.getConnectionByEngineId('engineId');\n\n    expect(containerEngine.listInfos).toHaveBeenCalled();\n    expect(connection).toBe(connectionMock);\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/podmanConnection.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type {\n  ContainerProviderConnection,\n  Disposable,\n  Event,\n  RegisterContainerConnectionEvent,\n  UpdateContainerConnectionEvent,\n  RunResult,\n  RunOptions,\n  ProviderContainerConnection,\n} from '@podman-desktop/api';\nimport { containerEngine, env, navigation, EventEmitter, process, provider, extensions } from '@podman-desktop/api';\nimport { getPodmanMachineName, type MachineJSON, MIN_CPUS_VALUE, getPodmanCli } from '../utils/podman';\nimport { VMType } from '@shared/models/IPodman';\nimport { Publisher } from '../utils/Publisher';\nimport type {\n  CheckContainerConnectionResourcesOptions,\n  ContainerConnectionInfo,\n  ContainerProviderConnectionInfo,\n} from '@shared/models/IContainerConnectionInfo';\nimport { MSG_PODMAN_CONNECTION_UPDATE } from '@shared/Messages';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nexport interface PodmanConnectionEvent {\n  status: 'stopped' | 'started' | 'unregister' | 'register';\n}\n\nexport interface PodmanRunOptions extends RunOptions {\n  connection?: ProviderContainerConnection;\n}\n\nexport class PodmanConnection extends Publisher<ContainerProviderConnectionInfo[]> implements 
Disposable {\n  // Map of providerId with corresponding connections\n  #providers: Map<string, ContainerProviderConnection[]>;\n  #disposables: Disposable[];\n\n  private readonly _onPodmanConnectionEvent = new EventEmitter<PodmanConnectionEvent>();\n  readonly onPodmanConnectionEvent: Event<PodmanConnectionEvent> = this._onPodmanConnectionEvent.event;\n\n  constructor(rpcExtension: RpcExtension) {\n    super(rpcExtension, MSG_PODMAN_CONNECTION_UPDATE, () => this.getContainerProviderConnectionInfo());\n    this.#providers = new Map();\n    this.#disposables = [];\n  }\n\n  /**\n   * Execute the podman cli with the arguments provided\n   *\n   * @example\n   * ```\n   * const result = await podman.execute(connection, ['machine', 'ls', '--format=json']);\n   * ```\n   * @param connection\n   * @param args\n   * @param options\n   */\n  execute(connection: ContainerProviderConnection, args: string[], options?: RunOptions): Promise<RunResult> {\n    const podman = extensions.getExtension('podman-desktop.podman');\n    if (!podman) {\n      console.warn('cannot find podman extension api');\n      return this.executeLegacy(args, options);\n    }\n\n    const podmanApi: {\n      exec(args: string[], options?: PodmanRunOptions): Promise<RunResult>;\n    } = podman.exports;\n\n    return podmanApi.exec(args, {\n      ...options,\n      connection: this.getProviderContainerConnection(connection),\n    });\n  }\n\n  /**\n   * Execute a command inside the podman machine\n   *\n   * @example\n   * ```\n   * const result = await podman.executeSSH(connection, ['ls', '/dev']);\n   * ```\n   * @param connection\n   * @param args\n   * @param options\n   */\n  executeSSH(connection: ContainerProviderConnection, args: string[], options?: RunOptions): Promise<RunResult> {\n    return this.execute(connection, ['machine', 'ssh', this.getNameLegacyCompatibility(connection), ...args], options);\n  }\n\n  /**\n   * Before 1.13, the podman extension was not exposing any api.\n   *\n   * 
Therefore, to support old version we need to get the podman executable ourself\n   * @deprecated\n   */\n  protected executeLegacy(args: string[], options?: RunOptions): Promise<RunResult> {\n    return process.exec(getPodmanCli(), [...args], options);\n  }\n\n  /**\n   * Before 1.13, the {@link ContainerProviderConnection.name} field was used as friendly user\n   * field also.\n   *\n   * Therefore, we could have `Podman Machine Default` as name, where the real machine was `podman-machine-default`.\n   * @param connection\n   * @deprecated\n   */\n  protected getNameLegacyCompatibility(connection: ContainerProviderConnection): string {\n    return getPodmanMachineName(connection);\n  }\n\n  getContainerProviderConnections(): ContainerProviderConnection[] {\n    return Array.from(this.#providers.values()).flat();\n  }\n\n  /**\n   * This method flatten the\n   */\n  getContainerProviderConnectionInfo(): ContainerProviderConnectionInfo[] {\n    const output: ContainerProviderConnectionInfo[] = [];\n\n    for (const [providerId, connections] of Array.from(this.#providers.entries())) {\n      output.push(\n        ...connections.map(\n          (connection): ContainerProviderConnectionInfo => ({\n            providerId: providerId,\n            name: connection.name,\n            vmType: this.parseVMType(connection.vmType),\n            type: 'podman',\n            status: connection.status(),\n          }),\n        ),\n      );\n    }\n\n    return output;\n  }\n\n  init(): void {\n    // setup listeners\n    this.listen();\n\n    this.refreshProviders();\n  }\n\n  dispose(): void {\n    this.#disposables.forEach(disposable => disposable.dispose());\n  }\n\n  /**\n   * This method allow us to get the ProviderContainerConnection given a ContainerProviderConnection\n   * @param connection\n   * @protected\n   */\n  protected getProviderContainerConnection(connection: ContainerProviderConnection): ProviderContainerConnection {\n    const providers: 
ProviderContainerConnection[] = provider.getContainerConnections();\n\n    const podmanProvider = providers\n      .filter(({ connection }) => connection.type === 'podman')\n      .find(provider => provider.connection.name === connection.name);\n    if (!podmanProvider) throw new Error(`cannot find podman provider with connection name ${connection.name}`);\n\n    return podmanProvider;\n  }\n\n  protected refreshProviders(): void {\n    // clear all providers\n    this.#providers.clear();\n\n    const providers: ProviderContainerConnection[] = provider.getContainerConnections();\n\n    // register the podman container connection\n    providers\n      .filter(({ connection }) => connection.type === 'podman')\n      .forEach(({ providerId, connection }) => {\n        this.#providers.set(providerId, [connection, ...(this.#providers.get(providerId) ?? [])]);\n      });\n\n    // notify\n    this.notify();\n  }\n\n  private listen(): void {\n    // capture unregister event\n    this.#disposables.push(\n      provider.onDidUnregisterContainerConnection(() => {\n        this.refreshProviders();\n        this._onPodmanConnectionEvent.fire({\n          status: 'unregister',\n        });\n      }),\n    );\n\n    this.#disposables.push(\n      provider.onDidRegisterContainerConnection(({ providerId, connection }: RegisterContainerConnectionEvent) => {\n        if (connection.type !== 'podman') {\n          return;\n        }\n\n        // update connection\n        this.#providers.set(providerId, [connection, ...(this.#providers.get(providerId) ?? 
[])]);\n        this.notify();\n        this._onPodmanConnectionEvent.fire({\n          status: 'register',\n        });\n      }),\n    );\n\n    this.#disposables.push(\n      provider.onDidUpdateContainerConnection(({ status }: UpdateContainerConnectionEvent) => {\n        switch (status) {\n          case 'started':\n          case 'stopped':\n            this._onPodmanConnectionEvent.fire({\n              status: status,\n            });\n            this.notify();\n            break;\n          default:\n            break;\n        }\n      }),\n    );\n\n    this.#disposables.push(\n      provider.onDidUpdateProvider(() => {\n        this.refreshProviders();\n      }),\n    );\n  }\n\n  protected parseVMType(vmtype: string | undefined): VMType {\n    if (!vmtype) return VMType.UNKNOWN;\n    const type = Object.values(VMType).find(s => s === vmtype);\n    if (type === undefined) {\n      return VMType.UNKNOWN;\n    }\n    return type;\n  }\n\n  /**\n   * Get the VMType of the podman machine\n   * @param name the machine name, from {@link ContainerProviderConnection}\n   * @deprecated should uses the `getContainerProviderConnectionInfo()`\n   */\n  async getVMType(name?: string): Promise<VMType> {\n    const { stdout } = await process.exec(getPodmanCli(), ['machine', 'list', '--format', 'json']);\n\n    const parsed: unknown = JSON.parse(stdout);\n    if (!Array.isArray(parsed)) throw new Error('podman machine list provided a malformed response');\n    if (parsed.length === 0 && name) throw new Error('podman machine list provided an empty array');\n    // On Linux we might not have any machine\n    if (parsed.length === 0) return VMType.UNKNOWN;\n    if (parsed.length > 1 && !name)\n      throw new Error('name need to be provided when more than one podman machine is configured.');\n\n    let output: MachineJSON;\n    if (name) {\n      output = parsed.find(machine => typeof machine === 'object' && 'Name' in machine && machine.Name === name);\n      if 
(!output) throw new Error(`cannot find matching podman machine with name ${name}`);\n    } else {\n      output = parsed[0];\n    }\n\n    return this.parseVMType(output.VMType);\n  }\n\n  getContainerProviderConnection(connection: ContainerProviderConnectionInfo): ContainerProviderConnection {\n    const output = (this.#providers.get(connection.providerId) ?? []).find(\n      mConnection => connection.name === mConnection.name,\n    );\n    if (!output) throw new Error(`no container provider connection found for connection name ${connection.name}`);\n    return output;\n  }\n\n  findRunningContainerProviderConnection(): ContainerProviderConnection | undefined {\n    for (const connections of Array.from(this.#providers.values())) {\n      const result = connections.find(connection => connection.status() === 'started');\n      if (result) return result;\n    }\n    return undefined;\n  }\n\n  /**\n   * This method return the ContainerProviderConnection corresponding to an engineId\n   * @param engineId\n   */\n  async getConnectionByEngineId(engineId: string): Promise<ContainerProviderConnection> {\n    const connections = Array.from(this.#providers.values()).flat();\n    for (const connection of connections) {\n      const infos = await containerEngine.listInfos({ provider: connection });\n      if (infos.length === 0) continue;\n\n      if (infos[0].engineId === engineId) return connection;\n    }\n    throw new Error('connection not found');\n  }\n\n  async checkContainerConnectionStatusAndResources(\n    options: CheckContainerConnectionResourcesOptions,\n  ): Promise<ContainerConnectionInfo> {\n    // starting from podman desktop 1.10 we have the navigate functions\n    const hasNavigateFunction = !!navigation.navigateToResources;\n\n    // if we do not precise the connection and are on linux we assume native usage\n    if (env.isLinux && !options.connection) {\n      return {\n        status: 'native',\n        canRedirect: hasNavigateFunction,\n      };\n    
}\n\n    let connection: ContainerProviderConnection | undefined = undefined;\n    if (options.connection) {\n      connection = this.getContainerProviderConnection(options.connection);\n    } else {\n      connection = this.findRunningContainerProviderConnection();\n    }\n\n    if (!connection) {\n      return {\n        status: 'no-machine',\n        canRedirect: hasNavigateFunction,\n      };\n    }\n\n    const engineInfos = await containerEngine.listInfos({\n      provider: connection,\n    });\n\n    if (engineInfos.length === 0) {\n      return {\n        status: 'no-machine',\n        canRedirect: hasNavigateFunction,\n      };\n    }\n\n    const engineInfo = engineInfos[0];\n    if (!engineInfo) {\n      return {\n        status: 'no-machine',\n        canRedirect: hasNavigateFunction,\n      };\n    }\n\n    const hasCpus = engineInfo.cpus !== undefined && engineInfo.cpus >= MIN_CPUS_VALUE;\n    const multiplier = options.context === 'recipe' ? 1.25 : 1.1;\n\n    const memoryExpected = options.model.memory * multiplier;\n\n    let hasMemory: boolean = true;\n    if (engineInfo.memory !== undefined && engineInfo.memoryUsed !== undefined) {\n      hasMemory = engineInfo.memory - engineInfo.memoryUsed >= memoryExpected;\n    }\n\n    let memoryIdle: number = 0;\n    if (engineInfo.memory !== undefined && engineInfo.memoryUsed !== undefined) {\n      memoryIdle = engineInfo.memory - engineInfo.memoryUsed;\n    }\n\n    if (!hasCpus || !hasMemory) {\n      return {\n        name: connection.name,\n        cpus: engineInfo.cpus ?? 0,\n        memoryIdle: memoryIdle,\n        cpusExpected: MIN_CPUS_VALUE,\n        memoryExpected: memoryExpected,\n        status: 'low-resources',\n        canEdit: !!connection.lifecycle?.edit,\n        canRedirect: hasNavigateFunction,\n      };\n    }\n\n    return {\n      name: connection.name,\n      status: 'running',\n      canRedirect: hasNavigateFunction,\n    };\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/recipes/BuilderManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { ContainerConfig } from '../../models/AIConfig';\nimport fs from 'node:fs';\nimport { BuilderManager } from './BuilderManager';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { ContainerProviderConnection, ImageInfo } from '@podman-desktop/api';\nimport { containerEngine } from '@podman-desktop/api';\nimport { VMType } from '@shared/models/IPodman';\n\nconst taskRegistry = {\n  getTask: vi.fn(),\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n  delete: vi.fn(),\n  deleteAll: vi.fn(),\n  getTasks: vi.fn(),\n  getTasksByLabels: vi.fn(),\n  deleteByLabels: vi.fn(),\n} as unknown as TaskRegistry;\n\nvi.mock('@podman-desktop/api', () => ({\n  containerEngine: {\n    buildImage: vi.fn(),\n    listImages: vi.fn(),\n  },\n}));\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'Podman Machine',\n  vmType: VMType.UNKNOWN,\n} as unknown as ContainerProviderConnection;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(taskRegistry.createTask).mockImplementation((name, state, labels) 
=> ({\n    id: 'random',\n    name: name,\n    state: state,\n    labels: labels ?? {},\n    error: undefined,\n  }));\n});\n\ndescribe('buildImages', () => {\n  const recipe = {\n    id: 'recipe1',\n  } as Recipe;\n  const containers: ContainerConfig[] = [\n    {\n      name: 'container1',\n      contextdir: 'contextdir1',\n      containerfile: 'Containerfile',\n      arch: ['amd64'],\n      modelService: false,\n      gpu_env: [],\n      ports: [8080],\n    },\n  ];\n  const manager = new BuilderManager(taskRegistry);\n\n  test('setTaskState should be called with error if context does not exist', async () => {\n    vi.spyOn(fs, 'existsSync').mockReturnValue(false);\n    vi.mocked(containerEngine.listImages).mockRejectedValue([]);\n    await expect(manager.build(connectionMock, recipe, containers, 'config')).rejects.toThrow(\n      'Context configured does not exist.',\n    );\n  });\n  test('setTaskState should be called with error if buildImage execution fails', async () => {\n    vi.spyOn(fs, 'existsSync').mockReturnValue(true);\n    vi.mocked(containerEngine.buildImage).mockRejectedValue('error');\n    vi.mocked(containerEngine.listImages).mockRejectedValue([]);\n\n    await expect(manager.build(connectionMock, recipe, containers, 'config')).rejects.toThrow(\n      'Something went wrong while building the image: error',\n    );\n    expect(taskRegistry.updateTask).toBeCalledWith({\n      error: 'Something went wrong while building the image: error',\n      name: 'Building container1',\n      id: expect.any(String),\n      state: expect.any(String),\n      labels: {},\n    });\n  });\n  test('setTaskState should be called with error if unable to find the image after built', async () => {\n    vi.spyOn(fs, 'existsSync').mockReturnValue(true);\n    vi.mocked(containerEngine.buildImage).mockResolvedValue({});\n    vi.mocked(containerEngine.listImages).mockResolvedValue([]);\n\n    await expect(manager.build(connectionMock, recipe, containers, 
'config')).rejects.toThrow(\n      'no image found for container1:latest',\n    );\n    expect(taskRegistry.updateTask).toBeCalledWith({\n      error: 'no image found for container1:latest',\n      name: 'Building container1',\n      id: expect.any(String),\n      state: expect.any(String),\n      labels: {},\n    });\n  });\n  test('succeed if building image do not fail', async () => {\n    vi.spyOn(fs, 'existsSync').mockReturnValue(true);\n    vi.mocked(containerEngine.buildImage).mockResolvedValue({});\n    vi.mocked(containerEngine.listImages).mockResolvedValue([\n      {\n        RepoTags: ['recipe1-container1:latest'],\n        engineId: 'engine',\n        Id: 'id1',\n      } as unknown as ImageInfo,\n    ]);\n\n    const imageInfoList = await manager.build(connectionMock, recipe, containers, 'config');\n    expect(taskRegistry.updateTask).toBeCalledWith({\n      name: 'Building container1',\n      id: expect.any(String),\n      state: 'success',\n      labels: {},\n    });\n    expect(imageInfoList.length).toBe(1);\n    expect(imageInfoList[0].ports.length).toBe(1);\n    expect(imageInfoList[0].ports[0]).equals('8080');\n\n    expect(containerEngine.buildImage).toHaveBeenCalledWith(\n      'contextdir1',\n      expect.any(Function),\n      expect.objectContaining({\n        provider: connectionMock,\n      }),\n    );\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/recipes/BuilderManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport {\n  type BuildImageOptions,\n  type Disposable,\n  containerEngine,\n  type ContainerProviderConnection,\n} from '@podman-desktop/api';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { RecipeImage, Recipe } from '@shared/models/IRecipe';\nimport type { ContainerConfig } from '../../models/AIConfig';\nimport type { Task } from '@shared/models/ITask';\nimport path from 'node:path';\nimport { getParentDirectory } from '../../utils/pathUtils';\nimport fs from 'node:fs';\nimport { getImageTag } from '../../utils/imagesUtils';\nimport {\n  IMAGE_LABEL_APP_PORTS,\n  IMAGE_LABEL_APPLICATION_NAME,\n  IMAGE_LABEL_MODEL_SERVICE,\n  IMAGE_LABEL_RECIPE_ID,\n} from '../../utils/RecipeConstants';\n\nexport class BuilderManager implements Disposable {\n  private controller: Map<string, AbortController> = new Map();\n\n  constructor(private taskRegistry: TaskRegistry) {}\n\n  /**\n   * On dispose, the builder will abort all current build.\n   */\n  dispose(): void {\n    // eslint-disable-next-line sonarjs/array-callback-without-return\n    Array.from(this.controller.values()).every(controller => controller.abort('disposing builder 
manager'));\n  }\n\n  async build(\n    connection: ContainerProviderConnection,\n    recipe: Recipe,\n    containers: ContainerConfig[],\n    configPath: string,\n    labels: { [key: string]: string } = {},\n  ): Promise<RecipeImage[]> {\n    const containerTasks: { [key: string]: Task } = Object.fromEntries(\n      containers.map(container => [\n        container.name,\n        this.taskRegistry.createTask(`Building ${container.name}`, 'loading', labels),\n      ]),\n    );\n\n    const imageInfoList: RecipeImage[] = [];\n\n    // Promise all the build images\n    const abortController = new AbortController();\n\n    // only one build per recipe is supported\n    if (this.controller.has(recipe.id)) {\n      this.controller.get(recipe.id)?.abort('multiple build not supported.');\n    }\n\n    this.controller.set(recipe.id, abortController);\n\n    try {\n      await Promise.all(\n        containers.map(container => {\n          const task = containerTasks[container.name];\n\n          // We use the parent directory of our configFile as the rootdir, then we append the contextDir provided\n          const context = path.join(getParentDirectory(configPath), container.contextdir);\n          console.log(`Application Manager using context ${context} for container ${container.name}`);\n\n          // Ensure the context provided exist otherwise throw an Error\n          if (!fs.existsSync(context)) {\n            task.error = 'The context provided does not exist.';\n            this.taskRegistry.updateTask(task);\n            throw new Error('Context configured does not exist.');\n          }\n\n          const imageTag = getImageTag(recipe, container);\n          const buildOptions: BuildImageOptions = {\n            provider: connection,\n            containerFile: container.containerfile,\n            tag: imageTag,\n            labels: {\n              ...labels,\n              [IMAGE_LABEL_RECIPE_ID]: recipe.id,\n              [IMAGE_LABEL_MODEL_SERVICE]: 
container.modelService ? 'true' : 'false',\n              [IMAGE_LABEL_APPLICATION_NAME]: container.name,\n              [IMAGE_LABEL_APP_PORTS]: (container.ports ?? []).join(','),\n            },\n            abortController: abortController,\n          };\n\n          let error = false;\n          return containerEngine\n            .buildImage(\n              context,\n              (event, data) => {\n                // todo: do something with the event\n                if (event === 'error' || (event === 'finish' && data !== '')) {\n                  console.error('Something went wrong while building the image: ', data);\n                  task.error = `Something went wrong while building the image: ${data}`;\n                  this.taskRegistry.updateTask(task);\n                  error = true;\n                }\n              },\n              buildOptions,\n            )\n            .catch((err: unknown) => {\n              task.error = `Something went wrong while building the image: ${String(err)}`;\n              this.taskRegistry.updateTask(task);\n              throw new Error(`Something went wrong while building the image: ${String(err)}`);\n            })\n            .then(() => {\n              if (error) {\n                throw new Error(`Something went wrong while building the image: ${imageTag}`);\n              }\n            });\n        }),\n      );\n    } catch (err: unknown) {\n      abortController.abort();\n      throw err;\n    } finally {\n      // remove abort controller\n      this.controller.delete(recipe.id);\n    }\n\n    // after image are built we return their data\n    const images = await containerEngine.listImages({ provider: connection });\n    await Promise.all(\n      containers.map(async container => {\n        const task = containerTasks[container.name];\n        const imageTag = getImageTag(recipe, container);\n\n        const image = images.find(im => {\n          return im.RepoTags?.some(tag => 
tag.endsWith(imageTag));\n        });\n\n        if (!image) {\n          task.error = `no image found for ${container.name}:latest`;\n          this.taskRegistry.updateTask(task);\n          throw new Error(`no image found for ${container.name}:latest`);\n        }\n\n        let imageName: string | undefined = undefined;\n        if (image.RepoTags && image.RepoTags.length > 0) {\n          imageName = image.RepoTags[0];\n        }\n\n        imageInfoList.push({\n          id: image.Id,\n          engineId: image.engineId,\n          name: imageName,\n          modelService: container.modelService,\n          ports: container.ports?.map(p => `${p}`) ?? [],\n          appName: container.name,\n          recipeId: recipe.id,\n        });\n\n        task.state = 'success';\n        this.taskRegistry.updateTask(task);\n      }),\n    );\n\n    return imageInfoList;\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/recipes/PodManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, describe, vi, expect, test } from 'vitest';\nimport { PodManager } from './PodManager';\nimport type { ContainerInspectInfo, ContainerJSONEvent, PodCreateOptions, PodInfo } from '@podman-desktop/api';\nimport { EventEmitter, containerEngine } from '@podman-desktop/api';\n\nvi.mock('@podman-desktop/api', () => ({\n  containerEngine: {\n    listPods: vi.fn(),\n    stopPod: vi.fn(),\n    removePod: vi.fn(),\n    startPod: vi.fn(),\n    createPod: vi.fn(),\n    inspectContainer: vi.fn(),\n    onEvent: vi.fn(),\n  },\n  EventEmitter: vi.fn(),\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  // we return the id as health status\n  vi.mocked(containerEngine.inspectContainer).mockImplementation(async (engineId: string, id: string) => {\n    return {\n      State: {\n        Health: {\n          Status: id,\n        },\n      },\n    } as unknown as ContainerInspectInfo;\n  });\n\n  // mocking the EventEmitter mechanism\n  const listeners: ((value: unknown) => void)[] = [];\n\n  vi.mocked(EventEmitter).mockReturnValue({\n    event: vi.fn().mockImplementation(callback => {\n      listeners.push(callback);\n    }),\n    fire: 
vi.fn().mockImplementation((content: unknown) => {\n      listeners.forEach(listener => listener(content));\n    }),\n  } as unknown as EventEmitter<unknown>);\n});\n\ntest('getAllPods should use container engine list pods method', async () => {\n  await new PodManager().getAllPods();\n\n  expect(containerEngine.listPods).toHaveBeenCalledOnce();\n});\n\ntest('findPodByLabelsValues should only return pods with labels matching values', async () => {\n  vi.mocked(containerEngine.listPods).mockResolvedValue([\n    {\n      Id: 'pod-id-1',\n      Labels: {\n        'dummy-key': 'dummy-invalid',\n        hello: 'eggs',\n      },\n    },\n    {\n      Id: 'pod-id-2',\n      Labels: {\n        hello: 'world',\n        'dummy-key': 'dummy-valid',\n      },\n    },\n    {\n      Id: 'pod-id-2',\n      Labels: {\n        hello: 'world',\n        'dummy-key': 'invalid',\n      },\n    },\n    {\n      Id: 'pod-id-3',\n    },\n  ] as unknown as PodInfo[]);\n\n  const pod = await new PodManager().findPodByLabelsValues({\n    'dummy-key': 'dummy-valid',\n    hello: 'world',\n  });\n  expect(pod).toBeDefined();\n  expect(pod?.Id).toBe('pod-id-2');\n});\n\ntest('getPodsWithLabels should only return pods with proper labels', async () => {\n  vi.mocked(containerEngine.listPods).mockResolvedValue([\n    {\n      Id: 'pod-id-1',\n      Labels: {\n        'dummy-key': 'dummy-value',\n        hello: 'world',\n      },\n    },\n    {\n      Id: 'pod-id-2',\n      Labels: {\n        hello: 'world',\n        'dummy-key': 'dummy-value',\n      },\n    },\n    {\n      Id: 'pod-id-3',\n    },\n  ] as unknown as PodInfo[]);\n  const pods = await new PodManager().getPodsWithLabels(['dummy-key']);\n  expect(pods.length).toBe(2);\n  expect(pods.find(pod => pod.Id === 'pod-id-1')).toBeDefined();\n  expect(pods.find(pod => pod.Id === 'pod-id-2')).toBeDefined();\n  expect(pods.find(pod => pod.Id === 'pod-id-3')).toBeUndefined();\n});\n\ndescribe('getHealth', () => {\n  test('getHealth with no 
container should be none', async () => {\n    const health = await new PodManager().getHealth({\n      Containers: [],\n    } as unknown as PodInfo);\n    expect(health).toBe('none');\n  });\n\n  test('getHealth with one healthy should be healthy', async () => {\n    const health = await new PodManager().getHealth({\n      Containers: [\n        {\n          Id: 'healthy',\n        },\n      ],\n    } as unknown as PodInfo);\n    expect(health).toBe('healthy');\n  });\n\n  test('getHealth with many healthy and one unhealthy should be unhealthy', async () => {\n    const health = await new PodManager().getHealth({\n      Containers: [\n        {\n          Id: 'healthy',\n        },\n        {\n          Id: 'unhealthy',\n        },\n        {\n          Id: 'healthy',\n        },\n        {\n          Id: 'starting',\n        },\n      ],\n    } as unknown as PodInfo);\n    expect(health).toBe('unhealthy');\n  });\n\n  test('getHealth with many healthy and one starting should be starting', async () => {\n    const health = await new PodManager().getHealth({\n      Containers: [\n        {\n          Id: 'healthy',\n        },\n        {\n          Id: 'healthy',\n        },\n        {\n          Id: 'starting',\n        },\n      ],\n    } as unknown as PodInfo);\n    expect(health).toBe('starting');\n  });\n});\n\ndescribe('getPod', () => {\n  test('getPod should throw an error if none is matching', async () => {\n    vi.mocked(containerEngine.listPods).mockResolvedValue([]);\n    await expect(async () => {\n      await new PodManager().getPod('fakeEngineId', 'fakePodId');\n    }).rejects.toThrowError('pod with engineId fakeEngineId and Id fakePodId cannot be found.');\n  });\n\n  test('getPod should return matching pod', async () => {\n    vi.mocked(containerEngine.listPods).mockResolvedValue([\n      {\n        engineId: 'engine-1',\n        Id: 'pod-id-1',\n        Labels: {\n          'dummy-key': 'dummy-value',\n          hello: 'world',\n        },\n      
},\n      {\n        engineId: 'engine-2',\n        Id: 'pod-id-2',\n        Labels: {\n          hello: 'world',\n          'dummy-key': 'dummy-value',\n        },\n      },\n      {\n        engineId: 'engine-3',\n        Id: 'pod-id-3',\n      },\n    ] as unknown as PodInfo[]);\n    const pod = await new PodManager().getPod('engine-3', 'pod-id-3');\n    expect(pod).toBeDefined();\n    expect(pod.engineId).toBe('engine-3');\n    expect(pod.Id).toBe('pod-id-3');\n  });\n});\n\ntest('stopPod should call containerEngine.stopPod', async () => {\n  await new PodManager().stopPod('dummy-engine-id', 'dummy-pod-id');\n  expect(containerEngine.stopPod).toHaveBeenCalledWith('dummy-engine-id', 'dummy-pod-id');\n});\n\ntest('removePod should call containerEngine.removePod', async () => {\n  await new PodManager().removePod('dummy-engine-id', 'dummy-pod-id');\n  expect(containerEngine.removePod).toHaveBeenCalledWith('dummy-engine-id', 'dummy-pod-id');\n});\n\ntest('startPod should call containerEngine.startPod', async () => {\n  await new PodManager().startPod('dummy-engine-id', 'dummy-pod-id');\n  expect(containerEngine.startPod).toHaveBeenCalledWith('dummy-engine-id', 'dummy-pod-id');\n});\n\ntest('createPod should call containerEngine.createPod', async () => {\n  const options: PodCreateOptions = {\n    name: 'dummy-name',\n    portmappings: [],\n  };\n  await new PodManager().createPod(options);\n  expect(containerEngine.createPod).toHaveBeenCalledWith(options);\n});\n\ntest('dispose should dispose onEvent disposable', () => {\n  const disposableMock = vi.fn();\n  vi.mocked(containerEngine.onEvent).mockImplementation(() => {\n    return { dispose: disposableMock };\n  });\n\n  const podManager = new PodManager();\n  podManager.init();\n\n  podManager.dispose();\n\n  expect(containerEngine.onEvent).toHaveBeenCalled();\n  expect(disposableMock).toHaveBeenCalled();\n});\n\nconst getInitializedPodManager = (): {\n  onEventListener: (e: ContainerJSONEvent) => unknown;\n  
podManager: PodManager;\n} => {\n  let func: ((e: ContainerJSONEvent) => unknown) | undefined = undefined;\n  vi.mocked(containerEngine.onEvent).mockImplementation(fn => {\n    func = fn;\n    return { dispose: vi.fn() };\n  });\n\n  const podManager = new PodManager();\n  podManager.init();\n\n  if (!func) throw new Error('listener should be defined');\n\n  return { onEventListener: func, podManager };\n};\n\ndescribe('events', () => {\n  test('onStartPodEvent listener should be called on start pod event', async () => {\n    vi.mocked(containerEngine.listPods).mockResolvedValue([\n      {\n        Id: 'pod-id-1',\n        Labels: {\n          'dummy-key': 'dummy-value',\n          hello: 'world',\n        },\n      },\n    ] as unknown as PodInfo[]);\n\n    const { onEventListener, podManager } = getInitializedPodManager();\n\n    const startListenerMock = vi.fn();\n    podManager.onStartPodEvent(startListenerMock);\n\n    onEventListener({ id: 'pod-id-1', Type: 'pod', type: '', status: 'start' });\n\n    await vi.waitFor(() => {\n      expect(startListenerMock).toHaveBeenCalledWith({\n        Id: 'pod-id-1',\n        Labels: {\n          'dummy-key': 'dummy-value',\n          hello: 'world',\n        },\n      });\n    });\n  });\n\n  test('onStopPodEvent listener should be called on start pod event', async () => {\n    vi.mocked(containerEngine.listPods).mockResolvedValue([\n      {\n        Id: 'pod-id-1',\n        Labels: {\n          'dummy-key': 'dummy-value',\n          hello: 'world',\n        },\n      },\n    ] as unknown as PodInfo[]);\n\n    const { onEventListener, podManager } = getInitializedPodManager();\n\n    const stopListenerMock = vi.fn();\n    podManager.onStopPodEvent(stopListenerMock);\n\n    onEventListener({ id: 'pod-id-1', Type: 'pod', type: '', status: 'stop' });\n\n    await vi.waitFor(() => {\n      expect(stopListenerMock).toHaveBeenCalledWith({\n        Id: 'pod-id-1',\n        Labels: {\n          'dummy-key': 'dummy-value',\n      
    hello: 'world',\n        },\n      });\n    });\n  });\n\n  test('onRemovePodEvent listener should be called on start pod event', async () => {\n    const { onEventListener, podManager } = getInitializedPodManager();\n\n    const removeListenerMock = vi.fn();\n    podManager.onRemovePodEvent(removeListenerMock);\n\n    onEventListener({ id: 'pod-id-1', Type: 'pod', type: '', status: 'remove' });\n\n    await vi.waitFor(() => {\n      expect(removeListenerMock).toHaveBeenCalledWith({\n        podId: 'pod-id-1',\n      });\n    });\n    expect(containerEngine.listPods).not.toHaveBeenCalled();\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/recipes/PodManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { Disposable, PodCreateOptions, PodInfo, Event } from '@podman-desktop/api';\nimport { containerEngine, EventEmitter } from '@podman-desktop/api';\nimport type { PodHealth } from '@shared/models/IApplicationState';\nimport { getPodHealth } from '../../utils/podsUtils';\n\nexport interface PodEvent {\n  podId: string;\n}\n\nexport class PodManager implements Disposable {\n  #eventDisposable: Disposable | undefined;\n\n  // start pod events\n  private readonly _onStartPodEvent = new EventEmitter<PodInfo>();\n  readonly onStartPodEvent: Event<PodInfo> = this._onStartPodEvent.event;\n\n  // stop pod events\n  private readonly _onStopPodEvent = new EventEmitter<PodInfo>();\n  readonly onStopPodEvent: Event<PodInfo> = this._onStopPodEvent.event;\n\n  // remove pod events\n  private readonly _onRemovePodEvent = new EventEmitter<PodEvent>();\n  readonly onRemovePodEvent: Event<PodEvent> = this._onRemovePodEvent.event;\n\n  dispose(): void {\n    this.#eventDisposable?.dispose();\n  }\n\n  init(): void {\n    this.#eventDisposable = containerEngine.onEvent(async event => {\n      // filter on pod event type\n      if (event.Type !== 'pod') {\n        return;\n    
  }\n\n      if (event.status === 'remove') {\n        return this._onRemovePodEvent.fire({\n          podId: event.id,\n        });\n      }\n\n      const pod: PodInfo = await this.getPodById(event.id);\n      switch (event.status) {\n        case 'start':\n          this._onStartPodEvent.fire(pod);\n          break;\n        case 'stop':\n          this._onStopPodEvent.fire(pod);\n          break;\n      }\n    });\n  }\n\n  /**\n   * Utility method to get all the pods\n   */\n  getAllPods(): Promise<PodInfo[]> {\n    return containerEngine.listPods();\n  }\n\n  /**\n   * return the first pod matching the provided labels and their associated value\n   * @param requestedLabels the labels the pod must be matching\n   */\n  async findPodByLabelsValues(requestedLabels: Record<string, string>): Promise<PodInfo | undefined> {\n    const pods = await this.getAllPods();\n\n    return pods.find(pod => {\n      const labels = pod.Labels;\n      // eslint-disable-next-line sonarjs/different-types-comparison\n      if (labels === undefined) return false;\n\n      for (const [key, value] of Object.entries(requestedLabels)) {\n        if (!(key in labels) || labels[key] !== value) return false;\n      }\n\n      return true;\n    });\n  }\n\n  /**\n   * return pods containing all the labels provided\n   * This method does not check for the values, only existence\n   * @param labels\n   */\n  async getPodsWithLabels(labels: string[]): Promise<PodInfo[]> {\n    const pods = await this.getAllPods();\n\n    return pods.filter(pod => labels.every(label => !!pod.Labels && label in pod.Labels));\n  }\n\n  /**\n   * Given a pod Info, will fetch the health status of each containing composing it, and\n   * will return a PodHealth\n   * @param pod the pod to inspect\n   */\n  async getHealth(pod: PodInfo): Promise<PodHealth> {\n    const containerStates: (string | undefined)[] = await Promise.all(\n      pod.Containers.map(container =>\n        
containerEngine.inspectContainer(pod.engineId, container.Id).then(data => data.State.Health?.Status),\n      ),\n    );\n\n    return getPodHealth(containerStates);\n  }\n\n  /**\n   * This handy method is private as we do not want expose method not providing\n   * the engineId, but this is required because PodEvent do not provide the engineId\n   * @param id\n   * @private\n   */\n  private async getPodById(id: string): Promise<PodInfo> {\n    const pods = await this.getAllPods();\n    const result = pods.find(pod => pod.Id === id);\n    if (!result) throw new Error(`pod with Id ${id} cannot be found.`);\n    return result;\n  }\n\n  async getPod(engineId: string, Id: string): Promise<PodInfo> {\n    const pods = await this.getAllPods();\n    const result = pods.find(pod => pod.engineId === engineId && pod.Id === Id);\n    if (!result) throw new Error(`pod with engineId ${engineId} and Id ${Id} cannot be found.`);\n    return result;\n  }\n\n  async stopPod(engineId: string, id: string): Promise<void> {\n    return containerEngine.stopPod(engineId, id);\n  }\n\n  async removePod(engineId: string, id: string): Promise<void> {\n    return containerEngine.removePod(engineId, id);\n  }\n\n  async startPod(engineId: string, id: string): Promise<void> {\n    return containerEngine.startPod(engineId, id);\n  }\n\n  async createPod(podOptions: PodCreateOptions): Promise<{ engineId: string; Id: string }> {\n    return containerEngine.createPod(podOptions);\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/recipes/RecipeManager.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { BuilderManager } from './BuilderManager';\nimport type { GitManager } from '../gitManager';\nimport type { LocalRepositoryRegistry } from '../../registries/LocalRepositoryRegistry';\nimport { RecipeManager } from './RecipeManager';\nimport { containerEngine, type ContainerProviderConnection } from '@podman-desktop/api';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { Stats } from 'node:fs';\nimport { existsSync, statSync } from 'node:fs';\nimport { AIConfigFormat, parseYamlFile } from '../../models/AIConfig';\nimport { goarch } from '../../utils/arch';\nimport { VMType } from '@shared/models/IPodman';\nimport type { InferenceManager } from '../inference/inferenceManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { ApplicationOptions } from '../../models/ApplicationOptions';\n\nconst taskRegistryMock = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst builderManagerMock = {\n  build: vi.fn(),\n} as unknown as 
BuilderManager;\n\nconst gitManagerMock = {\n  processCheckout: vi.fn(),\n} as unknown as GitManager;\n\nconst localRepositoriesMock = {\n  register: vi.fn(),\n} as unknown as LocalRepositoryRegistry;\n\nconst inferenceManagerMock = {} as unknown as InferenceManager;\n\nconst recipeMock: Recipe = {\n  id: 'recipe-test',\n  name: 'Test Recipe',\n  categories: [],\n  description: 'test recipe description',\n  repository: 'http://test-repository.test',\n  readme: 'test recipe readme',\n};\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'Podman Machine',\n  vmType: VMType.UNKNOWN,\n} as unknown as ContainerProviderConnection;\n\nconst modelInfoMock: ModelInfo = {\n  id: 'modelId',\n  name: 'Model',\n  description: 'model to test',\n} as unknown as ModelInfo;\n\nvi.mock('../../models/AIConfig', () => ({\n  AIConfigFormat: {\n    CURRENT: 'current',\n  },\n  parseYamlFile: vi.fn(),\n}));\n\nvi.mock('node:fs', () => ({\n  existsSync: vi.fn(),\n  statSync: vi.fn(),\n}));\n\nvi.mock('@podman-desktop/api', () => ({\n  containerEngine: {\n    listImages: vi.fn(),\n  },\n}));\n\nvi.mock('../../utils/arch', () => ({\n  goarch: vi.fn(),\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(containerEngine.listImages).mockResolvedValue([]);\n  vi.mocked(taskRegistryMock.createTask).mockImplementation((name, state, labels) => ({\n    name,\n    state,\n    labels,\n    id: 'fake-task',\n  }));\n\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.mocked(statSync).mockReturnValue({\n    isDirectory: () => true,\n  } as unknown as Stats);\n\n  vi.mocked(parseYamlFile).mockReturnValue({\n    version: AIConfigFormat.CURRENT,\n    application: {\n      containers: [\n        {\n          arch: ['dummy-arch'],\n          modelService: false,\n          name: 'test-container',\n          gpu_env: [],\n          contextdir: '.',\n        },\n      ],\n    },\n  });\n\n  vi.mocked(goarch).mockReturnValue('dummy-arch');\n});\n\nasync function 
getInitializedRecipeManager(): Promise<RecipeManager> {\n  const manager = new RecipeManager(\n    'test-app-user-directory',\n    gitManagerMock,\n    taskRegistryMock,\n    builderManagerMock,\n    localRepositoriesMock,\n    inferenceManagerMock,\n  );\n  manager.init();\n  return manager;\n}\n\ndescribe('cloneRecipe', () => {\n  test('error in checkout should set the task to error and propagate it', async () => {\n    vi.mocked(gitManagerMock.processCheckout).mockRejectedValue(new Error('clone error'));\n\n    const manager = await getInitializedRecipeManager();\n\n    await expect(() => {\n      return manager.cloneRecipe(recipeMock);\n    }).rejects.toThrowError('clone error');\n\n    expect(taskRegistryMock.updateTask).toHaveBeenCalledWith(\n      expect.objectContaining({\n        state: 'error',\n      }),\n    );\n  });\n\n  test('labels should be propagated', async () => {\n    const manager = await getInitializedRecipeManager();\n    await manager.cloneRecipe(recipeMock, {\n      'test-label': 'test-value',\n    });\n\n    expect(gitManagerMock.processCheckout).toHaveBeenCalledWith({\n      repository: recipeMock.repository,\n      ref: recipeMock.ref,\n      targetDirectory: expect.any(String),\n    });\n\n    expect(taskRegistryMock.createTask).toHaveBeenCalledWith('Checking out repository', 'loading', {\n      'test-label': 'test-value',\n      'recipe-id': recipeMock.id,\n      git: 'checkout',\n    });\n\n    expect(localRepositoriesMock.register).toHaveBeenCalledWith({\n      path: expect.any(String),\n      sourcePath: expect.any(String),\n      labels: {\n        'recipe-id': recipeMock.id,\n      },\n    });\n  });\n});\n\ndescribe.each([true, false])('buildRecipe, with model is %o', withModel => {\n  let applicationOptions: ApplicationOptions;\n  beforeEach(() => {\n    applicationOptions = withModel\n      ? 
{\n          connection: connectionMock,\n          recipe: recipeMock,\n          model: modelInfoMock,\n        }\n      : {\n          connection: connectionMock,\n          recipe: recipeMock,\n        };\n  });\n  test('error in build propagate it', async () => {\n    vi.mocked(builderManagerMock.build).mockRejectedValue(new Error('build error'));\n\n    const manager = await getInitializedRecipeManager();\n\n    await expect(() => {\n      return manager.buildRecipe(applicationOptions);\n    }).rejects.toThrowError('build error');\n  });\n\n  test('labels should be propagated', async () => {\n    const manager = await getInitializedRecipeManager();\n\n    await manager.buildRecipe(applicationOptions, {\n      'test-label': 'test-value',\n    });\n\n    expect(taskRegistryMock.createTask).toHaveBeenCalledWith('Loading configuration', 'loading', {\n      'test-label': 'test-value',\n      'recipe-id': recipeMock.id,\n    });\n\n    expect(builderManagerMock.build).toHaveBeenCalledWith(\n      connectionMock,\n      recipeMock,\n      [\n        {\n          arch: ['dummy-arch'],\n          modelService: false,\n          name: 'test-container',\n          gpu_env: [],\n          contextdir: '.',\n        },\n      ],\n      expect.any(String),\n      {\n        'test-label': 'test-value',\n        'recipe-id': recipeMock.id,\n      },\n    );\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/managers/recipes/RecipeManager.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { GitCloneInfo, GitManager } from '../gitManager';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { Recipe, RecipeComponents } from '@shared/models/IRecipe';\nimport path from 'node:path';\nimport type { Task } from '@shared/models/ITask';\nimport type { LocalRepositoryRegistry } from '../../registries/LocalRepositoryRegistry';\nimport type { AIConfig, AIConfigFile, ContainerConfig } from '../../models/AIConfig';\nimport { parseYamlFile } from '../../models/AIConfig';\nimport { existsSync, statSync } from 'node:fs';\nimport { goarch } from '../../utils/arch';\nimport type { BuilderManager } from './BuilderManager';\nimport type { Disposable } from '@podman-desktop/api';\nimport { CONFIG_FILENAME } from '../../utils/RecipeConstants';\nimport type { InferenceManager } from '../inference/inferenceManager';\nimport { withDefaultConfiguration } from '../../utils/inferenceUtils';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { type ApplicationOptions, isApplicationOptionsWithModelInference } from '../../models/ApplicationOptions';\n\nexport interface AIContainers {\n  aiConfigFile: AIConfigFile;\n  
containers: ContainerConfig[];\n}\n\nexport class RecipeManager implements Disposable {\n  constructor(\n    private appUserDirectory: string,\n    private git: GitManager,\n    private taskRegistry: TaskRegistry,\n    private builderManager: BuilderManager,\n    private localRepositories: LocalRepositoryRegistry,\n    private inferenceManager: InferenceManager,\n  ) {}\n\n  dispose(): void {}\n\n  init(): void {}\n\n  private async doCheckout(gitCloneInfo: GitCloneInfo, labels?: { [id: string]: string }): Promise<void> {\n    // Creating checkout task\n    const checkoutTask: Task = this.taskRegistry.createTask('Checking out repository', 'loading', {\n      ...labels,\n      git: 'checkout',\n    });\n\n    try {\n      await this.git.processCheckout(gitCloneInfo);\n      checkoutTask.state = 'success';\n    } catch (err: unknown) {\n      checkoutTask.state = 'error';\n      checkoutTask.error = String(err);\n      // propagate error\n      throw err;\n    } finally {\n      // Update task registry\n      this.taskRegistry.updateTask(checkoutTask);\n    }\n  }\n\n  public async cloneRecipe(recipe: Recipe, labels?: { [key: string]: string }): Promise<void> {\n    const localFolder = path.join(this.appUserDirectory, recipe.id);\n\n    // clone the recipe repository on the local folder\n    const gitCloneInfo: GitCloneInfo = {\n      repository: recipe.repository,\n      ref: recipe.ref,\n      targetDirectory: localFolder,\n    };\n    await this.doCheckout(gitCloneInfo, {\n      ...labels,\n      'recipe-id': recipe.id,\n    });\n\n    this.localRepositories.register({\n      path: gitCloneInfo.targetDirectory,\n      sourcePath: path.join(gitCloneInfo.targetDirectory, recipe.basedir ?? 
''),\n      labels: {\n        'recipe-id': recipe.id,\n      },\n    });\n  }\n\n  public async buildRecipe(options: ApplicationOptions, labels?: { [key: string]: string }): Promise<RecipeComponents> {\n    const localFolder = path.join(this.appUserDirectory, options.recipe.id);\n\n    let inferenceServer: InferenceServer | undefined;\n    if (isApplicationOptionsWithModelInference(options)) {\n      // if the recipe has a defined backend, we gives priority to using an inference server\n      if (options.recipe.backend && options.recipe.backend === options.model.backend) {\n        let task: Task | undefined;\n        try {\n          inferenceServer = this.inferenceManager.findServerByModel(options.model);\n          task = this.taskRegistry.createTask('Starting Inference server', 'loading', labels);\n          if (!inferenceServer) {\n            const inferenceContainerId = await this.inferenceManager.createInferenceServer(\n              await withDefaultConfiguration({\n                modelsInfo: [options.model],\n              }),\n            );\n            inferenceServer = this.inferenceManager.get(inferenceContainerId);\n            this.taskRegistry.updateTask({\n              ...task,\n              labels: {\n                ...task.labels,\n                containerId: inferenceContainerId,\n              },\n            });\n          } else if (inferenceServer.status === 'stopped') {\n            await this.inferenceManager.startInferenceServer(inferenceServer.container.containerId);\n          }\n          task.state = 'success';\n        } catch (e) {\n          // we only skip the task update if the error is that we do not support this backend.\n          // If so, we build the image for the model service\n          if (task && String(e) !== 'no enabled provider could be found.') {\n            task.state = 'error';\n            task.error = `Something went wrong while starting the inference server: ${String(e)}`;\n            throw e;\n       
   }\n        } finally {\n          if (task) {\n            this.taskRegistry.updateTask(task);\n          }\n        }\n      }\n    }\n\n    // load and parse the recipe configuration file and filter containers based on architecture\n    const configAndFilteredContainers = this.getConfigAndFilterContainers(\n      options.recipe.basedir,\n      localFolder,\n      !!inferenceServer,\n      {\n        ...labels,\n        'recipe-id': options.recipe.id,\n      },\n    );\n\n    const images = await this.builderManager.build(\n      options.connection,\n      options.recipe,\n      configAndFilteredContainers.containers,\n      configAndFilteredContainers.aiConfigFile.path,\n      {\n        ...labels,\n        'recipe-id': options.recipe.id,\n      },\n    );\n\n    return {\n      images,\n      inferenceServer,\n    };\n  }\n\n  private getConfigAndFilterContainers(\n    recipeBaseDir: string | undefined,\n    localFolder: string,\n    useInferenceServer: boolean,\n    labels?: { [key: string]: string },\n  ): AIContainers {\n    // Adding loading configuration task\n    const task = this.taskRegistry.createTask('Loading configuration', 'loading', labels);\n\n    let aiConfigFile: AIConfigFile;\n    try {\n      // load and parse the recipe configuration file\n      aiConfigFile = this.getConfiguration(recipeBaseDir, localFolder);\n    } catch (e) {\n      task.error = `Something went wrong while loading configuration: ${String(e)}.`;\n      this.taskRegistry.updateTask(task);\n      throw e;\n    }\n\n    // filter the containers based on architecture, gpu accelerator and backend (that define which model supports)\n    let filteredContainers: ContainerConfig[] = this.filterContainers(aiConfigFile.aiConfig);\n    // if we are using the inference server we can remove the model service\n    if (useInferenceServer) {\n      filteredContainers = filteredContainers.filter(c => !c.modelService);\n    }\n    if (filteredContainers.length > 0) {\n      // Mark as 
success.\n      task.state = 'success';\n      this.taskRegistry.updateTask(task);\n    } else {\n      // Mark as failure.\n      task.error = 'No containers available.';\n      this.taskRegistry.updateTask(task);\n      throw new Error('No containers available.');\n    }\n\n    return {\n      aiConfigFile: aiConfigFile,\n      containers: filteredContainers,\n    };\n  }\n\n  private filterContainers(aiConfig: AIConfig): ContainerConfig[] {\n    return aiConfig.application.containers.filter(\n      container => container.gpu_env.length === 0 && container.arch.some(arc => arc === goarch()),\n    );\n  }\n\n  private getConfiguration(recipeBaseDir: string | undefined, localFolder: string): AIConfigFile {\n    let configFile: string;\n    if (recipeBaseDir !== undefined) {\n      configFile = path.join(localFolder, recipeBaseDir, CONFIG_FILENAME);\n    } else {\n      configFile = path.join(localFolder, CONFIG_FILENAME);\n    }\n\n    if (!existsSync(configFile)) {\n      throw new Error(`The file located at ${configFile} does not exist.`);\n    }\n\n    // If the user configured the config as a directory we check for \"ai-lab.yaml\" inside.\n    if (statSync(configFile).isDirectory()) {\n      const tmpPath = path.join(configFile, CONFIG_FILENAME);\n      // If it has the ai-lab.yaml we use it.\n      if (existsSync(tmpPath)) {\n        configFile = tmpPath;\n      }\n    }\n\n    // Parsing the configuration\n    console.log(`Reading configuration from ${configFile}.`);\n    let aiConfig: AIConfig;\n    try {\n      aiConfig = parseYamlFile(configFile, goarch());\n    } catch (err) {\n      console.error('Cannot load configure file.', err);\n      throw new Error(`Cannot load configuration file.`);\n    }\n\n    // Mark as success.\n    return {\n      aiConfig,\n      path: configFile,\n    };\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/managers/snippets/java-okhttp-snippet.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test } from 'vitest';\nimport { javaOkHttpGenerator } from './java-okhttp-snippet';\n\ntest('expect return generated snippet', async () => {\n  const payload = await javaOkHttpGenerator({ url: 'http://localhost:32412/v1/chat/completions' });\n  expect(payload).toBeDefined();\n  expect(payload).toContain('.url(\"http://localhost:32412/v1/chat/completions\")');\n});\n"
  },
  {
    "path": "packages/backend/src/managers/snippets/java-okhttp-snippet.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { RequestOptions } from '@shared/models/RequestOptions';\nimport mustache from 'mustache';\nimport javaOkHttpTemplate from '../../templates/java-okhttp.mustache?raw';\n\nexport async function javaOkHttpGenerator(requestOptions: RequestOptions): Promise<string> {\n  if (!requestOptions.url.endsWith('/v1/chat/completions')) throw new Error('Incompatible generator');\n  return mustache.render(javaOkHttpTemplate, {\n    endpoint: requestOptions.url,\n  });\n}\n"
  },
  {
    "path": "packages/backend/src/managers/snippets/python-langchain-snippet.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test } from 'vitest';\nimport { pythonLangChainGenerator } from './python-langchain-snippet';\n\ntest('expect return generated snippet', async () => {\n  const payload = await pythonLangChainGenerator({ url: 'http://localhost:32412/v1/chat/completions' });\n  expect(payload).toBeDefined();\n  expect(payload).toContain('model_service = \"http://localhost:32412/v1/\"');\n});\n"
  },
  {
    "path": "packages/backend/src/managers/snippets/python-langchain-snippet.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { RequestOptions } from '@shared/models/RequestOptions';\nimport mustache from 'mustache';\nimport pythonLangChainTemplate from '../../templates/python-langchain.mustache?raw';\n\nexport async function pythonLangChainGenerator(requestOptions: RequestOptions): Promise<string> {\n  if (!requestOptions.url.endsWith('/v1/chat/completions')) throw new Error('Incompatible generator');\n  return mustache.render(pythonLangChainTemplate, {\n    endpoint: requestOptions.url.replace('chat/completions', ''),\n  });\n}\n"
  },
  {
    "path": "packages/backend/src/managers/snippets/quarkus-snippet.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { quarkusLangchain4Jgenerator } from './quarkus-snippet';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('expect fetched version in generated payload', async () => {\n  const oldFetch = global.fetch;\n  try {\n    global.fetch = vi.fn().mockResolvedValue({\n      text: () =>\n        Promise.resolve(\n          '<metadata><groupId>io.quarkiverse.langchain4j</groupId><artifactId>quarkus-langchain4j-core</artifactId><versioning><latest>latest-version</latest><release>release-version</release></versioning></metadata>',\n        ),\n    });\n    const payload = await quarkusLangchain4Jgenerator({ url: 'http://localhost:32412/v1/chat/completions' });\n    expect(payload).toBeDefined();\n    expect(payload).toContain('<version>release-version</version>');\n  } finally {\n    global.fetch = oldFetch;\n  }\n});\n"
  },
  {
    "path": "packages/backend/src/managers/snippets/quarkus-snippet.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { RequestOptions } from '@shared/models/RequestOptions';\nimport mustache from 'mustache';\nimport template from '../../templates/quarkus-langchain4j.mustache?raw';\nimport xmljs from 'xml-js';\n\nconst SUFFIX_LENGTH = '/chat/completions'.length;\n\nconst METADATA_URL =\n  'https://repo1.maven.org/maven2/io/quarkiverse/langchain4j/quarkus-langchain4j-core/maven-metadata.xml';\n\nlet quarkusLangchain4jVersion: string;\n\nasync function getQuarkusLangchain4jVersion(): Promise<string> {\n  if (quarkusLangchain4jVersion) {\n    return quarkusLangchain4jVersion;\n  }\n  const response = await fetch(METADATA_URL, { redirect: 'follow' });\n  const content = JSON.parse(xmljs.xml2json(await response.text(), { compact: true }));\n  // eslint-disable-next-line sonarjs/no-nested-assignment\n  return (quarkusLangchain4jVersion = content.metadata.versioning.release._text);\n}\nexport async function quarkusLangchain4Jgenerator(requestOptions: RequestOptions): Promise<string> {\n  if (!requestOptions.url.endsWith('/v1/chat/completions')) throw new Error('Incompatible generator');\n  return mustache.render(template, {\n    baseUrl: requestOptions.url.substring(0, 
requestOptions.url.length - SUFFIX_LENGTH),\n    version: await getQuarkusLangchain4jVersion(),\n  });\n}\n"
  },
  {
    "path": "packages/backend/src/models/AIConfig.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, describe, vi } from 'vitest';\nimport fs from 'node:fs';\nimport { type AIConfig, AIConfigFormat, parseYamlFile } from './AIConfig';\n\n// Define mock file paths and contents\nconst mockYamlPath = '/path/to/mock.yml';\nconst defaultArch = 'x64';\n\nconst readFileSync = vi.spyOn(fs, 'readFileSync');\n\ndescribe('parseYaml', () => {\n  test('malformed configuration', () => {\n    readFileSync.mockReturnValue(``);\n    expect(() => {\n      parseYamlFile(mockYamlPath, defaultArch);\n    }).toThrowError('malformed configuration file.');\n  });\n\n  test('missing application property', () => {\n    readFileSync.mockReturnValue(`\nwrong:\n`);\n    expect(() => {\n      parseYamlFile(mockYamlPath, defaultArch);\n    }).toThrowError('malformed configuration file: missing version');\n  });\n\n  test('version mismatch', () => {\n    readFileSync.mockReturnValue(`\nversion: unknown\napplication: true\n`);\n    expect(() => {\n      parseYamlFile(mockYamlPath, defaultArch);\n    }).toThrowError('malformed configuration file: version not supported, got unknown expected v1.0.');\n  });\n\n  test('application primitive', () => {\n    
readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication: true\n`);\n    expect(() => {\n      parseYamlFile(mockYamlPath, defaultArch);\n    }).toThrowError('AIConfig has bad formatting: application does not have valid container property');\n  });\n\n  test('containers not an array', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers:\n    name: container1\n    contextdir: /path/to/dir1\n    arch: [\"x86\"]\n    model-service: true\n    gpu-env: [\"env1\", \"env2\"]\n    ports: [ 8080 ]\n`);\n    expect(() => {\n      parseYamlFile(mockYamlPath, defaultArch);\n    }).toThrowError('AIConfig has bad formatting: containers property must be an array.');\n  });\n\n  test('containers object', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers: true\n`);\n    expect(() => {\n      parseYamlFile(mockYamlPath, defaultArch);\n    }).toThrowError('AIConfig has bad formatting: containers property must be an array.');\n  });\n\n  test('should use architecture as string', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers:\n    - name: container1\n      contextdir: /path/to/dir1\n      arch: x86\n      ports: [ 8080 ]\n`);\n\n    const expectedConfig: AIConfig = {\n      version: AIConfigFormat.CURRENT,\n      application: {\n        containers: [\n          {\n            name: 'container1',\n            contextdir: '/path/to/dir1',\n            arch: ['x86'],\n            gpu_env: [],\n            modelService: false,\n            ports: [8080],\n          },\n        ],\n      },\n    };\n\n    expect(parseYamlFile(mockYamlPath, defaultArch)).toEqual(expectedConfig);\n  });\n\n  test('should use all architectures', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers:\n    - name: container1\n      contextdir: /path/to/dir1\n      
arch: ['arch1', 'arch2']\n      ports: [ 8080 ]\n`);\n\n    const expectedConfig: AIConfig = {\n      version: AIConfigFormat.CURRENT,\n      application: {\n        containers: [\n          {\n            name: 'container1',\n            contextdir: '/path/to/dir1',\n            arch: ['arch1', 'arch2'],\n            gpu_env: [],\n            modelService: false,\n            ports: [8080],\n          },\n        ],\n      },\n    };\n\n    expect(parseYamlFile(mockYamlPath, defaultArch)).toEqual(expectedConfig);\n  });\n\n  test('should put the default architecture', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers:\n    - name: container1\n      contextdir: /path/to/dir1\n      ports: [ 8080 ]\n`);\n\n    const expectedConfig: AIConfig = {\n      version: AIConfigFormat.CURRENT,\n      application: {\n        containers: [\n          {\n            name: 'container1',\n            contextdir: '/path/to/dir1',\n            arch: [defaultArch],\n            gpu_env: [],\n            modelService: false,\n            ports: [8080],\n          },\n        ],\n      },\n    };\n\n    expect(parseYamlFile(mockYamlPath, defaultArch)).toEqual(expectedConfig);\n  });\n\n  test('should use the image provided in the config', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers:\n    - name: container1\n      contextdir: /path/to/dir1\n      ports: [ 8080 ]\n      image: dummy-image\n`);\n\n    const expectedConfig: AIConfig = {\n      version: AIConfigFormat.CURRENT,\n      application: {\n        containers: [\n          {\n            name: 'container1',\n            contextdir: '/path/to/dir1',\n            arch: [defaultArch],\n            gpu_env: [],\n            modelService: false,\n            ports: [8080],\n            image: 'dummy-image',\n          },\n        ],\n      },\n    };\n\n    expect(parseYamlFile(mockYamlPath, 
defaultArch)).toEqual(expectedConfig);\n  });\n\n  test('ports should always be a final number', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers:\n    - name: container1\n      contextdir: /path/to/dir1\n      ports: [ '8080', 8888 ]\n      image: dummy-image\n`);\n\n    const expectedConfig: AIConfig = {\n      version: AIConfigFormat.CURRENT,\n      application: {\n        containers: [\n          {\n            name: 'container1',\n            contextdir: '/path/to/dir1',\n            arch: [defaultArch],\n            gpu_env: [],\n            modelService: false,\n            ports: [8080, 8888],\n            image: 'dummy-image',\n          },\n        ],\n      },\n    };\n\n    expect(parseYamlFile(mockYamlPath, defaultArch)).toEqual(expectedConfig);\n  });\n\n  test('should use gpu env', () => {\n    readFileSync.mockReturnValue(`\nversion: ${AIConfigFormat.CURRENT}\napplication:\n  containers:\n    - name: container1\n      contextdir: /path/to/dir1\n      arch: [\"x86\"]\n      model-service: true\n      gpu-env: [\"env1\", \"env2\"]\n      ports: [ 8080 ]\n    - name: container2\n      arch: [\"arm\"]\n      ports: [ 8001 ]\n`);\n\n    const expectedConfig: AIConfig = {\n      version: AIConfigFormat.CURRENT,\n      application: {\n        containers: [\n          {\n            name: 'container1',\n            contextdir: '/path/to/dir1',\n            arch: ['x86'],\n            modelService: true,\n            gpu_env: ['env1', 'env2'],\n            ports: [8080],\n          },\n          {\n            name: 'container2',\n            contextdir: '.',\n            arch: ['arm'],\n            modelService: false,\n            gpu_env: [],\n            ports: [8001],\n          },\n        ],\n      },\n    };\n\n    expect(parseYamlFile(mockYamlPath, defaultArch)).toEqual(expectedConfig);\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/models/AIConfig.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport * as jsYaml from 'js-yaml';\nimport fs from 'node:fs';\n\nexport interface ContainerConfig {\n  name: string;\n  contextdir: string;\n  containerfile?: string;\n  arch: string[];\n  modelService: boolean;\n  gpu_env: string[];\n  ports?: number[];\n  image?: string;\n  backend?: string[];\n}\n\nexport enum AIConfigFormat {\n  CURRENT = 'v1.0',\n}\nexport interface AIConfig {\n  version: AIConfigFormat;\n  application: {\n    containers: ContainerConfig[];\n  };\n}\n\nexport interface AIConfigFile {\n  aiConfig: AIConfig;\n  path: string;\n}\n\nexport function isString(value: unknown): value is string {\n  return (!!value && typeof value === 'string') || value instanceof String;\n}\n\nexport function assertString(value: unknown): string {\n  if (isString(value)) return value;\n  throw new Error('value not a string');\n}\n\nexport function parseYamlFile(filepath: string, defaultArch: string): AIConfig {\n  const raw: string = fs.readFileSync(filepath, 'utf-8');\n\n  const aiLabConfig: unknown = jsYaml.load(raw);\n  if (!aiLabConfig || typeof aiLabConfig !== 'object') {\n    throw new Error('malformed configuration file.');\n  }\n\n  if (!('version' in 
aiLabConfig) || typeof aiLabConfig.version !== 'string')\n    throw new Error('malformed configuration file: missing version');\n\n  if (aiLabConfig.version !== AIConfigFormat.CURRENT)\n    throw new Error(\n      `malformed configuration file: version not supported, got ${aiLabConfig.version} expected ${AIConfigFormat.CURRENT}.`,\n    );\n\n  if (!('application' in aiLabConfig)) {\n    throw new Error('malformed configuration file: missing application property');\n  }\n\n  const application: unknown = aiLabConfig['application'];\n  if (!application || typeof application !== 'object' || !('containers' in application)) {\n    throw new Error('AIConfig has bad formatting: application does not have valid container property');\n  }\n\n  if (!Array.isArray(application['containers'])) {\n    throw new Error('AIConfig has bad formatting: containers property must be an array.');\n  }\n\n  const containers: unknown[] = application['containers'];\n\n  return {\n    version: AIConfigFormat.CURRENT,\n    application: {\n      containers: containers.map(container => {\n        if (!container || typeof container !== 'object') throw new Error('containers array malformed');\n\n        let contextdir: string;\n        if ('contextdir' in container) {\n          contextdir = assertString(container['contextdir']);\n        } else {\n          contextdir = '.';\n        }\n\n        const architectures: string[] = [];\n        if (!('arch' in container)) {\n          architectures.push(defaultArch);\n        } else if (Array.isArray(container['arch']) && container['arch'].every(arch => typeof arch === 'string')) {\n          architectures.push(...container['arch']);\n        } else if (typeof container['arch'] === 'string') {\n          architectures.push(container['arch']);\n        } else {\n          throw new Error('malformed arch property');\n        }\n\n        let containerfile: string | undefined = undefined;\n        if ('containerfile' in container && 
isString(container['containerfile'])) {\n          containerfile = container['containerfile'];\n        }\n\n        if (!('name' in container) || typeof container['name'] !== 'string') {\n          throw new Error('invalid name property: must be string');\n        }\n\n        return {\n          arch: architectures,\n          modelService: 'model-service' in container && container['model-service'] === true,\n          containerfile,\n          contextdir: contextdir,\n          name: container['name'],\n          gpu_env: 'gpu-env' in container && Array.isArray(container['gpu-env']) ? container['gpu-env'] : [],\n          ports:\n            'ports' in container && Array.isArray(container['ports'])\n              ? container['ports'].map(port => parseInt(port))\n              : [],\n          image: 'image' in container && isString(container['image']) ? container['image'] : undefined,\n          backend: 'backend' in container && Array.isArray(container['backend']) ? container['backend'] : undefined,\n        };\n      }),\n    },\n  };\n}\n"
  },
  {
    "path": "packages/backend/src/models/ApplicationOptions.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ContainerProviderConnection } from '@podman-desktop/api';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { Recipe, RecipeDependencies } from '@shared/models/IRecipe';\n\nexport type ApplicationOptions = ApplicationOptionsDefault | ApplicationOptionsWithModelInference;\n\nexport interface ApplicationOptionsDefault {\n  connection: ContainerProviderConnection;\n  recipe: Recipe;\n  dependencies?: RecipeDependencies;\n}\n\nexport type ApplicationOptionsWithModelInference = ApplicationOptionsDefault & {\n  model: ModelInfo;\n};\n\nexport function isApplicationOptionsWithModelInference(\n  options: ApplicationOptions,\n): options is ApplicationOptionsWithModelInference {\n  return 'model' in options;\n}\n"
  },
  {
    "path": "packages/backend/src/models/HuggingFaceModelHandler.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { EventEmitter } from '@podman-desktop/api';\nimport type { TelemetryLogger } from '@podman-desktop/api';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { ModelsManager } from '../managers/modelsManager';\nimport type { CatalogManager } from '../managers/catalogManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { TaskRegistry } from '../registries/TaskRegistry';\nimport type { CancellationTokenRegistry } from '../registries/CancellationTokenRegistry';\nimport type { PodmanConnection } from '../managers/podmanConnection';\nimport type { ConfigurationRegistry } from '../registries/ConfigurationRegistry';\nimport { ModelHandlerRegistry } from '../registries/ModelHandlerRegistry';\nimport { HuggingFaceModelHandler } from './HuggingFaceModelHandler';\nimport { snapshotDownload } from '@huggingface/hub';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    EventEmitter: vi.fn(),\n  };\n});\n\nvi.mock('@huggingface/hub', () => {\n  return {\n    scanCacheDir: vi.fn(),\n    snapshotDownload: vi.fn(),\n  };\n});\n\nconst rpcExtensionMock = {\n  fire: 
vi.fn(),\n} as unknown as RpcExtension;\n\nconst catalogManagerMock = {\n  getModels(): ModelInfo[] {\n    return [\n      { id: 'model-id-1', name: 'model-id-1-model' } as ModelInfo,\n      { id: 'model-id-2', name: 'model-id-2-model' } as ModelInfo,\n    ];\n  },\n  onUpdate: vi.fn(),\n} as unknown as CatalogManager;\n\nconst telemetryLogger = {\n  logUsage: vi.fn(),\n  logError: vi.fn(),\n} as unknown as TelemetryLogger;\n\nconst taskRegistry: TaskRegistry = new TaskRegistry(rpcExtensionMock);\n\nconst cancellationTokenRegistryMock = {\n  createCancellationTokenSource: vi.fn(),\n} as unknown as CancellationTokenRegistry;\n\nconst podmanConnectionMock = {\n  getContainerProviderConnections: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst configurationRegistryMock = {\n  getExtensionConfiguration: vi.fn(),\n} as unknown as ConfigurationRegistry;\n\nconst modelHandlerRegistry = new ModelHandlerRegistry(rpcExtensionMock);\n\nconst modelsManager: ModelsManager = new ModelsManager(\n  rpcExtensionMock,\n  catalogManagerMock,\n  telemetryLogger,\n  taskRegistry,\n  cancellationTokenRegistryMock,\n  podmanConnectionMock,\n  configurationRegistryMock,\n  modelHandlerRegistry,\n);\n\nconst huggingFaceModelHandler = new HuggingFaceModelHandler(modelsManager);\n\nbeforeEach(() => {\n  const listeners: ((value: unknown) => void)[] = [];\n\n  const eventReturned = {\n    event: vi.fn(),\n    fire: vi.fn(),\n  };\n\n  vi.mocked(EventEmitter).mockReturnValue(eventReturned as unknown as EventEmitter<unknown>);\n  vi.mocked(eventReturned.event).mockImplementation(callback => {\n    listeners.push(callback);\n  });\n  vi.mocked(eventReturned.fire).mockImplementation((content: unknown) => {\n    listeners.forEach(listener => listener(content));\n  });\n});\n\ntest('check http url are not supported', () => {\n  expect(huggingFaceModelHandler.accept('http://example.com')).toBe(false);\n});\n\ntest('check https url are not supported', () => {\n  
expect(huggingFaceModelHandler.accept('https://example.com')).toBe(false);\n});\n\ntest('check huggingface url are supported', () => {\n  expect(huggingFaceModelHandler.accept('huggingface://ibm-granite/my-model')).toBe(true);\n});\n\ntest('download reports error', async () => {\n  vi.mocked(snapshotDownload).mockRejectedValue(new Error('error'));\n  const listenerMock = vi.fn();\n  const downloader = huggingFaceModelHandler.createDownloader(\n    { id: 'model-id-1', name: 'model-id-1-model', url: 'huggingface://ibm-granite/my-model' } as ModelInfo,\n    { aborted: false } as AbortSignal,\n  );\n  downloader.onEvent(listenerMock);\n  let err: unknown;\n  try {\n    await downloader.perform('model-id-1');\n  } catch (error) {\n    err = error;\n  }\n  expect(err).toBeDefined();\n  expect(listenerMock).toHaveBeenCalledWith({\n    id: 'model-id-1',\n    message: 'Something went wrong: Error: error.',\n    status: 'error',\n  });\n});\n\ntest('download returns cache in path', async () => {\n  vi.mocked(snapshotDownload).mockResolvedValue('cache-path');\n  const listenerMock = vi.fn();\n  const downloader = huggingFaceModelHandler.createDownloader(\n    { id: 'model-id-1', name: 'model-id-1-model', url: 'huggingface://ibm-granite/my-model' } as ModelInfo,\n    { aborted: false } as AbortSignal,\n  );\n  downloader.onEvent(listenerMock);\n  await downloader.perform('model-id-1');\n  expect(downloader.getTarget()).toBe('cache-path');\n  expect(listenerMock).toHaveBeenCalledWith({\n    duration: expect.anything(),\n    id: 'model-id-1',\n    message: expect.anything(),\n    status: 'completed',\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/models/HuggingFaceModelHandler.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { ModelHandler } from './ModelHandler';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { Downloader } from '../utils/downloader';\nimport { scanCacheDir, snapshotDownload } from '@huggingface/hub';\nimport type { CompletionEvent } from './baseEvent';\nimport { getDurationSecondsSince } from '../utils/utils';\nimport type { ModelsManager } from '../managers/modelsManager';\nimport fs from 'node:fs/promises';\n\nfunction parseURL(url: string): { repo: string; revision?: string } | undefined {\n  const u = URL.parse(url);\n  if (u) {\n    return { repo: u.pathname.slice(1), revision: u.searchParams.get('revision') ?? 
'main' };\n  }\n  return undefined;\n}\n\nclass HuggingFaceDownloader extends Downloader {\n  #target: string = '';\n\n  constructor(\n    url: string,\n    private repo: string,\n    private revision: string | undefined,\n    private abortSignal: AbortSignal,\n  ) {\n    super(url, '');\n  }\n\n  override getTarget(): string {\n    return this.#target;\n  }\n\n  async perform(id: string): Promise<void> {\n    const startTime = performance.now();\n\n    try {\n      this.#target = await snapshotDownload({\n        repo: this.repo,\n        revision: this.revision,\n      });\n      const durationSeconds = getDurationSecondsSince(startTime);\n      this._onEvent.fire({\n        id: id,\n        status: 'completed',\n        message: `Duration ${durationSeconds}s.`,\n        duration: durationSeconds,\n      } as CompletionEvent);\n    } catch (err: unknown) {\n      if (!this.abortSignal?.aborted) {\n        this._onEvent.fire({\n          id: id,\n          status: 'error',\n          message: `Something went wrong: ${String(err)}.`,\n        });\n      } else {\n        this._onEvent.fire({\n          id: id,\n          status: 'canceled',\n          message: `Request cancelled: ${String(err)}.`,\n        });\n      }\n      throw err;\n    } finally {\n      this.completed = true;\n    }\n  }\n}\n\nexport class HuggingFaceModelHandler extends ModelHandler {\n  constructor(modelsManager: ModelsManager) {\n    super('huggingface model registry', modelsManager);\n  }\n\n  accept(url: string): boolean {\n    return url.startsWith('huggingface') || url.startsWith('hf');\n  }\n\n  createDownloader(model: ModelInfo, abortSignal: AbortSignal): Downloader {\n    const result = parseURL(model.url!);\n    if (result) {\n      return new HuggingFaceDownloader(model.url!, result.repo, result.revision, abortSignal);\n    }\n    throw new Error(`Invalid URL: ${model.url} for model ${model.name}`);\n  }\n\n  async deleteModel(model: ModelInfo): Promise<void> {\n    if 
(model.file) {\n      await fs.rm(model.file?.path, { recursive: true });\n    } else {\n      throw new Error(`Model ${model.name} not downloaded yet.`);\n    }\n  }\n\n  dispose(): void {}\n\n  async getLocalModelsFromDisk(): Promise<void> {\n    const hfModels = this.modelsManager\n      .getModelsInfo()\n      .filter(model => model.url && this.accept(model.url))\n      .map(model => {\n        return { model: model, repo: parseURL(model.url!) };\n      })\n      .filter(info => info.repo);\n\n    scanCacheDir()\n      .then(hfinfo => {\n        for (const repo of hfinfo.repos) {\n          for (const revision of repo.revisions) {\n            for (const ref of revision.refs) {\n              const model = hfModels.find(m => m.repo?.repo === repo.id.name && m.repo?.revision === ref);\n              if (model) {\n                model.model.file = {\n                  path: revision.path,\n                  file: '',\n                  creation: revision.lastModifiedAt,\n                  size: revision.size,\n                };\n              }\n            }\n          }\n        }\n      })\n      .catch((err: unknown): void => {\n        console.error('Something went wrong while scanning cache.', err);\n      });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/models/ModelHandler.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { Disposable } from '@podman-desktop/api';\nimport { EventEmitter } from '@podman-desktop/api';\nimport type { Downloader } from '../utils/downloader';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { ModelsManager } from '../managers/modelsManager';\n\nexport abstract class ModelHandler implements Disposable {\n  readonly name: string;\n  readonly modelsManager: ModelsManager;\n  protected _onUpdate = new EventEmitter<void>();\n  readonly onUpdate = this._onUpdate.event;\n\n  protected constructor(name: string, modelsManager: ModelsManager) {\n    this.name = name;\n    this.modelsManager = modelsManager;\n  }\n\n  /**\n   * Releases any resources held by the model handler.\n   */\n  abstract dispose(): void;\n\n  /**\n   * Returns true if the model handler can handle the given URL.\n   * @param url\n   */\n  abstract accept(url: string): boolean;\n\n  /**\n   * Creates a downloader for the given model.\n   * @param model the model to download\n   * @param abortSignal the signal to abort the download\n   */\n  abstract createDownloader(model: ModelInfo, abortSignal: AbortSignal): Downloader;\n\n  /**\n   * Retrieves the local 
models from disk.\n   */\n  abstract getLocalModelsFromDisk(): Promise<void>;\n\n  /**\n   * Deletes the given model from local storage.\n   * @param model the model\n   */\n  abstract deleteModel(model: ModelInfo): Promise<void>;\n}\n"
  },
  {
    "path": "packages/backend/src/models/TaskRunner.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface RunAsTaskOptions {\n  loadingLabel: string;\n  // label set when the task terminates normally, by default the loading label is kept\n  successLabel?: string;\n  // label set when the task terminates in error, by default the loading label is kept\n  errorLabel?: string;\n  // the error message to display when task terminates in error\n  errorMsg: (err: unknown) => string;\n  // if true, all subtasks (tasks found with the same labels) will be immediately marked in error if this task fails\n  failFastSubtasks?: boolean;\n}\n\nexport interface TaskRunnerTools {\n  updateLabels: (f: (labels: Record<string, string>) => Record<string, string>) => void;\n}\n"
  },
  {
    "path": "packages/backend/src/models/URLModelHandler.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport fs from 'node:fs';\nimport { basename, join, resolve } from 'node:path';\nimport type { FileSystemWatcher } from '@podman-desktop/api';\nimport { fs as apiFs } from '@podman-desktop/api';\nimport { ModelHandler } from './ModelHandler';\nimport type { ModelsManager } from '../managers/modelsManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { Downloader } from '../utils/downloader';\nimport { URLDownloader } from '../utils/urldownloader';\n\nexport class URLModelHandler extends ModelHandler {\n  #watcher: FileSystemWatcher;\n\n  constructor(\n    modelsManager: ModelsManager,\n    private modelsDir: string,\n  ) {\n    super('url model registry', modelsManager);\n    this.#watcher = apiFs.createFileSystemWatcher(this.modelsDir);\n    this.#watcher.onDidCreate(() => this._onUpdate.fire());\n    this.#watcher.onDidDelete(() => this._onUpdate.fire());\n    this.#watcher.onDidChange(() => this._onUpdate.fire());\n  }\n\n  override dispose(): void {\n    this.#watcher.dispose();\n  }\n\n  override accept(url: string): boolean {\n    return url.startsWith('https') || url.startsWith('http') || url.startsWith('file');\n  }\n\n  
override createDownloader(model: ModelInfo, abortSignal: AbortSignal): Downloader {\n    const destDir = join(this.modelsDir, model.id);\n    const target = resolve(destDir, basename(model.url!));\n    return new URLDownloader(model.url!, target, model.sha256, abortSignal);\n  }\n\n  override async getLocalModelsFromDisk(): Promise<void> {\n    if (!fs.existsSync(this.modelsDir)) {\n      return;\n    }\n    const entries = await fs.promises.readdir(this.modelsDir, { withFileTypes: true });\n    const dirs = entries.filter(dir => dir.isDirectory());\n    for (const d of dirs) {\n      const modelEntries = await fs.promises.readdir(resolve(d.parentPath, d.name));\n      if (modelEntries.length !== 1) {\n        // we support models with one file only for now\n        continue;\n      }\n      const modelFile = modelEntries[0];\n      const fullPath = resolve(d.parentPath, d.name, modelFile);\n\n      // Check for corresponding models or tmp file that should be ignored\n      try {\n        const model = this.modelsManager.getModelInfo(d.name);\n        if (fullPath.endsWith('.tmp')) {\n          continue;\n        }\n\n        let info: { size?: number; mtime?: Date } = { size: undefined, mtime: undefined };\n        try {\n          info = await fs.promises.stat(fullPath);\n        } catch (err: unknown) {\n          console.error('Something went wrong while getting file stats (probably in use).', err);\n        }\n\n        model.file = {\n          file: modelFile,\n          path: resolve(d.parentPath, d.name),\n          size: info.size,\n          creation: info.mtime,\n        };\n      } catch (e: unknown) {\n        console.warn(`Can't find model info for local folder ${d.name}.`, e);\n      }\n    }\n  }\n\n  async deleteModel(model: ModelInfo): Promise<void> {\n    const folder = resolve(this.modelsDir, model.id);\n    await fs.promises.rm(folder, { recursive: true, force: true, maxRetries: 3 });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/models/baseEvent.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface BaseEvent {\n  id: string;\n  status: 'error' | 'completed' | 'progress' | 'canceled';\n  message?: string;\n}\n\nexport interface CompletionEvent extends BaseEvent {\n  status: 'completed' | 'error' | 'canceled';\n  duration: number;\n}\n\nexport interface ProgressEvent extends BaseEvent {\n  status: 'progress';\n  value: number;\n  total: number;\n}\n\nexport const isCompletionEvent = (value: unknown): value is CompletionEvent => {\n  return (\n    !!value &&\n    typeof value === 'object' &&\n    'status' in value &&\n    typeof value['status'] === 'string' &&\n    ['canceled', 'completed', 'error'].includes(value['status'])\n  );\n};\n\nexport const isProgressEvent = (value: unknown): value is ProgressEvent => {\n  return (\n    !!value && typeof value === 'object' && 'status' in value && value['status'] === 'progress' && 'value' in value\n  );\n};\n"
  },
  {
    "path": "packages/backend/src/registries/ApplicationRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { RecipeModelIndex } from '@shared/models/IRecipeModelIndex';\n\nexport class ApplicationRegistry<T extends RecipeModelIndex> {\n  #applications = new Map<string, T>();\n\n  keys(): RecipeModelIndex[] {\n    return Array.from(this.#applications.values()).map(a => ({ recipeId: a.recipeId, modelId: a.modelId }));\n  }\n\n  has(recipeModel: RecipeModelIndex): boolean {\n    return this.#applications.has(this.hash(recipeModel));\n  }\n\n  delete(recipeModel: RecipeModelIndex): boolean {\n    return this.#applications.delete(this.hash(recipeModel));\n  }\n\n  values(): IterableIterator<T> {\n    return this.#applications.values();\n  }\n\n  get(recipeModel: RecipeModelIndex): T {\n    const application = this.#applications.get(this.hash(recipeModel));\n    if (!application) throw new Error('application not found.');\n    return application;\n  }\n\n  set(recipeModel: RecipeModelIndex, value: T): void {\n    this.#applications.set(this.hash(recipeModel), value);\n  }\n\n  clear(): void {\n    this.#applications.clear();\n  }\n\n  private hash(recipeModel: RecipeModelIndex): string {\n    return recipeModel.recipeId + recipeModel.modelId;\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/CancellationTokenRegistry.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { CancellationTokenRegistry } from './CancellationTokenRegistry';\nimport { CancellationTokenSource, EventEmitter } from '@podman-desktop/api';\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    EventEmitter: vi.fn(),\n    CancellationTokenSource: vi.fn(),\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  // mock event emitters\n  const listeners: ((value: unknown) => void)[] = [];\n\n  vi.mocked(EventEmitter).mockReturnValue({\n    event: vi.fn().mockImplementation(callback => {\n      listeners.push(callback);\n    }),\n    dispose: vi.fn(),\n    fire: vi.fn().mockImplementation((content: unknown) => {\n      listeners.forEach(listener => listener(content));\n    }),\n  } as unknown as EventEmitter<unknown>);\n\n  vi.mocked(CancellationTokenSource).mockReturnValue({\n    cancel: vi.fn(),\n    dispose: vi.fn(),\n    token: {\n      isCancellationRequested: false,\n      onCancellationRequested: vi.fn(),\n    },\n  });\n});\n\ntest('created token should be retrievable', () => {\n  const registry = new CancellationTokenRegistry();\n  const tokenId = 
registry.createCancellationTokenSource();\n  expect(tokenId).toBeDefined();\n  expect(registry.hasCancellationTokenSource(tokenId)).toBeTruthy();\n});\n\ntest('created token should not be cancelled', () => {\n  const registry = new CancellationTokenRegistry();\n  const source = registry.getCancellationTokenSource(registry.createCancellationTokenSource());\n  expect(source).toBeDefined();\n  expect(source?.token.isCancellationRequested).toBeFalsy();\n});\n\ntest('cancel token should be removed from registry', () => {\n  const registry = new CancellationTokenRegistry();\n  const tokenId = registry.createCancellationTokenSource();\n\n  expect(registry.hasCancellationTokenSource(tokenId)).toBeTruthy();\n\n  registry.cancel(tokenId);\n\n  expect(registry.hasCancellationTokenSource(tokenId)).toBeFalsy();\n});\n\ntest('disposing registry should dispose with cancel all tokens', () => {\n  const registry = new CancellationTokenRegistry();\n  const source = registry.getCancellationTokenSource(registry.createCancellationTokenSource());\n\n  registry.dispose();\n  expect(source?.cancel).toHaveBeenCalled();\n  expect(source?.dispose).toHaveBeenCalled();\n});\n\ntest('creating cancellation token with function should register it', () => {\n  const registry = new CancellationTokenRegistry();\n  const func = vi.fn();\n  const source = registry.getCancellationTokenSource(registry.createCancellationTokenSource(func));\n\n  expect(source?.token.onCancellationRequested).toHaveBeenCalledWith(func);\n});\n"
  },
  {
    "path": "packages/backend/src/registries/CancellationTokenRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { CancellationTokenSource, type Disposable } from '@podman-desktop/api';\n\nexport class CancellationTokenRegistry implements Disposable {\n  #callbackId: number;\n  #callbacksCancellableToken: Map<number, CancellationTokenSource>;\n\n  constructor() {\n    this.#callbackId = 0;\n    this.#callbacksCancellableToken = new Map<number, CancellationTokenSource>();\n  }\n\n  /**\n   * Creating a cancellation token.\n   * @param func an optional function that will be called when the cancel action will be triggered\n   */\n  createCancellationTokenSource(func?: () => void): number {\n    // keep track of this request\n    this.#callbackId++;\n\n    const token = new CancellationTokenSource();\n    if (func !== undefined) {\n      token.token.onCancellationRequested(func);\n    }\n\n    // store the callback that will resolve the promise\n    this.#callbacksCancellableToken.set(this.#callbackId, token);\n\n    return this.#callbackId;\n  }\n\n  getCancellationTokenSource(id: number): CancellationTokenSource | undefined {\n    if (this.hasCancellationTokenSource(id)) {\n      return this.#callbacksCancellableToken.get(id);\n    }\n    return undefined;\n  }\n\n  
hasCancellationTokenSource(id: number): boolean {\n    return this.#callbacksCancellableToken.has(id);\n  }\n\n  cancel(tokenId: number): void {\n    if (!this.hasCancellationTokenSource(tokenId))\n      throw new Error(`Cancellation token with id ${tokenId} does not exist.`);\n    this.getCancellationTokenSource(tokenId)?.cancel();\n    this.delete(tokenId);\n  }\n\n  delete(tokenId: number): void {\n    this.#callbacksCancellableToken.delete(tokenId);\n  }\n\n  dispose(): void {\n    Array.from(this.#callbacksCancellableToken.values()).forEach(source => {\n      source.cancel();\n      source.dispose();\n    });\n    this.#callbacksCancellableToken.clear();\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/ConfigurationRegistry.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { vi, expect, test } from 'vitest';\nimport { configuration, type Configuration } from '@podman-desktop/api';\nimport { ConfigurationRegistry } from './ConfigurationRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nconst fakeConfiguration = {\n  get: vi.fn(),\n  has: vi.fn(),\n  update: vi.fn(),\n} as unknown as Configuration;\n\nconst rpcExtensionMock = {\n  fire: vi.fn().mockResolvedValue(true),\n} as unknown as RpcExtension;\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    configuration: {\n      getConfiguration: (): unknown => fakeConfiguration,\n      onDidChangeConfiguration: vi.fn(),\n    },\n  };\n});\n\ntest('init should init listener', () => {\n  const registry = new ConfigurationRegistry(rpcExtensionMock, 'appdir');\n  vi.mocked(fakeConfiguration.has).mockReturnValue(true);\n\n  registry.init();\n  expect(configuration.onDidChangeConfiguration).toHaveBeenCalled();\n});\n\ntest('dispose should dispose listener', () => {\n  const registry = new ConfigurationRegistry(rpcExtensionMock, 'appdir');\n  vi.mocked(fakeConfiguration.has).mockReturnValue(true);\n\n  const disposeMock = vi.fn();\n  
vi.mocked(configuration.onDidChangeConfiguration).mockReturnValue({ dispose: disposeMock });\n\n  registry.init();\n  expect(configuration.onDidChangeConfiguration).toHaveBeenCalled();\n\n  registry.dispose();\n  expect(disposeMock).toHaveBeenCalled();\n});\n\ntest('update should trigger configuration update', async () => {\n  const registry = new ConfigurationRegistry(rpcExtensionMock, 'appdir');\n  vi.mocked(fakeConfiguration.has).mockReturnValue(true);\n  vi.mocked(fakeConfiguration.update).mockResolvedValue(undefined);\n\n  registry.init();\n  await registry.updateExtensionConfiguration({ modelsPath: '' });\n  expect(fakeConfiguration.update).toHaveBeenCalledWith('models.path', '');\n});\n"
  },
  {
    "path": "packages/backend/src/registries/ConfigurationRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { configuration, version, type Configuration, type Disposable } from '@podman-desktop/api';\nimport { Publisher } from '../utils/Publisher';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport { MSG_CONFIGURATION_UPDATE } from '@shared/Messages';\nimport path from 'node:path';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nconst CONFIGURATION_SECTIONS: string[] = [\n  'models.path',\n  'experimentalGPU',\n  'apiPort',\n  'inferenceRuntime',\n  'experimentalTuning',\n  'modelUploadDisabled',\n  'showGPUPromotion',\n  'appearance',\n];\n\nconst API_PORT_DEFAULT = 10434;\n\nexport class ConfigurationRegistry extends Publisher<ExtensionConfiguration> implements Disposable {\n  #configuration: Configuration;\n  #configurationPodmanDesktop: Configuration;\n  #configurationDisposable: Disposable | undefined;\n\n  constructor(\n    rpcExtension: RpcExtension,\n    private appUserDirectory: string,\n  ) {\n    super(rpcExtension, MSG_CONFIGURATION_UPDATE, () => this.getExtensionConfiguration());\n\n    this.#configuration = configuration.getConfiguration('ai-lab');\n    this.#configurationPodmanDesktop 
= configuration.getConfiguration('preferences');\n  }\n\n  getExtensionConfiguration(): ExtensionConfiguration {\n    return {\n      modelsPath: this.getModelsPath(),\n      experimentalGPU: this.#configuration.get<boolean>('experimentalGPU') ?? false,\n      apiPort: this.#configuration.get<number>('apiPort') ?? API_PORT_DEFAULT,\n      inferenceRuntime: this.#configuration.get<string>('inferenceRuntime') ?? 'all',\n      experimentalTuning: this.#configuration.get<boolean>('experimentalTuning') ?? false,\n      modelUploadDisabled: this.#configuration.get<boolean>('modelUploadDisabled') ?? false,\n      showGPUPromotion: this.#configuration.get<boolean>('showGPUPromotion') ?? true,\n      appearance: this.#configurationPodmanDesktop.get<string>('appearance') ?? 'dark',\n    };\n  }\n\n  getPodmanDesktopVersion(): string {\n    return version;\n  }\n\n  private getFieldName(section: string): keyof Partial<ExtensionConfiguration> {\n    return section.replace(/\\.(\\w)/, (match, char) => char.toUpperCase()) as keyof Partial<ExtensionConfiguration>;\n  }\n\n  async updateExtensionConfiguration(update: Partial<ExtensionConfiguration>): Promise<void> {\n    for (const section of CONFIGURATION_SECTIONS) {\n      const fieldName = this.getFieldName(section);\n      const value = update[fieldName];\n      if (value !== undefined) {\n        await this.#configuration.update(section, value);\n      }\n    }\n    this.notify(); //https://github.com/containers/podman-desktop/issues/9194\n  }\n\n  private getModelsPath(): string {\n    const value = this.#configuration.get<string>('models.path');\n    if (value && value.length > 0) {\n      return value;\n    }\n    return path.join(this.appUserDirectory, 'models');\n  }\n\n  dispose(): void {\n    this.#configurationDisposable?.dispose();\n  }\n\n  init(): void {\n    this.#configurationDisposable = configuration.onDidChangeConfiguration(event => {\n      if (CONFIGURATION_SECTIONS.some(section => 
event.affectsConfiguration(`ai-lab.${section}`))) {\n        this.notify();\n      }\n      if (CONFIGURATION_SECTIONS.some(section => event.affectsConfiguration(`preferences.${section}`))) {\n        this.notify();\n      }\n    });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/ContainerRegistry.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeAll, expect, test, vi } from 'vitest';\nimport { ContainerRegistry } from './ContainerRegistry';\nimport { type ContainerJSONEvent, EventEmitter } from '@podman-desktop/api';\nimport { TestEventEmitter } from '../tests/utils';\n\nconst mocks = vi.hoisted(() => ({\n  onEventMock: vi.fn(),\n  DisposableCreateMock: vi.fn(),\n}));\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    EventEmitter: vi.fn(),\n    Disposable: {\n      create: mocks.DisposableCreateMock,\n    },\n    containerEngine: {\n      onEvent: mocks.onEventMock,\n    },\n  };\n});\n\nbeforeAll(() => {\n  vi.mocked(EventEmitter).mockImplementation(() => new TestEventEmitter() as unknown as EventEmitter<unknown>);\n});\n\ntest('ContainerRegistry init', () => {\n  const registry = new ContainerRegistry();\n  registry.init();\n\n  expect(mocks.onEventMock).toHaveBeenCalledOnce();\n});\n\ntest('ContainerRegistry subscribe', () => {\n  // Get the callback created by the ContainerRegistry\n  let callback: ((event: ContainerJSONEvent) => void) | undefined;\n  mocks.onEventMock.mockImplementation((method: (event: ContainerJSONEvent) => void) => {\n    callback = method;\n  });\n\n  
// Create the ContainerRegistry and init\n  const registry = new ContainerRegistry();\n  registry.init();\n\n  // Let's create a dummy subscriber\n  let subscribedStatus: undefined | string = undefined;\n  registry.subscribe('random', (status: string) => {\n    subscribedStatus = status;\n  });\n\n  if (!callback) throw new Error('undefined callback');\n\n  // Generate a fake event\n  callback({\n    status: 'die',\n    id: 'random',\n    type: 'container',\n  });\n\n  expect(subscribedStatus).toBe('die');\n  expect(mocks.DisposableCreateMock).toHaveBeenCalledOnce();\n});\n\ntest('ContainerRegistry unsubscribe all if container remove', () => {\n  // Get the callback created by the ContainerRegistry\n  let callback: ((event: ContainerJSONEvent) => void) | undefined;\n  mocks.onEventMock.mockImplementation((method: (event: ContainerJSONEvent) => void) => {\n    callback = method;\n  });\n\n  // Create the ContainerRegistry and init\n  const registry = new ContainerRegistry();\n  registry.init();\n\n  // Let's create a dummy subscriber\n  const subscribeMock = vi.fn();\n  registry.subscribe('random', subscribeMock);\n\n  if (!callback) throw new Error('undefined callback');\n\n  // Generate a remove event\n  callback({ status: 'remove', id: 'random', type: 'container' });\n\n  // Call it a second time\n  callback({ status: 'remove', id: 'random', type: 'container' });\n\n  // Our subscriber should only have been called once, the first, after it should have been removed.\n  expect(subscribeMock).toHaveBeenCalledOnce();\n});\n\ntest('ContainerRegistry subscriber disposed should not be called', () => {\n  // Get the callback created by the ContainerRegistry\n  let callback: ((event: ContainerJSONEvent) => void) | undefined;\n  mocks.onEventMock.mockImplementation((method: (event: ContainerJSONEvent) => void) => {\n    callback = method;\n  });\n\n  mocks.DisposableCreateMock.mockImplementation(callback => ({\n    dispose: (): void => callback(),\n  }));\n\n  // Create 
the ContainerRegistry and init\n  const registry = new ContainerRegistry();\n  registry.init();\n\n  // Let's create a dummy subscriber\n  const subscribeMock = vi.fn();\n  const disposable = registry.subscribe('random', subscribeMock);\n  disposable.dispose();\n\n  if (!callback) throw new Error('undefined callback');\n\n  // Generate a random event\n  callback({ status: 'die', id: 'random', type: 'container' });\n\n  // never should have been called\n  expect(subscribeMock).toHaveBeenCalledTimes(0);\n});\n\ntest('ContainerRegistry should fire ContainerStart when container start', () => {\n  // Get the callback created by the ContainerRegistry\n  let callback: ((event: ContainerJSONEvent) => void) | undefined;\n  mocks.onEventMock.mockImplementation((method: (event: ContainerJSONEvent) => void) => {\n    callback = method;\n  });\n\n  // Create the ContainerRegistry and init\n  const registry = new ContainerRegistry();\n  registry.init();\n\n  const startListenerMock = vi.fn();\n  registry.onStartContainerEvent(startListenerMock);\n\n  if (!callback) throw new Error('undefined callback');\n\n  // Generate a remove event\n  callback({ status: 'remove', id: 'random', type: 'container' });\n\n  expect(startListenerMock).not.toHaveBeenCalled();\n\n  // Call it a second time\n  callback({ status: 'start', id: 'random', type: 'container' });\n\n  // Our subscriber should only have been called once, the first, after it should have been removed.\n  expect(startListenerMock).toHaveBeenCalledOnce();\n});\n\ntest('ContainerRegistry should fire ContainerStop when container stop', () => {\n  // Get the callback created by the ContainerRegistry\n  let callback: ((event: ContainerJSONEvent) => void) | undefined;\n  mocks.onEventMock.mockImplementation((method: (event: ContainerJSONEvent) => void) => {\n    callback = method;\n  });\n\n  // Create the ContainerRegistry and init\n  const registry = new ContainerRegistry();\n  registry.init();\n\n  const stopListenerMock = 
vi.fn();\n  registry.onStopContainerEvent(stopListenerMock);\n\n  if (!callback) throw new Error('undefined callback');\n\n  // Generate a remove event\n  callback({ status: 'remove', id: 'random', type: 'container' });\n\n  expect(stopListenerMock).not.toHaveBeenCalled();\n\n  // Call it a second time\n  callback({ status: 'start', id: 'random', type: 'container' });\n\n  // Our subscriber should only have been called once, the first, after it should have been removed.\n  expect(stopListenerMock).not.toHaveBeenCalled();\n\n  callback({ status: 'die', id: 'random', type: 'container' });\n\n  // Our subscriber should only have been called once, the first, after it should have been removed.\n  expect(stopListenerMock).toHaveBeenCalledOnce();\n});\n"
  },
  {
    "path": "packages/backend/src/registries/ContainerRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport * as podmanDesktopApi from '@podman-desktop/api';\n\nexport type Subscriber = {\n  id: number;\n  callback: (status: string) => void;\n};\n\nexport interface ContainerEvent {\n  id: string;\n}\n\nexport interface ContainerHealthy {\n  id: string;\n}\n\nexport class ContainerRegistry implements podmanDesktopApi.Disposable {\n  private count: number = 0;\n  private subscribers: Map<string, Subscriber[]> = new Map();\n\n  private readonly _onStartContainerEvent = new podmanDesktopApi.EventEmitter<ContainerEvent>();\n  readonly onStartContainerEvent: podmanDesktopApi.Event<ContainerEvent> = this._onStartContainerEvent.event;\n  private readonly _onStopContainerEvent = new podmanDesktopApi.EventEmitter<ContainerEvent>();\n  readonly onStopContainerEvent: podmanDesktopApi.Event<ContainerEvent> = this._onStopContainerEvent.event;\n\n  private readonly _onHealthyContainerEvent = new podmanDesktopApi.EventEmitter<ContainerHealthy>();\n  readonly onHealthyContainerEvent: podmanDesktopApi.Event<ContainerHealthy> = this._onHealthyContainerEvent.event;\n\n  #eventDisposable: podmanDesktopApi.Disposable | undefined;\n\n  init(): void {\n    this.#eventDisposable = 
podmanDesktopApi.containerEngine.onEvent(event => {\n      if (event.status === 'start') {\n        this._onStartContainerEvent.fire({\n          id: event.id,\n        });\n      } else if (event.status === 'die') {\n        this._onStopContainerEvent.fire({\n          id: event.id,\n        });\n      }\n\n      if (event.status === 'health_status' && 'HealthStatus' in event && event.HealthStatus === 'healthy') {\n        this._onHealthyContainerEvent.fire({\n          id: event.id,\n        });\n      }\n\n      if (this.subscribers.has(event.id)) {\n        this.subscribers.get(event.id)?.forEach(subscriber => subscriber.callback(event.status));\n\n        // If the event type is remove, we dispose all subscribers for the specific containers\n        if (event.status === 'remove') {\n          this.subscribers.delete(event.id);\n        }\n      }\n    });\n  }\n\n  dispose(): void {\n    this.#eventDisposable?.dispose();\n  }\n\n  subscribe(containerId: string, callback: (status: string) => void): podmanDesktopApi.Disposable {\n    const subscriberId = ++this.count;\n    const nSubs: Subscriber[] = [\n      ...(this.subscribers.get(containerId) ?? []),\n      {\n        id: subscriberId,\n        callback: callback,\n      },\n    ];\n\n    this.subscribers.set(containerId, nSubs);\n    return podmanDesktopApi.Disposable.create(() => {\n      if (!this.subscribers.has(containerId)) return;\n\n      this.subscribers.set(\n        containerId,\n        nSubs.filter(subscriber => subscriber.id !== subscriberId),\n      );\n    });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/ConversationRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { Publisher } from '../utils/Publisher';\nimport type {\n  AssistantChat,\n  ChatMessage,\n  Conversation,\n  Message,\n  ModelUsage,\n  ToolCall,\n} from '@shared/models/IPlaygroundMessage';\nimport type { Disposable } from '@podman-desktop/api';\nimport { MSG_CONVERSATIONS_UPDATE } from '@shared/Messages';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nexport class ConversationRegistry extends Publisher<Conversation[]> implements Disposable {\n  #conversations: Map<string, Conversation>;\n  #counter: number;\n\n  constructor(rpcExtension: RpcExtension) {\n    super(rpcExtension, MSG_CONVERSATIONS_UPDATE, () => this.getAll());\n    this.#conversations = new Map<string, Conversation>();\n    this.#counter = 0;\n  }\n\n  getUniqueId(): string {\n    return `${++this.#counter}`;\n  }\n\n  /**\n   * Remove a message from a conversation\n   * @param conversationId\n   * @param messageId\n   */\n  removeMessage(conversationId: string, messageId: string): void {\n    const conversation: Conversation = this.get(conversationId);\n\n    conversation.messages = conversation.messages.filter(message => message.id !== messageId);\n    
this.notify();\n  }\n\n  /**\n   * Utility method to update a message content in a given conversation\n   * @param conversationId\n   * @param messageId\n   * @param message\n   */\n  update(conversationId: string, messageId: string, message: Partial<ChatMessage>): void {\n    const conversation: Conversation = this.get(conversationId);\n\n    const messageIndex = conversation.messages.findIndex(message => message.id === messageId);\n    if (messageIndex === -1)\n      throw new Error(`message with id ${messageId} does not exist in conversation ${conversationId}.`);\n\n    // Update the message with the provided content\n    conversation.messages[messageIndex] = {\n      ...conversation.messages[messageIndex],\n      ...message,\n      id: messageId, // preventing we are not updating the id\n    };\n    this.notify();\n  }\n\n  deleteConversation(id: string): void {\n    this.#conversations.delete(id);\n    this.notify();\n  }\n\n  createConversation(name: string, modelId: string): string {\n    const conversationId = this.getUniqueId();\n    this.#conversations.set(conversationId, {\n      name: name,\n      modelId: modelId,\n      messages: [],\n      id: conversationId,\n      usage: {\n        completion_tokens: 0,\n        prompt_tokens: 0,\n      } as ModelUsage,\n    });\n    this.notify();\n    return conversationId;\n  }\n\n  /**\n   * This method will be responsible for finalizing the message\n   * @param conversationId\n   * @param messageId\n   */\n  completeMessage(conversationId: string, messageId: string): void {\n    const conversation: Conversation = this.get(conversationId);\n\n    const messageIndex = conversation.messages.findIndex(message => message.id === messageId);\n    if (messageIndex === -1)\n      throw new Error(`message with id ${messageId} does not exist in conversation ${conversationId}.`);\n\n    this.update(conversationId, messageId, {\n      ...conversation.messages[messageIndex],\n      choices: undefined,\n      role: 
'assistant',\n      completed: Date.now(),\n    } as AssistantChat);\n  }\n\n  /**\n   * Utility method to quickly add a usage to a conversation\n   * @param conversationId\n   * @param usage\n   */\n  setUsage(conversationId: string, usage: ModelUsage): void {\n    const conversation: Conversation = this.get(conversationId);\n    this.#conversations.set(conversationId, {\n      ...conversation,\n      usage,\n    });\n    this.notify();\n  }\n\n  /**\n   * Utility method to quickly add a delta to a given a message inside a conversation\n   * @param conversationId\n   * @param messageId\n   * @param delta\n   */\n  textDelta(conversationId: string, messageId: string, delta: string): void {\n    const conversation: Conversation = this.get(conversationId);\n    const messageIndex = conversation.messages.findIndex(message => message.id === messageId);\n    if (messageIndex === -1) {\n      throw new Error(`message with id ${messageId} does not exist in conversation ${conversationId}.`);\n    }\n    this.update(conversationId, messageId, {\n      ...conversation.messages[messageIndex],\n      content: ((conversation.messages[messageIndex] as AssistantChat).content ?? 
'') + delta,\n    } as AssistantChat);\n  }\n\n  /**\n   * Utility method to quickly add a tool-call assistant message to a conversation\n   */\n  toolResult(conversationId: string, toolCallId: string, toolResult: string | object): void {\n    const conversation: Conversation = this.get(conversationId);\n    const messageIndex = conversation.messages.findIndex(\n      message =>\n        (message as ChatMessage)?.role === 'assistant' &&\n        ((message as AssistantChat).content as ToolCall)?.type === 'tool-call' &&\n        ((message as AssistantChat).content as ToolCall)?.toolCallId === toolCallId,\n    );\n    if (messageIndex === -1) {\n      throw new Error(`message with for tool call ${toolCallId} does not exist in conversation ${conversationId}.`);\n    }\n    const content: ToolCall = {\n      ...((conversation.messages[messageIndex] as AssistantChat).content as ToolCall),\n      result: toolResult,\n    };\n    this.update(conversationId, conversation.messages[messageIndex].id, {\n      ...conversation.messages[messageIndex],\n      completed: Date.now(),\n      content,\n    } as AssistantChat);\n  }\n\n  /**\n   * Utility method to add a new Message to a given conversation\n   * @param conversationId\n   * @param message\n   */\n  submit(conversationId: string, message: Message): void {\n    const conversation = this.#conversations.get(conversationId);\n    if (conversation === undefined) throw new Error(`conversation with id ${conversationId} does not exist.`);\n\n    this.#conversations.set(conversationId, {\n      ...conversation,\n      messages: [...conversation.messages, message],\n    });\n    this.notify();\n  }\n\n  dispose(): void {\n    this.#conversations.clear();\n  }\n\n  get(conversationId: string): Conversation {\n    const conversation: Conversation | undefined = this.#conversations.get(conversationId);\n    if (conversation === undefined) throw new Error(`conversation with id ${conversationId} does not exist.`);\n    return 
conversation;\n  }\n\n  getAll(): Conversation[] {\n    return Array.from(this.#conversations.values());\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/InferenceProviderRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { Publisher } from '../utils/Publisher';\nimport type { InferenceProvider } from '../workers/provider/InferenceProvider';\nimport { Disposable } from '@podman-desktop/api';\nimport { MSG_INFERENCE_PROVIDER_UPDATE } from '@shared/Messages';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport type { InferenceType } from '@shared/models/IInference';\n\nexport class InferenceProviderRegistry extends Publisher<string[]> {\n  #providers: Map<string, InferenceProvider>;\n  constructor(rpcExtension: RpcExtension) {\n    super(rpcExtension, MSG_INFERENCE_PROVIDER_UPDATE, () => this.getAll().map(provider => provider.name));\n    this.#providers = new Map();\n  }\n\n  register(provider: InferenceProvider): Disposable {\n    this.#providers.set(provider.name, provider);\n\n    this.notify();\n    return Disposable.create(() => {\n      this.unregister(provider.name);\n    });\n  }\n\n  unregister(name: string): void {\n    this.#providers.delete(name);\n  }\n\n  getAll(): InferenceProvider[] {\n    return Array.from(this.#providers.values());\n  }\n\n  getByType(type: InferenceType): InferenceProvider[] {\n    return 
Array.from(this.#providers.values()).filter(provider => provider.type === type);\n  }\n\n  get(name: string): InferenceProvider {\n    const provider = this.#providers.get(name);\n    if (provider === undefined) throw new Error(`no provider with name ${name} was found.`);\n    return provider;\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/LocalRepositoryRegistry.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { LocalRepositoryRegistry } from './LocalRepositoryRegistry';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport fs from 'node:fs';\nimport path from 'node:path';\nimport type { CatalogManager } from '../managers/catalogManager';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_LOCAL_REPOSITORY_UPDATE } from '@shared/Messages';\n\nconst mocks = vi.hoisted(() => ({\n  DisposableCreateMock: vi.fn(),\n}));\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    Disposable: {\n      create: mocks.DisposableCreateMock,\n    },\n  };\n});\n\nvi.mock('node:fs', () => {\n  return {\n    existsSync: vi.fn(),\n    promises: {\n      rm: vi.fn(),\n    },\n  };\n});\n\nconst catalogManagerMock = {\n  onUpdate: vi.fn(),\n  getRecipes: vi.fn(),\n} as unknown as CatalogManager;\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mock('node:fs');\n  
vi.mocked(rpcExtensionMock.fire).mockResolvedValue(true);\n});\n\ntest('should not have any repositories by default', () => {\n  const localRepositories = new LocalRepositoryRegistry(rpcExtensionMock, '/appUserDirectory', catalogManagerMock);\n  expect(localRepositories.getLocalRepositories().length).toBe(0);\n});\n\ntest('should notify webview when register', () => {\n  const localRepositories = new LocalRepositoryRegistry(rpcExtensionMock, '/appUserDirectory', catalogManagerMock);\n  localRepositories.register({ path: 'random', sourcePath: 'random', labels: { 'recipe-id': 'random' } });\n  expect(rpcExtensionMock.fire).toHaveBeenNthCalledWith(1, MSG_LOCAL_REPOSITORY_UPDATE, [\n    { path: 'random', sourcePath: 'random', labels: { 'recipe-id': 'random' } },\n  ]);\n});\n\ntest('should notify webview when unregister', async () => {\n  const localRepositories = new LocalRepositoryRegistry(rpcExtensionMock, '/appUserDirectory', catalogManagerMock);\n  vi.spyOn(fs.promises, 'rm').mockResolvedValue();\n  localRepositories.register({ path: 'random', sourcePath: 'random', labels: { 'recipe-id': 'random' } });\n  await localRepositories.deleteLocalRepository('random');\n\n  expect(rpcExtensionMock.fire).toHaveBeenLastCalledWith(MSG_LOCAL_REPOSITORY_UPDATE, []);\n});\n\ntest('should register localRepo if it find the folder of the recipe', () => {\n  vi.spyOn(fs, 'existsSync').mockReturnValue(true);\n  vi.mocked(catalogManagerMock.getRecipes).mockReturnValue([\n    {\n      id: 'recipe',\n    } as unknown as Recipe,\n  ]);\n\n  const localRepositories = new LocalRepositoryRegistry(rpcExtensionMock, '/appUserDirectory', catalogManagerMock);\n\n  const registerMock = vi.spyOn(localRepositories, 'register');\n  localRepositories.init();\n\n  const folder = path.join('/appUserDirectory', 'recipe');\n  expect(registerMock).toHaveBeenCalledWith({\n    path: folder,\n    sourcePath: folder,\n    labels: { 'recipe-id': 'recipe' },\n  });\n});\n\ntest('should register localRepo when 
catalog get updated', () => {\n  vi.spyOn(fs, 'existsSync').mockReturnValue(true);\n  vi.mocked(catalogManagerMock.getRecipes).mockReturnValue([]);\n\n  let listener: ((catalog: ApplicationCatalog) => void) | undefined = undefined;\n  vi.mocked(catalogManagerMock.onUpdate).mockImplementation((fn: (catalog: ApplicationCatalog) => void) => {\n    listener = fn;\n    return { dispose: vi.fn() };\n  });\n\n  const localRepositories = new LocalRepositoryRegistry(rpcExtensionMock, '/appUserDirectory', catalogManagerMock);\n\n  const registerMock = vi.spyOn(localRepositories, 'register');\n  localRepositories.init();\n\n  const folder = path.join('/appUserDirectory', 'recipe');\n  expect(registerMock).not.toHaveBeenCalled();\n  expect(listener).toBeDefined();\n  if (!listener) throw new Error('undefined listener');\n\n  (listener as (catalog: ApplicationCatalog) => void)({\n    recipes: [\n      {\n        id: 'recipe',\n      } as unknown as Recipe,\n    ],\n    models: [],\n    categories: [],\n  });\n\n  expect(registerMock).toHaveBeenCalledWith({\n    path: folder,\n    sourcePath: folder,\n    labels: { 'recipe-id': 'recipe' },\n  });\n});\n\ntest('should NOT register localRepo if it does not find the folder of the recipe', () => {\n  vi.spyOn(fs, 'existsSync').mockReturnValue(false);\n  vi.mocked(catalogManagerMock.getRecipes).mockReturnValue([\n    {\n      id: 'recipe',\n    } as unknown as Recipe,\n  ]);\n\n  const localRepositories = new LocalRepositoryRegistry(rpcExtensionMock, '/appUserDirectory', catalogManagerMock);\n  const registerMock = vi.spyOn(localRepositories, 'register');\n  localRepositories.init();\n  expect(registerMock).not.toHaveBeenCalled();\n});\n"
  },
  {
    "path": "packages/backend/src/registries/LocalRepositoryRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { LocalRepository } from '@shared/models/ILocalRepository';\nimport { Disposable } from '@podman-desktop/api';\nimport { Publisher } from '../utils/Publisher';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport fs from 'node:fs';\nimport path from 'node:path';\nimport type { CatalogManager } from '../managers/catalogManager';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_LOCAL_REPOSITORY_UPDATE } from '@shared/Messages';\n\n/**\n * The LocalRepositoryRegistry is responsible for keeping track of the directories where recipe are cloned\n */\nexport class LocalRepositoryRegistry extends Publisher<LocalRepository[]> implements Disposable {\n  // Map path => LocalRepository\n  private repositories: Map<string, LocalRepository> = new Map();\n  #catalogEventDisposable: Disposable | undefined;\n\n  constructor(\n    rpcExtension: RpcExtension,\n    private appUserDirectory: string,\n    private catalogManager: CatalogManager,\n  ) {\n    super(rpcExtension, MSG_LOCAL_REPOSITORY_UPDATE, () => this.getLocalRepositories());\n  }\n\n  dispose(): void {\n    this.#catalogEventDisposable?.dispose();\n  }\n\n  init(): 
void {\n    this.#catalogEventDisposable = this.catalogManager.onUpdate(({ recipes }) => {\n      this.loadLocalRecipeRepositories(recipes);\n    });\n\n    this.loadLocalRecipeRepositories(this.catalogManager.getRecipes());\n  }\n\n  register(localRepository: LocalRepository): Disposable {\n    this.repositories.set(localRepository.path, localRepository);\n    this.notify();\n\n    return Disposable.create(() => {\n      this.unregister(localRepository.path);\n    });\n  }\n\n  unregister(path: string): void {\n    this.repositories.delete(path);\n    this.notify();\n  }\n\n  async deleteLocalRepository(path: string): Promise<void> {\n    await fs.promises.rm(path, { recursive: true, force: true, maxRetries: 3 });\n    // once it has been removed, it also update the localRepo list\n    this.unregister(path);\n  }\n\n  getLocalRepositories(): LocalRepository[] {\n    return Array.from(this.repositories.values());\n  }\n\n  private loadLocalRecipeRepositories(recipes: Recipe[]): void {\n    recipes.forEach(recipe => {\n      const recipeFolder = path.join(this.appUserDirectory, recipe.id);\n      if (fs.existsSync(recipeFolder)) {\n        this.register({\n          path: recipeFolder,\n          sourcePath: path.join(recipeFolder, recipe.basedir ?? ''),\n          labels: {\n            'recipe-id': recipe.id,\n          },\n        });\n      }\n    });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/ModelHandlerRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { Publisher } from '../utils/Publisher';\nimport { Disposable } from '@podman-desktop/api';\nimport { MSG_MODEL_HANDLERS_UPDATE } from '@shared/Messages';\nimport type { ModelHandler } from '../models/ModelHandler';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nexport class ModelHandlerRegistry extends Publisher<string[]> {\n  #providers: Map<string, ModelHandler>;\n  constructor(rpcExtension: RpcExtension) {\n    super(rpcExtension, MSG_MODEL_HANDLERS_UPDATE, () => this.getAll().map(provider => provider.name));\n    this.#providers = new Map();\n  }\n\n  register(provider: ModelHandler): Disposable {\n    this.#providers.set(provider.name, provider);\n\n    this.notify();\n    return Disposable.create(() => {\n      this.unregister(provider);\n    });\n  }\n\n  unregister(provider: ModelHandler): void {\n    this.#providers.delete(provider.name);\n    this.notify();\n  }\n\n  getAll(): ModelHandler[] {\n    return Array.from(this.#providers.values());\n  }\n\n  findModelHandler(url: string): ModelHandler | undefined {\n    return Array.from(this.#providers.values()).find(modelHandler => modelHandler.accept(url));\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/NavigationRegistry.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeAll, afterAll, beforeEach, describe, expect, test, vi } from 'vitest';\nimport { commands, navigation, type WebviewPanel, type Disposable } from '@podman-desktop/api';\nimport { NavigationRegistry } from './NavigationRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_NAVIGATION_ROUTE_UPDATE } from '@shared/Messages';\n\nvi.mock('@podman-desktop/api', async () => ({\n  commands: {\n    registerCommand: vi.fn(),\n  },\n  navigation: {\n    register: vi.fn(),\n  },\n}));\n\nconst panelMock: WebviewPanel = {\n  reveal: vi.fn(),\n  webview: {\n    postMessage: vi.fn(),\n  },\n} as unknown as WebviewPanel;\n\nconst rpcExtensionMock = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.restoreAllMocks();\n});\n\ndescribe('incompatible podman-desktop', () => {\n  let register: typeof navigation.register | undefined;\n  beforeAll(() => {\n    register = navigation.register;\n    (navigation.register as unknown as undefined) = undefined;\n  });\n\n  afterAll(() => {\n    if (!register) return;\n    navigation.register = register;\n  });\n\n  test('init should not 
register command and navigation when using old version of podman', () => {\n    (navigation.register as unknown as undefined) = undefined;\n    const registry = new NavigationRegistry(panelMock, rpcExtensionMock);\n    registry.init();\n\n    expect(commands.registerCommand).not.toHaveBeenCalled();\n  });\n});\n\ntest('init should register command and navigation', () => {\n  const registry = new NavigationRegistry(panelMock, rpcExtensionMock);\n  registry.init();\n\n  expect(commands.registerCommand).toHaveBeenCalled();\n  expect(navigation.register).toHaveBeenCalled();\n});\n\ntest('dispose should dispose all command and navigation registered', () => {\n  const registry = new NavigationRegistry(panelMock, rpcExtensionMock);\n  const disposables: Disposable[] = [];\n  vi.mocked(commands.registerCommand).mockImplementation(() => {\n    const disposable: Disposable = {\n      dispose: vi.fn(),\n    };\n    disposables.push(disposable);\n    return disposable;\n  });\n  vi.mocked(navigation.register).mockImplementation(() => {\n    const disposable: Disposable = {\n      dispose: vi.fn(),\n    };\n    disposables.push(disposable);\n    return disposable;\n  });\n\n  registry.dispose();\n\n  disposables.forEach((disposable: Disposable) => {\n    expect(disposable.dispose).toHaveBeenCalledOnce();\n  });\n});\n\ntest('navigateToInferenceCreate should reveal and postMessage to webview', async () => {\n  const registry = new NavigationRegistry(panelMock, rpcExtensionMock);\n\n  await registry.navigateToInferenceCreate('dummyTrackingId');\n\n  await vi.waitFor(() => {\n    expect(panelMock.reveal).toHaveBeenCalledOnce();\n  });\n\n  expect(rpcExtensionMock.fire).toHaveBeenCalledWith(\n    MSG_NAVIGATION_ROUTE_UPDATE,\n    '/service/create?trackingId=dummyTrackingId',\n  );\n});\n\ntest('navigateToRecipeStart should reveal and postMessage to webview', async () => {\n  const registry = new NavigationRegistry(panelMock, rpcExtensionMock);\n\n  await 
registry.navigateToRecipeStart('dummyRecipeId', 'dummyTrackingId');\n\n  await vi.waitFor(() => {\n    expect(panelMock.reveal).toHaveBeenCalledOnce();\n  });\n\n  expect(rpcExtensionMock.fire).toHaveBeenCalledWith(\n    MSG_NAVIGATION_ROUTE_UPDATE,\n    '/recipe/dummyRecipeId/start?trackingId=dummyTrackingId',\n  );\n});\n\ntest('reading the route has side-effect', async () => {\n  const registry = new NavigationRegistry(panelMock, rpcExtensionMock);\n\n  await registry.navigateToRecipeStart('dummyRecipeId', 'dummyTrackingId');\n\n  expect(registry.readRoute()).toBeDefined();\n  expect(registry.readRoute()).toBeUndefined();\n});\n"
  },
  {
    "path": "packages/backend/src/registries/NavigationRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { type Disposable, navigation, type WebviewPanel, commands } from '@podman-desktop/api';\nimport { MSG_NAVIGATION_ROUTE_UPDATE } from '@shared/Messages';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nexport const RECIPE_START_ROUTE = 'recipe.start';\nexport const RECIPE_START_NAVIGATE_COMMAND = 'ai-lab.navigation.recipe.start';\n\nexport const INFERENCE_CREATE_ROUTE = 'inference.create';\nexport const INFERENCE_CREATE_NAVIGATE_COMMAND = 'ai-lab.navigation.inference.create';\n\nexport class NavigationRegistry implements Disposable {\n  #disposables: Disposable[] = [];\n  #route: string | undefined = undefined;\n\n  constructor(\n    private panel: WebviewPanel,\n    private rpcExtension: RpcExtension,\n  ) {}\n\n  init(): void {\n    if (!navigation.register) {\n      console.warn('this version of podman-desktop do not support task actions: some feature will not be available.');\n      return;\n    }\n\n    // register the recipes start navigation and command\n    this.#disposables.push(\n      commands.registerCommand(RECIPE_START_NAVIGATE_COMMAND, this.navigateToRecipeStart.bind(this)),\n    );\n    
this.#disposables.push(navigation.register(RECIPE_START_ROUTE, RECIPE_START_NAVIGATE_COMMAND));\n\n    // register the inference create navigation and command\n    this.#disposables.push(\n      commands.registerCommand(INFERENCE_CREATE_NAVIGATE_COMMAND, this.navigateToInferenceCreate.bind(this)),\n    );\n    this.#disposables.push(navigation.register(INFERENCE_CREATE_ROUTE, INFERENCE_CREATE_NAVIGATE_COMMAND));\n  }\n\n  /**\n   * This function returns the route, and resets it.\n   * Meaning that after reading, the route is undefined\n   */\n  public readRoute(): string | undefined {\n    const result: string | undefined = this.#route;\n    this.#route = undefined;\n    return result;\n  }\n\n  dispose(): void {\n    this.#disposables.forEach(disposable => disposable.dispose());\n  }\n\n  protected async updateRoute(route: string): Promise<void> {\n    await this.rpcExtension.fire(MSG_NAVIGATION_ROUTE_UPDATE, route);\n    this.#route = route;\n    this.panel.reveal();\n  }\n\n  public async navigateToRecipeStart(recipeId: string, trackingId: string): Promise<void> {\n    return this.updateRoute(`/recipe/${recipeId}/start?trackingId=${trackingId}`);\n  }\n\n  public async navigateToInferenceCreate(trackingId: string): Promise<void> {\n    return this.updateRoute(`/service/create?trackingId=${trackingId}`);\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/registries/TaskRegistry.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { TaskRegistry } from './TaskRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nconst rpcExtension = {\n  fire: vi.fn(),\n} as unknown as RpcExtension;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(rpcExtension.fire).mockResolvedValue(true);\n});\n\ntest('should not have any tasks by default', () => {\n  const taskRegistry = new TaskRegistry(rpcExtension);\n  expect(taskRegistry.getTasks().length).toBe(0);\n});\n\ntest('dispose should cleanup all tasks', () => {\n  const taskRegistry = new TaskRegistry(rpcExtension);\n\n  taskRegistry.createTask('random', 'loading');\n  expect(taskRegistry.getTasks()).toHaveLength(1);\n\n  taskRegistry.dispose();\n  expect(taskRegistry.getTasks()).toHaveLength(0);\n});\n\ntest('should notify when create task', () => {\n  const taskRegistry = new TaskRegistry(rpcExtension);\n  taskRegistry.createTask('random', 'loading');\n  expect(rpcExtension.fire).toHaveBeenCalled();\n});\n\ntest('should notify when update task', () => {\n  const taskRegistry = new TaskRegistry(rpcExtension);\n  const task = 
taskRegistry.createTask('random', 'loading');\n  taskRegistry.updateTask(task);\n\n  expect(rpcExtension.fire).toHaveBeenCalledTimes(2);\n});\n\ntest('should get tasks by label', () => {\n  const taskRegistry = new TaskRegistry(rpcExtension);\n\n  taskRegistry.createTask('random-1', 'loading', { index: '1' });\n  taskRegistry.createTask('random-2', 'loading', { index: '2' });\n\n  const tasksWithIndex1 = taskRegistry.getTasksByLabels({ index: '1' });\n  const tasksWithIndex2 = taskRegistry.getTasksByLabels({ index: '2' });\n\n  expect(tasksWithIndex1.length).toBe(1);\n  expect(tasksWithIndex2.length).toBe(1);\n  expect(tasksWithIndex1[0].name).toBe('random-1');\n  expect(tasksWithIndex2[0].name).toBe('random-2');\n});\n\ntest('should delete tasks by label', () => {\n  const taskRegistry = new TaskRegistry(rpcExtension);\n\n  taskRegistry.createTask('random-1', 'loading', { index: '1' });\n  taskRegistry.createTask('random-2', 'loading', { index: '2' });\n\n  taskRegistry.deleteByLabels({ index: '1' });\n\n  expect(taskRegistry.getTasks().length).toBe(1);\n  expect(taskRegistry.getTasks()[0].name).toBe('random-2');\n});\n\ntest('should get tasks by multiple labels', () => {\n  const taskRegistry = new TaskRegistry(rpcExtension);\n\n  taskRegistry.createTask('task-1', 'loading', { type: 'A', priority: 'high' });\n  taskRegistry.createTask('task-2', 'loading', { type: 'B', priority: 'low' });\n  taskRegistry.createTask('task-3', 'loading', { type: 'A', priority: 'medium' });\n\n  const tasksWithTypeA = taskRegistry.getTasksByLabels({ type: 'A' });\n  const tasksWithHighPriority = taskRegistry.getTasksByLabels({ priority: 'high' });\n  const tasksWithTypeAAndHighPriority = taskRegistry.getTasksByLabels({ type: 'A', priority: 'high' });\n\n  expect(tasksWithTypeA.length).toBe(2);\n  expect(tasksWithHighPriority.length).toBe(1);\n  expect(tasksWithTypeAAndHighPriority.length).toBe(1);\n  expect(tasksWithTypeAAndHighPriority[0].name).toBe('task-1');\n});\n"
  },
  {
    "path": "packages/backend/src/registries/TaskRegistry.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { type Disposable } from '@podman-desktop/api';\nimport type { Task, TaskState } from '@shared/models/ITask';\nimport { MSG_TASKS_UPDATE } from '@shared/Messages';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\n/**\n * A registry for managing tasks.\n */\nexport class TaskRegistry implements Disposable {\n  private counter: number = 0;\n  private tasks: Map<string, Task> = new Map<string, Task>();\n\n  /**\n   * Constructs a new TaskRegistry.\n   * @param rpcExtension The rpc extension instance to use for communication.\n   */\n  constructor(private rpcExtension: RpcExtension) {}\n\n  dispose(): void {\n    this.counter = 0;\n    this.tasks.clear();\n  }\n\n  /**\n   * Retrieves a task by its ID.\n   * @param id The ID of the task to retrieve.\n   * @returns The task with the specified ID, or undefined if not found.\n   */\n  get(id: string): Task | undefined {\n    if (this.tasks.has(id)) return this.tasks.get(id);\n    return undefined;\n  }\n\n  /**\n   * Creates a new task.\n   * @param name The name of the task.\n   * @param state The initial state of the task.\n   * @param labels Optional labels for the task.\n   * @returns The 
newly created task.\n   */\n  createTask(name: string, state: TaskState, labels: { [id: string]: string } = {}): Task {\n    const task = {\n      id: `task-${++this.counter}`,\n      name: name,\n      state: state,\n      labels: labels,\n    };\n    this.tasks.set(task.id, task);\n    this.notify();\n    return task;\n  }\n\n  /**\n   * Updates an existing task.\n   * @param task The task to update.\n   * @throws Error if the task with the specified ID does not exist.\n   */\n  updateTask(task: Task): void {\n    if (!this.tasks.has(task.id)) throw new Error(`Task with id ${task.id} does not exist.`);\n    this.tasks.set(task.id, {\n      ...task,\n      state: task.error !== undefined ? 'error' : task.state, // enforce error state when error is defined\n    });\n    this.notify();\n  }\n\n  /**\n   * Deletes a task by its ID.\n   * @param taskId The ID of the task to delete.\n   */\n  delete(taskId: string): void {\n    this.deleteAll([taskId]);\n  }\n\n  /**\n   * Deletes multiple tasks by their IDs.\n   * @param taskIds The IDs of the tasks to delete.\n   */\n  deleteAll(taskIds: string[]): void {\n    taskIds.forEach(taskId => this.tasks.delete(taskId));\n    this.notify();\n  }\n\n  /**\n   * Retrieves all tasks.\n   * @returns An array of all tasks.\n   */\n  getTasks(): Task[] {\n    return Array.from(this.tasks.values());\n  }\n\n  /**\n   * Retrieves tasks that match the specified labels.\n   * @param requestedLabels The labels to match against.\n   * @returns An array of tasks that match the specified labels.\n   */\n  getTasksByLabels(requestedLabels: { [key: string]: string }): Task[] {\n    return this.getTasks().filter(task => this.filter(task, requestedLabels));\n  }\n\n  /**\n   * Return the first task matching all the labels provided\n   * @param requestedLabels\n   */\n  findTaskByLabels(requestedLabels: { [key: string]: string }): Task | undefined {\n    return this.getTasks().find(task => this.filter(task, requestedLabels));\n  }\n\n  private 
filter(task: Task, requestedLabels: { [key: string]: string }): boolean {\n    const labels = task.labels;\n    if (labels === undefined) return false;\n\n    for (const [key, value] of Object.entries(requestedLabels)) {\n      if (!(key in labels) || labels[key] !== value) return false;\n    }\n\n    return true;\n  }\n\n  /**\n   * Deletes tasks that match the specified labels.\n   * @param labels The labels to match against for deletion.\n   */\n  deleteByLabels(labels: { [key: string]: string }): void {\n    this.deleteAll(this.getTasksByLabels(labels).map(task => task.id));\n  }\n\n  private notify(): void {\n    this.rpcExtension.fire(MSG_TASKS_UPDATE, this.getTasks()).catch((err: unknown) => {\n      console.error('error notifying tasks', err);\n    });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/studio-api-impl.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/* eslint-disable @typescript-eslint/no-explicit-any */\n\nimport { beforeEach, expect, test, vi, describe } from 'vitest';\nimport content from './tests/ai-test.json';\nimport type { ApplicationManager } from './managers/application/applicationManager';\nimport { StudioApiImpl } from './studio-api-impl';\nimport type { InferenceManager } from './managers/inference/inferenceManager';\nimport type { ContainerProviderConnection, ProviderContainerConnection, TelemetryLogger } from '@podman-desktop/api';\nimport { window, EventEmitter, navigation } from '@podman-desktop/api';\nimport { CatalogManager } from './managers/catalogManager';\nimport type { ModelsManager } from './managers/modelsManager';\nimport { timeout } from './utils/utils';\nimport type { TaskRegistry } from './registries/TaskRegistry';\nimport { LocalRepositoryRegistry } from './registries/LocalRepositoryRegistry';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { PlaygroundV2Manager } from './managers/playgroundV2Manager';\nimport type { SnippetManager } from './managers/SnippetManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { 
CancellationTokenRegistry } from './registries/CancellationTokenRegistry';\nimport path from 'node:path';\nimport type { LocalModelImportInfo } from '@shared/models/ILocalModelInfo';\nimport * as podman from './utils/podman';\nimport type { ConfigurationRegistry } from './registries/ConfigurationRegistry';\nimport type { RecipeManager } from './managers/recipes/RecipeManager';\nimport type { PodmanConnection } from './managers/podmanConnection';\nimport type { NavigationRegistry } from './registries/NavigationRegistry';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\n\nvi.mock('./ai.json', () => {\n  return {\n    default: content,\n  };\n});\n\nvi.mock('node:fs', () => {\n  return {\n    existsSync: vi.fn(),\n    promises: {\n      readFile: vi.fn(),\n    },\n  };\n});\n\nconst mocks = vi.hoisted(() => ({\n  withProgressMock: vi.fn(),\n  showWarningMessageMock: vi.fn(),\n  deleteApplicationMock: vi.fn(),\n  uriFileMock: vi.fn(),\n  openExternalMock: vi.fn(),\n}));\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    EventEmitter: vi.fn(),\n    window: {\n      withProgress: mocks.withProgressMock,\n      showWarningMessage: mocks.showWarningMessageMock,\n      showErrorMessage: vi.fn(),\n      showOpenDialog: vi.fn(),\n    },\n    ProgressLocation: {\n      TASK_WIDGET: 'TASK_WIDGET',\n    },\n    fs: {\n      createFileSystemWatcher: (): unknown => ({\n        onDidCreate: vi.fn(),\n        onDidDelete: vi.fn(),\n        onDidChange: vi.fn(),\n      }),\n    },\n    Uri: {\n      file: mocks.uriFileMock,\n    },\n    env: {\n      openExternal: mocks.openExternalMock,\n    },\n    navigation: {\n      navigateToResources: vi.fn(),\n      navigateToEditProviderContainerConnection: vi.fn(),\n    },\n  };\n});\n\nlet studioApiImpl: StudioApiImpl;\nlet catalogManager: CatalogManager;\nlet localRepositoryRegistry: LocalRepositoryRegistry;\nlet applicationManager: ApplicationManager;\n\nconst podmanConnectionMock: PodmanConnection = {\n 
 findRunningContainerProviderConnection: vi.fn(),\n} as unknown as PodmanConnection;\n\nbeforeEach(async () => {\n  vi.resetAllMocks();\n\n  const appUserDirectory = '.';\n\n  // Creating CatalogManager\n  catalogManager = new CatalogManager(\n    {\n      fire: vi.fn().mockResolvedValue(true),\n    } as unknown as RpcExtension,\n    appUserDirectory,\n  );\n\n  applicationManager = {\n    removeApplication: mocks.deleteApplicationMock,\n    requestPullApplication: vi.fn(),\n  } as unknown as ApplicationManager;\n\n  localRepositoryRegistry = new LocalRepositoryRegistry(\n    {\n      fire: vi.fn().mockResolvedValue(true),\n    } as unknown as RpcExtension,\n    appUserDirectory,\n    {} as unknown as CatalogManager,\n  );\n\n  const telemetryMock = {\n    logUsage: vi.fn(),\n    logError: vi.fn(),\n  } as unknown as TelemetryLogger;\n\n  // Creating StudioApiImpl\n  studioApiImpl = new StudioApiImpl(\n    applicationManager,\n    catalogManager,\n    {} as ModelsManager,\n    telemetryMock,\n    localRepositoryRegistry,\n    {} as unknown as TaskRegistry,\n    {} as unknown as InferenceManager,\n    {} as unknown as PlaygroundV2Manager,\n    {} as unknown as SnippetManager,\n    {} as unknown as CancellationTokenRegistry,\n    {} as unknown as ConfigurationRegistry,\n    {} as unknown as RecipeManager,\n    podmanConnectionMock,\n    {} as unknown as NavigationRegistry,\n  );\n  vi.mock('node:fs');\n\n  const listeners: ((value: unknown) => void)[] = [];\n\n  vi.mocked(EventEmitter).mockReturnValue({\n    event: vi.fn().mockImplementation(callback => {\n      listeners.push(callback);\n    }),\n    fire: vi.fn().mockImplementation((content: unknown) => {\n      listeners.forEach(listener => listener(content));\n    }),\n  } as unknown as EventEmitter<unknown>);\n});\n\ndescribe.each([true, false])('with model is %o', withModel => {\n  test('expect requestPullApplication to provide a tracking id', async () => {\n    const connectionMock = {\n      name: 'Podman 
machine',\n    } as unknown as ContainerProviderConnection;\n    vi.mocked(podmanConnectionMock.findRunningContainerProviderConnection).mockReturnValue(connectionMock);\n    vi.spyOn(catalogManager, 'getRecipes').mockReturnValue([\n      {\n        id: 'recipe 1',\n      } as unknown as Recipe,\n    ]);\n    vi.spyOn(catalogManager, 'getModelById').mockReturnValue({\n      id: 'model 1',\n    } as unknown as ModelInfo);\n\n    vi.mocked(applicationManager.requestPullApplication).mockResolvedValue('dummy-tracker');\n\n    const recipeId = 'recipe 1';\n    let modelId: string | undefined;\n    if (withModel) {\n      modelId = 'model1';\n    }\n    const trackingId = await studioApiImpl.requestPullApplication(withModel ? { recipeId, modelId } : { recipeId });\n    expect(applicationManager.requestPullApplication).toHaveBeenCalledWith({\n      connection: connectionMock,\n      recipe: expect.objectContaining({\n        id: 'recipe 1',\n      }),\n      model: withModel\n        ? expect.objectContaining({\n            id: 'model 1',\n          })\n        : undefined,\n    });\n    expect(trackingId).toBe('dummy-tracker');\n  });\n});\n\ntest('requestRemoveApplication should ask confirmation', async () => {\n  vi.spyOn(catalogManager, 'getRecipeById').mockReturnValue({\n    name: 'Recipe 1',\n  } as unknown as Recipe);\n  mocks.showWarningMessageMock.mockResolvedValue('Confirm');\n  await studioApiImpl.requestRemoveApplication('recipe-id-1', 'model-id-1');\n  await timeout(0);\n  expect(mocks.deleteApplicationMock).toHaveBeenCalled();\n});\n\ntest('requestDeleteLocalRepository should ask confirmation', async () => {\n  mocks.showWarningMessageMock.mockResolvedValue('Confirm');\n  const deleteLocalRepositoryMock = vi.spyOn(localRepositoryRegistry, 'deleteLocalRepository').mockResolvedValue();\n  await studioApiImpl.requestDeleteLocalRepository('path');\n  await timeout(0);\n  expect(deleteLocalRepositoryMock).toHaveBeenCalled();\n});\n\ntest('if 
requestDeleteLocalRepository fails an errorMessage should show up', async () => {\n  mocks.showWarningMessageMock.mockResolvedValue('Confirm');\n  const deleteLocalRepositoryMock = vi\n    .spyOn(localRepositoryRegistry, 'deleteLocalRepository')\n    .mockRejectedValue('error deleting');\n  const errorMessageMock = vi.spyOn(window, 'showErrorMessage').mockResolvedValue('');\n  await studioApiImpl.requestDeleteLocalRepository('path');\n  await timeout(0);\n  expect(deleteLocalRepositoryMock).toHaveBeenCalled();\n  expect(errorMessageMock).toBeCalledWith('Error deleting local path \"path\". Error: error deleting');\n});\n\ndescribe.each([{ os: 'windows' }, { os: 'linux' }, { os: 'macos' }])('verify openVSCode', ({ os }) => {\n  test(`check openVSCode generates the correct URL on ${os}`, async () => {\n    vi.mock('node:path');\n    vi.spyOn(path, 'isAbsolute').mockReturnValue(true);\n    vi.spyOn(path, 'normalize').mockImplementation((path: string) => {\n      return path;\n    });\n    const folder = os === 'windows' ? 
'C:\\\\\\\\Users\\\\\\\\podman-desktop\\\\\\\\work' : '/home/podman-desktop/work';\n\n    mocks.uriFileMock.mockImplementation((path: string) => {\n      return {\n        path: path,\n        with: (change?: {\n          scheme?: string;\n          authority?: string;\n          path?: string;\n          query?: string;\n          fragment?: string;\n        }): unknown => {\n          return {\n            path: path,\n            ...change,\n          };\n        },\n      };\n    });\n\n    mocks.openExternalMock.mockResolvedValue(true);\n    await studioApiImpl.openVSCode(folder);\n    expect(mocks.openExternalMock).toHaveBeenCalledWith(\n      expect.objectContaining({ path: expect.stringMatching(/^\\//), authority: 'file', scheme: 'vscode' }),\n    );\n  });\n});\n\ntest('openDialog should call podmanDesktopAPi showOpenDialog', async () => {\n  const openDialogMock = vi.spyOn(window, 'showOpenDialog');\n  await studioApiImpl.openDialog({\n    title: 'title',\n  });\n  expect(openDialogMock).toBeCalledWith({\n    title: 'title',\n  });\n});\n\ntest('importModels should call catalogManager', async () => {\n  const addLocalModelsMock = vi\n    .spyOn(catalogManager, 'importUserModels')\n    .mockImplementation((_models: LocalModelImportInfo[]) => Promise.resolve());\n  const models: LocalModelImportInfo[] = [\n    {\n      name: 'name',\n      path: 'path',\n    },\n    {\n      name: 'name1',\n      path: 'path1',\n    },\n  ];\n  await studioApiImpl.importModels(models);\n  expect(addLocalModelsMock).toBeCalledWith(models);\n});\n\ndescribe('validateLocalModel', () => {\n  test('Expect validateLocalModel to complete as path is valid', async () => {\n    vi.mock('node:path');\n    vi.spyOn(path, 'resolve').mockImplementation((path: string) => {\n      return path;\n    });\n    vi.spyOn(path, 'join').mockImplementation((path1: string, path2: string) => `${path1}/${path2}`);\n    vi.spyOn(studioApiImpl, 'getModelsInfo').mockResolvedValue([\n      {\n        id: 
'model',\n        file: {\n          path: 'path1',\n          file: 'file.gguf',\n        },\n      } as unknown as ModelInfo,\n    ]);\n    await studioApiImpl.validateLocalModel({\n      path: 'path',\n      name: 'file.gguf',\n    });\n  });\n\n  test('Expect validateLocalModel to raise an error as path is valid', async () => {\n    vi.mock('node:path');\n    vi.spyOn(path, 'resolve').mockImplementation((path: string) => {\n      return path;\n    });\n    vi.spyOn(path, 'dirname').mockReturnValue('path');\n    vi.spyOn(path, 'basename').mockReturnValue('file.gguf');\n    vi.spyOn(path, 'join').mockImplementation((path1: string, path2: string) => `${path1}/${path2}`);\n    vi.spyOn(studioApiImpl, 'getModelsInfo').mockResolvedValue([\n      {\n        id: 'model',\n        file: {\n          path: 'path',\n          file: 'file.gguf',\n        },\n      } as unknown as ModelInfo,\n    ]);\n    await expect(\n      studioApiImpl.validateLocalModel({\n        path: 'path/file.gguf',\n        name: 'file',\n      }),\n    ).rejects.toThrowError('file already imported');\n  });\n});\n\ntest('navigateToResources should call navigation.navigateToResources', async () => {\n  const navigationSpy = vi.spyOn(navigation, 'navigateToResources');\n  await studioApiImpl.navigateToResources();\n  await timeout(0);\n  expect(navigationSpy).toHaveBeenCalled();\n});\n\ntest('navigateToEditConnectionProvider should call navigation.navigateToEditProviderContainerConnection', async () => {\n  const connection: ProviderContainerConnection = {\n    providerId: 'id',\n    connection: {\n      endpoint: {\n        socketPath: '/path',\n      },\n      name: 'name',\n      type: 'podman',\n      status: vi.fn(),\n    },\n  };\n  vi.spyOn(podman, 'getPodmanConnection').mockReturnValue(connection);\n  const navigationSpy = vi.spyOn(navigation, 'navigateToEditProviderContainerConnection');\n  await studioApiImpl.navigateToEditConnectionProvider('connection');\n  await timeout(0);\n  
expect(navigationSpy).toHaveBeenCalledWith(connection);\n});\n"
  },
  {
    "path": "packages/backend/src/studio-api-impl.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { StudioAPI } from '@shared/StudioAPI';\nimport type { ApplicationManager } from './managers/application/applicationManager';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport * as podmanDesktopApi from '@podman-desktop/api';\n\nimport type { CatalogManager } from './managers/catalogManager';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport type { ModelsManager } from './managers/modelsManager';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport type { Task } from '@shared/models/ITask';\nimport type { TaskRegistry } from './registries/TaskRegistry';\nimport type { LocalRepository } from '@shared/models/ILocalRepository';\nimport type { LocalRepositoryRegistry } from './registries/LocalRepositoryRegistry';\nimport path from 'node:path';\nimport type { InferenceServer, InferenceType } from '@shared/models/IInference';\nimport type { CreationInferenceServerOptions } from '@shared/models/InferenceServerConfig';\nimport type { InferenceManager } from './managers/inference/inferenceManager';\nimport type { Conversation } from '@shared/models/IPlaygroundMessage';\nimport 
type { PlaygroundV2Manager } from './managers/playgroundV2Manager';\nimport { getFreeRandomPort } from './utils/ports';\nimport { withDefaultConfiguration } from './utils/inferenceUtils';\nimport type { RequestOptions } from '@shared/models/RequestOptions';\nimport type { SnippetManager } from './managers/SnippetManager';\nimport type { Language } from 'postman-code-generators';\nimport type { ModelOptions } from '@shared/models/IModelOptions';\nimport type { CancellationTokenRegistry } from './registries/CancellationTokenRegistry';\nimport type { LocalModelImportInfo } from '@shared/models/ILocalModelInfo';\nimport { getPodmanConnection } from './utils/podman';\nimport type {\n  CheckContainerConnectionResourcesOptions,\n  ContainerConnectionInfo,\n  ContainerProviderConnectionInfo,\n} from '@shared/models/IContainerConnectionInfo';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport type { ConfigurationRegistry } from './registries/ConfigurationRegistry';\nimport type { RecipeManager } from './managers/recipes/RecipeManager';\nimport type { PodmanConnection } from './managers/podmanConnection';\nimport { isRecipePullOptionsWithModelInference, type RecipePullOptions } from '@shared/models/IRecipe';\nimport type { ContainerProviderConnection } from '@podman-desktop/api';\nimport type { NavigationRegistry } from './registries/NavigationRegistry';\nimport type { FilterRecipesResult, RecipeFilters } from '@shared/models/FilterRecipesResult';\nimport type { ApplicationOptions } from './models/ApplicationOptions';\n\ninterface PortQuickPickItem extends podmanDesktopApi.QuickPickItem {\n  port: number;\n}\n\nexport class StudioApiImpl implements StudioAPI {\n  constructor(\n    private applicationManager: ApplicationManager,\n    private catalogManager: CatalogManager,\n    private modelsManager: ModelsManager,\n    private telemetry: podmanDesktopApi.TelemetryLogger,\n    private localRepositories: LocalRepositoryRegistry,\n    
private taskRegistry: TaskRegistry,\n    private inferenceManager: InferenceManager,\n    private playgroundV2: PlaygroundV2Manager,\n    private snippetManager: SnippetManager,\n    private cancellationTokenRegistry: CancellationTokenRegistry,\n    private configurationRegistry: ConfigurationRegistry,\n    private recipeManager: RecipeManager,\n    private podmanConnection: PodmanConnection,\n    private navigationRegistry: NavigationRegistry,\n  ) {}\n\n  async readRoute(): Promise<string | undefined> {\n    return this.navigationRegistry.readRoute();\n  }\n\n  async requestDeleteConversation(conversationId: string): Promise<void> {\n    // Do not wait on the promise as the api would probably timeout before the user answer.\n    podmanDesktopApi.window\n      .showWarningMessage(`Are you sure you want to delete this playground ?`, 'Confirm', 'Cancel')\n      .then((result: string | undefined) => {\n        if (result === 'Confirm') {\n          this.playgroundV2.deleteConversation(conversationId);\n        }\n      })\n      .catch((err: unknown) => {\n        console.error(`Something went wrong with confirmation modals`, err);\n      });\n  }\n\n  async requestCreatePlayground(name: string, model: ModelInfo): Promise<string> {\n    try {\n      return await this.playgroundV2.requestCreatePlayground(name, model);\n    } catch (err: unknown) {\n      console.error('Something went wrong while trying to create playground environment', err);\n      throw err;\n    }\n  }\n\n  submitPlaygroundMessage(containerId: string, userInput: string, options?: ModelOptions): Promise<number> {\n    return this.playgroundV2.submit(containerId, userInput, options);\n  }\n\n  async setPlaygroundSystemPrompt(conversationId: string, content: string | undefined): Promise<void> {\n    this.playgroundV2.setSystemPrompt(conversationId, content);\n  }\n\n  async getPlaygroundConversations(): Promise<Conversation[]> {\n    return this.playgroundV2.getConversations();\n  }\n\n  async 
getExtensionConfiguration(): Promise<ExtensionConfiguration> {\n    return this.configurationRegistry.getExtensionConfiguration();\n  }\n\n  async getPodmanDesktopVersion(): Promise<string> {\n    return this.configurationRegistry.getPodmanDesktopVersion();\n  }\n\n  async updateExtensionConfiguration(update: Partial<ExtensionConfiguration>): Promise<void> {\n    return this.configurationRegistry.updateExtensionConfiguration(update);\n  }\n\n  async getSnippetLanguages(): Promise<Language[]> {\n    return this.snippetManager.getLanguageList();\n  }\n\n  createSnippet(options: RequestOptions, language: string, variant: string): Promise<string> {\n    return this.snippetManager.generate(options, language, variant);\n  }\n\n  async getInferenceServers(): Promise<InferenceServer[]> {\n    return this.inferenceManager.getServers();\n  }\n\n  async getRegisteredProviders(): Promise<InferenceType[]> {\n    return this.inferenceManager.getRegisteredProviders();\n  }\n\n  async requestDeleteInferenceServer(...containerIds: string[]): Promise<void> {\n    // Do not wait on the promise as the api would probably timeout before the user answer.\n    if (containerIds.length === 0) throw new Error('At least one container id should be provided.');\n\n    let dialogMessage: string;\n    if (containerIds.length === 1) {\n      dialogMessage = `Are you sure you want to delete this service ?`;\n    } else {\n      dialogMessage = `Are you sure you want to delete those ${containerIds.length} services ?`;\n    }\n\n    podmanDesktopApi.window\n      .showWarningMessage(dialogMessage, 'Confirm', 'Cancel')\n      .then((result: string | undefined) => {\n        if (result !== 'Confirm') return;\n\n        Promise.all(containerIds.map(containerId => this.inferenceManager.deleteInferenceServer(containerId))).catch(\n          (err: unknown) => {\n            console.error('Something went wrong while trying to delete the inference server', err);\n          },\n        );\n      })\n      
.catch((err: unknown) => {\n        console.error(`Something went wrong with confirmation modals`, err);\n      });\n  }\n\n  async requestCreateInferenceServer(options: CreationInferenceServerOptions): Promise<string> {\n    try {\n      const config = await withDefaultConfiguration(options);\n      return this.inferenceManager.requestCreateInferenceServer(config);\n    } catch (err: unknown) {\n      console.error('Something went wrong while trying to start inference server', err);\n      throw err;\n    }\n  }\n\n  startInferenceServer(containerId: string): Promise<void> {\n    return this.inferenceManager.startInferenceServer(containerId);\n  }\n\n  stopInferenceServer(containerId: string): Promise<void> {\n    return this.inferenceManager.stopInferenceServer(containerId);\n  }\n\n  async ping(): Promise<string> {\n    return 'pong';\n  }\n\n  async openURL(url: string): Promise<boolean> {\n    return await podmanDesktopApi.env.openExternal(podmanDesktopApi.Uri.parse(url));\n  }\n\n  async openFile(file: string, recipeId?: string): Promise<boolean> {\n    const telemetry: Record<string, unknown> = {\n      'recipe.id': recipeId,\n    };\n    try {\n      return await podmanDesktopApi.env.openExternal(podmanDesktopApi.Uri.file(file));\n    } catch (err) {\n      telemetry['errorMessage'] = String(err);\n      throw err;\n    } finally {\n      this.telemetry.logUsage('studio.open-file', telemetry);\n    }\n  }\n\n  async openDialog(options?: podmanDesktopApi.OpenDialogOptions): Promise<podmanDesktopApi.Uri[] | undefined> {\n    return await podmanDesktopApi.window.showOpenDialog(options);\n  }\n\n  async cloneApplication(recipeId: string): Promise<void> {\n    const recipe = this.catalogManager.getRecipes().find(recipe => recipe.id === recipeId);\n    if (!recipe) throw new Error(`recipe with if ${recipeId} not found`);\n\n    return this.recipeManager.cloneRecipe(recipe);\n  }\n\n  async getContainerProviderConnection(): 
Promise<ContainerProviderConnectionInfo[]> {\n    return this.podmanConnection.getContainerProviderConnectionInfo();\n  }\n\n  async requestPullApplication(options: RecipePullOptions): Promise<string> {\n    const recipe = this.catalogManager.getRecipes().find(recipe => recipe.id === options.recipeId);\n    if (!recipe) throw new Error(`recipe with if ${options.recipeId} not found`);\n\n    let connection: ContainerProviderConnection | undefined = undefined;\n    if (options.connection) {\n      connection = this.podmanConnection.getContainerProviderConnection(options.connection);\n    } else {\n      connection = this.podmanConnection.findRunningContainerProviderConnection();\n    }\n\n    if (!connection) throw new Error('no running container provider connection found.');\n\n    let model: ModelInfo | undefined;\n    let opts: ApplicationOptions;\n    if (isRecipePullOptionsWithModelInference(options)) {\n      model = this.catalogManager.getModelById(options.modelId);\n      opts = {\n        connection,\n        recipe,\n        dependencies: options.dependencies,\n        model,\n      };\n    } else {\n      opts = {\n        connection,\n        recipe,\n        dependencies: options.dependencies,\n      };\n    }\n\n    return this.applicationManager.requestPullApplication(opts);\n  }\n\n  async getModelsInfo(): Promise<ModelInfo[]> {\n    return this.modelsManager.getModelsInfo();\n  }\n\n  getModelMetadata(modelId: string): Promise<Record<string, unknown>> {\n    return this.modelsManager.getModelMetadata(modelId);\n  }\n\n  async getCatalog(): Promise<ApplicationCatalog> {\n    return this.catalogManager.getCatalog();\n  }\n\n  async filterRecipes(filters: RecipeFilters): Promise<FilterRecipesResult> {\n    return this.catalogManager.filterRecipes(filters);\n  }\n\n  async requestRemoveLocalModel(modelId: string): Promise<void> {\n    const modelInfo = this.modelsManager.getLocalModelInfo(modelId);\n\n    // Do not wait on the promise as the api would 
probably timeout before the user answer.\n    podmanDesktopApi.window\n      .showWarningMessage(\n        `Are you sure you want to delete ${modelId} ? The following files will be removed from disk \"${modelInfo.file}\".`,\n        'Confirm',\n        'Cancel',\n      )\n      .then((result: string | undefined) => {\n        if (result === 'Confirm') {\n          this.modelsManager.deleteModel(modelId).catch((err: unknown) => {\n            console.error('Something went wrong while deleting the models', err);\n            // Lets reloads the models (could fix the issue)\n            this.modelsManager.loadLocalModels().catch((err: unknown) => {\n              console.error('Cannot reload the models', err);\n            });\n          });\n        }\n      })\n      .catch((err: unknown) => {\n        console.error(`Something went wrong with confirmation modals`, err);\n      });\n  }\n\n  navigateToContainer(containerId: string): Promise<void> {\n    return podmanDesktopApi.navigation.navigateToContainer(containerId);\n  }\n\n  async navigateToPod(podId: string): Promise<void> {\n    const pods = await podmanDesktopApi.containerEngine.listPods();\n    const pod = pods.find(pod => pod.Id === podId);\n    if (pod === undefined) throw new Error(`Pod with id ${podId} not found.`);\n    return podmanDesktopApi.navigation.navigateToPod(pod.kind, pod.Name, pod.engineId);\n  }\n\n  async navigateToResources(): Promise<void> {\n    // navigateToResources is only available from desktop 1.10\n    if (podmanDesktopApi.navigation.navigateToResources) {\n      return podmanDesktopApi.navigation.navigateToResources();\n    }\n  }\n\n  async navigateToEditConnectionProvider(connectionName: string): Promise<void> {\n    // navigateToEditProviderContainerConnection is only available from desktop 1.10\n    if (podmanDesktopApi.navigation.navigateToEditProviderContainerConnection) {\n      const connection = getPodmanConnection(connectionName);\n      return 
podmanDesktopApi.navigation.navigateToEditProviderContainerConnection(connection);\n    }\n  }\n\n  async getApplicationsState(): Promise<ApplicationState[]> {\n    return this.applicationManager.getApplicationsState();\n  }\n\n  async requestStartApplication(recipeId: string, modelId: string): Promise<void> {\n    this.applicationManager.startApplication(recipeId, modelId).catch((err: unknown) => {\n      console.error('Something went wrong while trying to start application', err);\n    });\n  }\n\n  async requestStopApplication(recipeId: string, modelId: string): Promise<void> {\n    this.applicationManager.stopApplication(recipeId, modelId).catch((err: unknown) => {\n      console.error('Something went wrong while trying to stop application', err);\n    });\n  }\n\n  async requestRemoveApplication(recipeId: string, modelId: string): Promise<void> {\n    const recipe = this.catalogManager.getRecipeById(recipeId);\n    // Do not wait on the promise as the api would probably timeout before the user answer.\n    podmanDesktopApi.window\n      .showWarningMessage(\n        `Delete the AI App \"${recipe.name}\"? This will delete the containers running the application and model.`,\n        'Confirm',\n        'Cancel',\n      )\n      .then((result: string | undefined) => {\n        if (result === 'Confirm') {\n          this.applicationManager.removeApplication(recipeId, modelId).catch((err: unknown) => {\n            console.error(`error deleting AI App's pod: ${String(err)}`);\n            podmanDesktopApi.window\n              .showErrorMessage(\n                `Error deleting the AI App \"${recipe.name}\". 
You can try to stop and delete the AI App's pod manually.`,\n              )\n              .catch((err: unknown) => {\n                console.error(`Something went wrong with confirmation modals`, err);\n              });\n          });\n        }\n      })\n      .catch((err: unknown) => {\n        console.error(`Something went wrong with confirmation modals`, err);\n      });\n  }\n\n  async requestRestartApplication(recipeId: string, modelId: string): Promise<void> {\n    const recipe = this.catalogManager.getRecipeById(recipeId);\n\n    // get the state of the application\n    const state = this.applicationManager\n      .getApplicationsState()\n      .find(state => state.recipeId === recipeId && state.modelId === modelId);\n    if (!state) throw new Error('application is not running.');\n\n    // get the corresponding connection\n    const connection = await this.podmanConnection.getConnectionByEngineId(state.pod.engineId);\n\n    // Do not wait on the promise as the api would probably timeout before the user answer.\n    podmanDesktopApi.window\n      .showWarningMessage(\n        `Restart the AI App \"${recipe.name}\"? 
This will delete the containers running the application and model, rebuild the images with the current sources, and restart the containers.`,\n        'Confirm',\n        'Cancel',\n      )\n      .then((result: string | undefined) => {\n        if (result === 'Confirm') {\n          this.applicationManager.restartApplication(connection, recipeId, modelId).catch((err: unknown) => {\n            console.error(`error restarting AI App: ${String(err)}`);\n            podmanDesktopApi.window\n              .showErrorMessage(`Error restarting the AI App \"${recipe.name}\"`)\n              .catch((err: unknown) => {\n                console.error(`Something went wrong with confirmation modals`, err);\n              });\n          });\n        }\n      })\n      .catch((err: unknown) => {\n        console.error(`Something went wrong with confirmation modals`, err);\n      });\n  }\n\n  async requestOpenApplication(recipeId: string, modelId: string): Promise<void> {\n    const recipe = this.catalogManager.getRecipeById(recipeId);\n    this.applicationManager\n      .getApplicationPorts(recipeId, modelId)\n      .then((ports: number[]) => {\n        if (ports.length === 0) {\n          podmanDesktopApi.window\n            .showErrorMessage(`AI App ${recipe.name} has no application ports to open`)\n            .catch((err: unknown) => {\n              console.error(`Something went wrong with confirmation modals`, err);\n            });\n        } else if (ports.length === 1) {\n          const uri = `http://localhost:${ports[0]}`;\n          podmanDesktopApi.env.openExternal(podmanDesktopApi.Uri.parse(uri)).catch((err: unknown) => {\n            console.error(`Something went wrong while opening ${uri}`, err);\n          });\n        } else {\n          podmanDesktopApi.window\n            .showQuickPick(\n              ports.map(p => {\n                const item: PortQuickPickItem = { port: p, label: `${p}`, description: `Port ${p}` };\n                return item;\n        
      }),\n              { placeHolder: 'Select the port to open' },\n            )\n            .then((selectedPort: PortQuickPickItem | undefined) => {\n              if (!selectedPort) return;\n              const uri = `http://localhost:${selectedPort.port}`;\n              podmanDesktopApi.env.openExternal(podmanDesktopApi.Uri.parse(uri)).catch((err: unknown) => {\n                console.error(`Something went wrong while opening ${uri}`, err);\n              });\n            })\n            .catch((err: unknown) => {\n              console.error(`Something went wrong with confirmation modals`, err);\n            });\n        }\n      })\n      .catch((err: unknown) => {\n        console.error(`error opening AI App: ${String(err)}`);\n        podmanDesktopApi.window.showErrorMessage(`Error opening the AI App \"${recipe.name}\"`).catch((err: unknown) => {\n          console.error(`Something went wrong with confirmation modals`, err);\n        });\n      });\n  }\n\n  async telemetryLogUsage(eventName: string, data?: Record<string, unknown>): Promise<void> {\n    this.telemetry.logUsage(eventName, data);\n  }\n\n  async telemetryLogError(eventName: string, data?: Record<string, unknown>): Promise<void> {\n    this.telemetry.logError(eventName, data);\n  }\n\n  async getLocalRepositories(): Promise<LocalRepository[]> {\n    return this.localRepositories.getLocalRepositories();\n  }\n\n  async getTasks(): Promise<Task[]> {\n    return this.taskRegistry.getTasks();\n  }\n\n  async openVSCode(directory: string, recipeId?: string): Promise<void> {\n    const telemetry: Record<string, unknown> = {\n      'recipe.id': recipeId,\n    };\n\n    try {\n      if (!path.isAbsolute(directory)) {\n        throw new Error('Do not support relative directory.');\n      }\n\n      let unixPath: string = path.normalize(directory).replace(/[\\\\/]+/g, '/');\n      if (!unixPath.startsWith('/')) {\n        unixPath = `/${unixPath}`;\n      }\n\n      await 
podmanDesktopApi.env.openExternal(\n        podmanDesktopApi.Uri.file(unixPath).with({ scheme: 'vscode', authority: 'file' }),\n      );\n    } catch (err) {\n      telemetry['errorMessage'] = String(err);\n      console.error('Something went wrong while trying to open VSCode', err);\n      throw err;\n    } finally {\n      this.telemetry.logUsage('studio.open-vscode', telemetry);\n    }\n  }\n\n  async downloadModel(modelId: string): Promise<void> {\n    const modelInfo: ModelInfo = this.modelsManager.getModelInfo(modelId);\n\n    // Do not wait for the download task as it is too long.\n    this.modelsManager.requestDownloadModel(modelInfo).catch((err: unknown) => {\n      console.error(`Something went wrong while trying to download the model ${modelId}`, err);\n    });\n  }\n\n  getHostFreePort(): Promise<number> {\n    return getFreeRandomPort('0.0.0.0');\n  }\n\n  async requestDeleteLocalRepository(path: string): Promise<void> {\n    // Do not wait on the promise as the api would probably timeout before the user answer.\n    podmanDesktopApi.window\n      .showWarningMessage(`Delete permanently \"${path}\"?`, 'Confirm', 'Cancel')\n      .then((result: string | undefined) => {\n        if (result === 'Confirm') {\n          this.localRepositories.deleteLocalRepository(path).catch((err: unknown) => {\n            console.error(`error deleting path: ${String(err)}`);\n            podmanDesktopApi.window\n              .showErrorMessage(`Error deleting local path \"${path}\". 
Error: ${String(err)}`)\n              .catch((err: unknown) => {\n                console.error(`Something went wrong with confirmation modals`, err);\n              });\n          });\n        }\n      })\n      .catch((err: unknown) => {\n        console.error(`Something went wrong with confirmation modals`, err);\n      });\n  }\n\n  async requestCancelToken(tokenId: number): Promise<void> {\n    if (!this.cancellationTokenRegistry.hasCancellationTokenSource(tokenId))\n      throw new Error(`Cancellation token with id ${tokenId} does not exist.`);\n    this.cancellationTokenRegistry.getCancellationTokenSource(tokenId)?.cancel();\n  }\n\n  async importModels(models: LocalModelImportInfo[]): Promise<void> {\n    return this.catalogManager.importUserModels(models);\n  }\n\n  async validateLocalModel(model: LocalModelImportInfo): Promise<void> {\n    const catalogModels = await this.getModelsInfo();\n\n    for (const catalogModel of catalogModels) {\n      if (!catalogModel.file) {\n        continue;\n      }\n\n      if (catalogModel.file.path === path.dirname(model.path) && catalogModel.file.file === path.basename(model.path)) {\n        throw new Error('file already imported');\n      }\n    }\n  }\n\n  copyToClipboard(content: string): Promise<void> {\n    return podmanDesktopApi.env.clipboard.writeText(content);\n  }\n\n  async checkContainerConnectionStatusAndResources(\n    options: CheckContainerConnectionResourcesOptions,\n  ): Promise<ContainerConnectionInfo> {\n    return this.podmanConnection.checkContainerConnectionStatusAndResources(options);\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/studio.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/* eslint-disable @typescript-eslint/no-explicit-any */\n\nimport { afterEach, beforeEach, expect, test, vi, type MockInstance } from 'vitest';\nimport { Studio } from './studio';\nimport { type ExtensionContext, EventEmitter } from '@podman-desktop/api';\nimport { CatalogManager } from './managers/catalogManager';\n\nimport * as fs from 'node:fs';\n\nvi.mock('./managers/modelsManager');\nvi.mock('./managers/catalogManager');\n\nconst mockedExtensionContext = {\n  subscriptions: [],\n  storagePath: 'dummy-storage-path',\n} as unknown as ExtensionContext;\n\nconst studio = new Studio(mockedExtensionContext);\n\nconst mocks = vi.hoisted(() => ({\n  listContainers: vi.fn(),\n  getContainerConnections: vi.fn(),\n  postMessage: vi.fn(),\n  logErrorMock: vi.fn(),\n  consoleWarnMock: vi.fn(),\n  consoleLogMock: vi.fn(),\n}));\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    configuration: {\n      getConfiguration: (): unknown => ({\n        get: vi.fn(),\n      }),\n      onDidChangeConfiguration: vi.fn(),\n    },\n    fs: {\n      createFileSystemWatcher: (): unknown => ({\n        onDidCreate: vi.fn(),\n        onDidDelete: vi.fn(),\n        
onDidChange: vi.fn(),\n      }),\n    },\n    EventEmitter: vi.fn(),\n    Uri: class {\n      static readonly joinPath = (): unknown => ({ fsPath: '.' });\n    },\n    window: {\n      createWebviewPanel: (): unknown => ({\n        webview: {\n          html: '',\n          onDidReceiveMessage: vi.fn(),\n          postMessage: mocks.postMessage,\n        },\n        onDidChangeViewState: vi.fn(),\n      }),\n      createStatusBarItem: (): unknown => ({\n        show: vi.fn(),\n      }),\n    },\n    env: {\n      createTelemetryLogger: (): unknown => ({\n        logUsage: vi.fn(),\n        logError: mocks.logErrorMock,\n      }),\n    },\n    containerEngine: {\n      onEvent: vi.fn(),\n      listContainers: mocks.listContainers,\n    },\n    navigation: {\n      register: vi.fn(),\n    },\n    provider: {\n      onDidRegisterContainerConnection: vi.fn(),\n      onDidUpdateContainerConnection: vi.fn(),\n      onDidUnregisterContainerConnection: vi.fn(),\n      onDidUpdateProvider: vi.fn(),\n      getContainerConnections: mocks.getContainerConnections,\n    },\n    commands: {\n      registerCommand: vi.fn(),\n    },\n    Disposable: {\n      create: vi.fn(),\n    },\n  };\n});\n\n/// mock console.log\nconst originalConsoleLog = console.log;\n\nbeforeEach(() => {\n  vi.clearAllMocks();\n  console.log = mocks.consoleLogMock;\n  console.warn = mocks.consoleWarnMock;\n\n  vi.mocked(EventEmitter).mockReturnValue({\n    event: vi.fn(),\n    fire: vi.fn(),\n  } as unknown as EventEmitter<unknown>);\n\n  mocks.postMessage.mockResolvedValue(undefined);\n\n  vi.mocked(CatalogManager).mockReturnValue({\n    onUpdate: vi.fn(),\n    init: vi.fn(),\n    getRecipes: vi.fn().mockReturnValue([]),\n  } as unknown as CatalogManager);\n});\n\nafterEach(() => {\n  console.log = originalConsoleLog;\n});\n\ntest('check activate', async () => {\n  mocks.listContainers.mockReturnValue([]);\n  mocks.getContainerConnections.mockReturnValue([]);\n  (vi.spyOn(fs.promises, 'readFile') as 
unknown as MockInstance<() => Promise<string>>).mockImplementation(() => {\n    return Promise.resolve('<html></html>');\n  });\n  await studio.activate();\n\n  // expect the activate method to be called on the studio class\n  expect(mocks.consoleLogMock).toBeCalledWith('starting AI Lab extension');\n});\n\ntest('check deactivate ', async () => {\n  await studio.deactivate();\n\n  // expect the deactivate method to be called on the studio class\n  expect(mocks.consoleLogMock).toBeCalledWith('stopping AI Lab extension');\n});\n"
  },
  {
    "path": "packages/backend/src/studio.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { env } from '@podman-desktop/api';\nimport type {\n  ExtensionContext,\n  TelemetryLogger,\n  WebviewPanel,\n  WebviewPanelOnDidChangeViewStateEvent,\n} from '@podman-desktop/api';\nimport { RpcExtension } from '@shared/messages/MessageProxy';\nimport { StudioApiImpl } from './studio-api-impl';\nimport { ApplicationManager } from './managers/application/applicationManager';\nimport { GitManager } from './managers/gitManager';\nimport { TaskRegistry } from './registries/TaskRegistry';\nimport { CatalogManager } from './managers/catalogManager';\nimport { ModelsManager } from './managers/modelsManager';\nimport { ContainerRegistry } from './registries/ContainerRegistry';\nimport { PodmanConnection } from './managers/podmanConnection';\nimport { LocalRepositoryRegistry } from './registries/LocalRepositoryRegistry';\nimport { InferenceManager } from './managers/inference/inferenceManager';\nimport { PlaygroundV2Manager } from './managers/playgroundV2Manager';\nimport { SnippetManager } from './managers/SnippetManager';\nimport { CancellationTokenRegistry } from './registries/CancellationTokenRegistry';\nimport { BuilderManager } from 
'./managers/recipes/BuilderManager';\nimport { PodManager } from './managers/recipes/PodManager';\nimport { initWebview } from './webviewUtils';\nimport { LlamaCppPython } from './workers/provider/LlamaCppPython';\nimport { InferenceProviderRegistry } from './registries/InferenceProviderRegistry';\nimport { ConfigurationRegistry } from './registries/ConfigurationRegistry';\nimport { RecipeManager } from './managers/recipes/RecipeManager';\nimport { GPUManager } from './managers/GPUManager';\nimport { WhisperCpp } from './workers/provider/WhisperCpp';\nimport { ApiServer } from './managers/apiServer';\nimport { InstructlabManager } from './managers/instructlab/instructlabManager';\nimport { InstructlabApiImpl } from './instructlab-api-impl';\nimport { NavigationRegistry } from './registries/NavigationRegistry';\nimport type { StudioAPI } from '@shared/StudioAPI';\nimport { STUDIO_API_CHANNEL } from '@shared/StudioAPI';\nimport type { InstructlabAPI } from '@shared/InstructlabAPI';\nimport { INSTRUCTLAB_API_CHANNEL } from '@shared/InstructlabAPI';\nimport { ModelHandlerRegistry } from './registries/ModelHandlerRegistry';\nimport { URLModelHandler } from './models/URLModelHandler';\nimport { HuggingFaceModelHandler } from './models/HuggingFaceModelHandler';\nimport { LlamaStackApiImpl } from './llama-stack-api-impl';\nimport { LLAMA_STACK_API_CHANNEL, type LlamaStackAPI } from '@shared/LlamaStackAPI';\nimport { LlamaStackManager } from './managers/llama-stack/llamaStackManager';\nimport { OpenVINO } from './workers/provider/OpenVINO';\nimport { McpServerManager } from './managers/playground/McpServerManager';\nimport os from 'node:os';\n\nexport class Studio {\n  readonly #extensionContext: ExtensionContext;\n\n  /**\n   * Webview panel used by AI Lab\n   */\n  #panel: WebviewPanel | undefined;\n\n  /**\n   * API related classes\n   */\n  #rpcExtension: RpcExtension | undefined;\n  #studioApi: StudioApiImpl | undefined;\n  #instructlabApi: InstructlabApiImpl | 
undefined;\n  #llamaStackApi: LlamaStackApiImpl | undefined;\n\n  #localRepositoryRegistry: LocalRepositoryRegistry | undefined;\n  #catalogManager: CatalogManager | undefined;\n  #modelsManager: ModelsManager | undefined;\n  #telemetry: TelemetryLogger | undefined;\n  #inferenceManager: InferenceManager | undefined;\n  #podManager: PodManager | undefined;\n  #builderManager: BuilderManager | undefined;\n  #containerRegistry: ContainerRegistry | undefined;\n  #podmanConnection: PodmanConnection | undefined;\n  #taskRegistry: TaskRegistry | undefined;\n  #cancellationTokenRegistry: CancellationTokenRegistry | undefined;\n  #snippetManager: SnippetManager | undefined;\n  #mcpServerManager: McpServerManager | undefined;\n  #playgroundManager: PlaygroundV2Manager | undefined;\n  #applicationManager: ApplicationManager | undefined;\n  #recipeManager: RecipeManager | undefined;\n  #inferenceProviderRegistry: InferenceProviderRegistry | undefined;\n  #configurationRegistry: ConfigurationRegistry | undefined;\n  #gpuManager: GPUManager | undefined;\n  #navigationRegistry: NavigationRegistry | undefined;\n  #instructlabManager: InstructlabManager | undefined;\n  #llamaStackManager: LlamaStackManager | undefined;\n\n  constructor(readonly extensionContext: ExtensionContext) {\n    this.#extensionContext = extensionContext;\n  }\n\n  public async activate(): Promise<void> {\n    console.log('starting AI Lab extension');\n    this.#telemetry = env.createTelemetryLogger();\n\n    /**\n     * Storage directory for the extension provided by podman desktop\n     */\n    const appUserDirectory = this.extensionContext.storagePath;\n\n    this.#telemetry.logUsage('start');\n\n    /**\n     * The AI Lab has a webview integrated in Podman Desktop\n     * We need to initialize and configure it properly\n     */\n    this.#panel = await initWebview(this.#extensionContext.extensionUri);\n    this.#extensionContext.subscriptions.push(this.#panel);\n    this.#panel.onDidChangeViewState((e: 
WebviewPanelOnDidChangeViewStateEvent) => {\n      this.#telemetry?.logUsage(e.webviewPanel.visible ? 'opened' : 'closed');\n    });\n\n    /**\n     * The RpcExtension handle the communication channels between the frontend and the backend\n     */\n    this.#rpcExtension = new RpcExtension(this.#panel.webview);\n    this.#rpcExtension.init();\n    this.#extensionContext.subscriptions.push(this.#rpcExtension);\n\n    /**\n     * The navigation registry is used\n     * to register and managed the routes of the extension\n     */\n    this.#navigationRegistry = new NavigationRegistry(this.#panel, this.#rpcExtension);\n    this.#navigationRegistry.init();\n    this.#extensionContext.subscriptions.push(this.#navigationRegistry);\n\n    /**\n     * Cancellation token registry store the tokens used to cancel a task\n     */\n    this.#cancellationTokenRegistry = new CancellationTokenRegistry();\n    this.#extensionContext.subscriptions.push(this.#cancellationTokenRegistry);\n\n    /**\n     * The configuration registry manage the extension preferences/settings\n     */\n    this.#configurationRegistry = new ConfigurationRegistry(this.#rpcExtension, appUserDirectory);\n    this.#configurationRegistry?.init();\n    this.#extensionContext.subscriptions.push(this.#configurationRegistry);\n\n    /**\n     * The container registry handle the events linked to containers (start, remove, die...)\n     */\n    this.#containerRegistry = new ContainerRegistry();\n    this.#containerRegistry.init();\n    this.#extensionContext.subscriptions.push(this.#containerRegistry);\n\n    /**\n     * GitManager is used for cloning, pulling etc. 
recipes repositories\n     */\n    const gitManager = new GitManager();\n\n    /**\n     * The podman connection class is responsible for podman machine events (start/stop)\n     */\n    this.#podmanConnection = new PodmanConnection(this.#rpcExtension);\n    this.#podmanConnection.init();\n    this.#extensionContext.subscriptions.push(this.#podmanConnection);\n\n    /**\n     * The task registry store the tasks\n     */\n    this.#taskRegistry = new TaskRegistry(this.#rpcExtension);\n    this.#extensionContext.subscriptions.push(this.#taskRegistry);\n\n    /**\n     * Create catalog manager, responsible for loading the catalog files and watching for changes\n     */\n    this.#catalogManager = new CatalogManager(this.#rpcExtension, appUserDirectory);\n    await this.#catalogManager.init();\n\n    /**\n     * The builder manager is handling the building tasks, create corresponding tasks\n     * through the task registry and cancellation.\n     */\n    this.#builderManager = new BuilderManager(this.#taskRegistry);\n    this.#extensionContext.subscriptions.push(this.#builderManager);\n\n    /**\n     * The pod manager is a class responsible for managing the Pods\n     */\n    this.#podManager = new PodManager();\n    this.#podManager.init();\n    this.#extensionContext.subscriptions.push(this.#podManager);\n\n    /**\n     * The ModelManager role is to download and\n     */\n    const modelHandlerRegistry = new ModelHandlerRegistry(this.#rpcExtension);\n    this.#modelsManager = new ModelsManager(\n      this.#rpcExtension,\n      this.#catalogManager,\n      this.#telemetry,\n      this.#taskRegistry,\n      this.#cancellationTokenRegistry,\n      this.#podmanConnection,\n      this.#configurationRegistry,\n      modelHandlerRegistry,\n    );\n    const urlModelHandler = new URLModelHandler(\n      this.#modelsManager,\n      this.#configurationRegistry.getExtensionConfiguration().modelsPath,\n    );\n    this.#extensionContext.subscriptions.push(urlModelHandler);\n  
  this.#extensionContext.subscriptions.push(modelHandlerRegistry.register(urlModelHandler));\n    const hfModelHandler = new HuggingFaceModelHandler(this.#modelsManager);\n    this.#extensionContext.subscriptions.push(hfModelHandler);\n    this.#extensionContext.subscriptions.push(modelHandlerRegistry.register(hfModelHandler));\n    await this.#modelsManager.init();\n    this.#extensionContext.subscriptions.push(this.#modelsManager);\n\n    /**\n     * The LocalRepositoryRegistry store and watch for recipes repository locally and expose it.\n     */\n    this.#localRepositoryRegistry = new LocalRepositoryRegistry(\n      this.#rpcExtension,\n      appUserDirectory,\n      this.#catalogManager,\n    );\n    this.#localRepositoryRegistry.init();\n    this.#extensionContext.subscriptions.push(this.#localRepositoryRegistry);\n\n    /**\n     * GPUManager is a class responsible for detecting and storing the GPU specs\n     */\n    this.#gpuManager = new GPUManager(this.#rpcExtension);\n    this.#extensionContext.subscriptions.push(this.#gpuManager);\n\n    /**\n     * The Inference Provider registry stores all the InferenceProvider (aka backend) which\n     * can be used to create InferenceServers\n     */\n    this.#inferenceProviderRegistry = new InferenceProviderRegistry(this.#rpcExtension);\n    this.#extensionContext.subscriptions.push(\n      this.#inferenceProviderRegistry.register(\n        new LlamaCppPython(this.#taskRegistry, this.#podmanConnection, this.#gpuManager, this.#configurationRegistry),\n      ),\n    );\n    this.#extensionContext.subscriptions.push(\n      this.#inferenceProviderRegistry.register(new WhisperCpp(this.#taskRegistry, this.#podmanConnection)),\n    );\n    if (os.arch() === 'x64') {\n      this.#extensionContext.subscriptions.push(\n        this.#inferenceProviderRegistry.register(\n          new OpenVINO(this.#taskRegistry, this.#podmanConnection, this.#modelsManager, this.#configurationRegistry),\n        ),\n      );\n    }\n\n    
/**\n     * The inference manager create, stop, manage Inference servers\n     */\n    this.#inferenceManager = new InferenceManager(\n      this.#rpcExtension,\n      this.#containerRegistry,\n      this.#podmanConnection,\n      this.#modelsManager,\n      this.#telemetry,\n      this.#taskRegistry,\n      this.#inferenceProviderRegistry,\n      this.#catalogManager,\n    );\n    this.#inferenceManager.init();\n    this.#extensionContext.subscriptions.push(this.#inferenceManager);\n\n    /** The InstructLab tuning sessions manager */\n    this.#instructlabManager = new InstructlabManager(\n      appUserDirectory,\n      this.#taskRegistry,\n      this.#podmanConnection,\n      this.#containerRegistry,\n      this.#telemetry,\n    );\n    this.#instructlabManager.init();\n    this.#extensionContext.subscriptions.push(this.#instructlabManager);\n\n    /** The Llama Stack manager */\n    this.#llamaStackManager = new LlamaStackManager(\n      appUserDirectory,\n      this.#taskRegistry,\n      this.#podmanConnection,\n      this.#containerRegistry,\n      this.#configurationRegistry,\n      this.#telemetry,\n      this.#modelsManager,\n    );\n    this.#extensionContext.subscriptions.push(this.#llamaStackManager);\n    this.#llamaStackManager.init();\n\n    /**\n     * The recipe manage offer some andy methods to manage recipes, build get images etc.\n     */\n    this.#recipeManager = new RecipeManager(\n      appUserDirectory,\n      gitManager,\n      this.#taskRegistry,\n      this.#builderManager,\n      this.#localRepositoryRegistry,\n      this.#inferenceManager,\n    );\n    this.#recipeManager.init();\n    this.#extensionContext.subscriptions.push(this.#recipeManager);\n\n    /**\n     * The application manager is managing the Recipes\n     */\n    this.#applicationManager = new ApplicationManager(\n      this.#taskRegistry,\n      this.#rpcExtension,\n      this.#podmanConnection,\n      this.#catalogManager,\n      this.#modelsManager,\n      
this.#telemetry,\n      this.#podManager,\n      this.#recipeManager,\n      this.#llamaStackManager,\n    );\n    this.#applicationManager.init();\n    this.#extensionContext.subscriptions.push(this.#applicationManager);\n\n    this.#mcpServerManager = new McpServerManager(this.#rpcExtension, appUserDirectory);\n    this.#mcpServerManager.init();\n    this.#extensionContext.subscriptions.push(this.#mcpServerManager);\n\n    /**\n     * PlaygroundV2Manager handle the conversations of the Playground by using the InferenceServer available\n     */\n    this.#playgroundManager = new PlaygroundV2Manager(\n      this.#rpcExtension,\n      this.#inferenceManager,\n      this.#taskRegistry,\n      this.#telemetry,\n      this.#cancellationTokenRegistry,\n      this.#mcpServerManager,\n    );\n    this.#extensionContext.subscriptions.push(this.#playgroundManager);\n\n    /**\n     * The snippet manager provide code snippet used in the\n     * InferenceServer details page\n     */\n    this.#snippetManager = new SnippetManager(this.#rpcExtension, this.#telemetry);\n    this.#snippetManager.init();\n\n    /**\n     * The StudioApiImpl is the implementation of our API between backend and frontend\n     */\n    this.#studioApi = new StudioApiImpl(\n      this.#applicationManager,\n      this.#catalogManager,\n      this.#modelsManager,\n      this.#telemetry,\n      this.#localRepositoryRegistry,\n      this.#taskRegistry,\n      this.#inferenceManager,\n      this.#playgroundManager,\n      this.#snippetManager,\n      this.#cancellationTokenRegistry,\n      this.#configurationRegistry,\n      this.#recipeManager,\n      this.#podmanConnection,\n      this.#navigationRegistry,\n    );\n    // Register the instance\n    this.#rpcExtension.registerInstance<StudioAPI, StudioApiImpl>(STUDIO_API_CHANNEL, this.#studioApi);\n\n    const apiServer = new ApiServer(\n      this.#extensionContext,\n      this.#modelsManager,\n      this.#catalogManager,\n      this.#inferenceManager,\n  
    this.#configurationRegistry,\n      this.#containerRegistry,\n    );\n    await apiServer.init();\n    this.#extensionContext.subscriptions.push(apiServer);\n\n    this.#instructlabApi = new InstructlabApiImpl(this.#instructlabManager);\n    // Register the instance\n    this.#rpcExtension.registerInstance<InstructlabAPI, InstructlabApiImpl>(\n      INSTRUCTLAB_API_CHANNEL,\n      this.#instructlabApi,\n    );\n\n    this.#llamaStackApi = new LlamaStackApiImpl(this.#llamaStackManager);\n    // Register the instance\n    this.#rpcExtension.registerInstance<LlamaStackAPI, LlamaStackApiImpl>(LLAMA_STACK_API_CHANNEL, this.#llamaStackApi);\n  }\n\n  public async deactivate(): Promise<void> {\n    console.log('stopping AI Lab extension');\n    this.#telemetry?.logUsage('stop');\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/templates/java-okhttp.mustache",
    "content": "pom.xml\n=======\n<dependency>\n    <groupId>com.squareup.okhttp</groupId>\n    <artifactId>okhttp</artifactId>\n    <version>2.7.5</version>\n</dependency>\n\nAiService.java\n==============\npackage io.podman.desktop.java.okhttp;\n\nimport com.squareup.okhttp.MediaType;\nimport com.squareup.okhttp.OkHttpClient;\nimport com.squareup.okhttp.Request;\nimport com.squareup.okhttp.RequestBody;\nimport com.squareup.okhttp.Response;\n\nOkHttpClient client = new OkHttpClient();\nMediaType mediaType = MediaType.parse(\"application/json\");\nString json =\n\"\"\"\n    {\n        \"messages\": [\n          {\n            \"content\": \"You are a helpful assistant.\",\n            \"role\": \"system\"\n          },\n          {\n            \"content\": \"What is the capital of France?\",\n            \"role\": \"user\"\n          }\n        ]\n    }\n\"\"\";\nRequestBody body = RequestBody.create(mediaType, json);\nRequest request = new Request.Builder()\n.url(\"{{{ endpoint }}}\")\n.method(\"POST\", body)\n.addHeader(\"Content-Type\", \"application/json\")\n.build();\nResponse response = client.newCall(request).execute();\n\n======\n"
  },
  {
    "path": "packages/backend/src/templates/python-langchain.mustache",
    "content": "pip\n=======\npip install langchain langchain-openai\n\nAiService.py\n==============\nfrom langchain_openai import OpenAI\nfrom langchain.chains import LLMChain\nfrom langchain_core.prompts import ChatPromptTemplate\n\nmodel_service = \"{{{ endpoint }}}\"\n\nllm = OpenAI(base_url=model_service,\n             api_key=\"sk-no-key-required\",\n             streaming=True)\nprompt = ChatPromptTemplate.from_messages([\n  (\"system\", \"You are a helpful assistant.\"),\n  (\"user\", \"What is the capital of France?\")\n])\n\nchain = LLMChain(llm=llm, prompt=prompt)\nresponse = chain.invoke({\n  \"messages\": prompt\n})\nprint(response)\n\n======\n"
  },
  {
    "path": "packages/backend/src/templates/quarkus-langchain4j.mustache",
    "content": "application.properties\n======================\nquarkus.langchain4j.openai.base-url={{{ baseUrl }}}\nquarkus.langchain4j.openai.api-key=sk-dummy\n\npom.xml\n=======\n<dependency>\n  <groupId>io.quarkiverse.langchain4j</groupId>\n  <artifactId>quarkus-langchain4j-core</artifactId>\n  <version>{{{ version }}}</version>\n</dependency>\n<dependency>\n  <groupId>io.quarkiverse.langchain4j</groupId>\n  <artifactId>quarkus-langchain4j-openai</artifactId>\n  <version>{{{ version }}}</version>\n</dependency>\n\nAiService.java\n==============\npackage io.podman.desktop.quarkus.langchain4j;\n\nimport dev.langchain4j.service.UserMessage;\nimport io.quarkiverse.langchain4j.RegisterAiService;\n\n@RegisterAiService\npublic interface AiService {\n\n@UserMessage(\"{question}\")\nString request(String question);\n}\n\n======\nInject AIService into REST resource or other CDI resource and use the request method to call the LLM model. That's it\n"
  },
  {
    "path": "packages/backend/src/tests/ai-test.json",
    "content": "{\n  \"version\": \"1.0\",\n  \"recipes\": [\n    {\n      \"id\": \"chatbot\",\n      \"description\": \"Chat bot application\",\n      \"name\": \"ChatBot\",\n      \"repository\": \"https://github.com/axel7083/locallm\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\"natural-language-processing\"],\n      \"basedir\": \"chatbot\",\n      \"readme\": \"# Locallm\\n\\nThis repo contains artifacts that can be used to build and run LLM (Large Language Model) services locally on your Mac using podman. These containerized LLM services can be used to help developers quickly prototype new LLM based applications, without the need for relying on any other externally hosted services. Since they are already containerized, it also helps developers move from their prototype to production quicker.     \\n\\n## Current Locallm Services: \\n\\n* [Chatbot](#chatbot)\\n* [Text Summarization](#text-summarization)\\n* [Fine-tuning](#fine-tuning)\\n\\n### Chatbot\\n\\nA simple chatbot using the gradio UI. Learn how to build and run this model service here: [Chatbot](/chatbot/).\\n\\n### Text Summarization\\n\\nAn LLM app that can summarize arbitrarily long text inputs. Learn how to build and run this model service here: [Text Summarization](/summarizer/).\\n\\n### Fine Tuning \\n\\nThis application allows a user to select a model and a data set they'd like to fine-tune that model on. Once the application finishes, it outputs a new fine-tuned model for the user to apply to other LLM services. Learn how to build and run this model training job here: [Fine-tuning](/finetune/).\\n\\n## Architecture\\n![](https://raw.githubusercontent.com/MichaelClifford/locallm/main/assets/arch.jpg)\\n\\nThe diagram above indicates the general architecture for each of the individual model services contained in this repo. The core code available here is the \\\"LLM Task Service\\\" and the \\\"API Server\\\", bundled together under `model_services`. 
With an appropriately chosen model downloaded onto your host, `model_services/builds` contains the Containerfiles required to build an ARM or an x86 (with CUDA) image depending on your need. These model services are intended to be light-weight and run with smaller hardware footprints (given the Locallm name), but they can be run on any hardware that supports containers and scaled up if needed.\\n\\nWe also provide demo \\\"AI Applications\\\" under `ai_applications` for each model service to provide an example of how a developers could interact with the model service for their own needs. \",\n      \"recommended\": [\"llama-2-7b-chat.Q5_K_S\", \"albedobase-xl-1.3\", \"sdxl-turbo\"]\n    },\n    {\n      \"id\": \"recipe0\",\n      \"name\": \"Recipe 1\",\n      \"categories\": [],\n      \"description\": \"\",\n      \"repository\": \"\",\n      \"readme\": \"\"\n    },\n    {\n      \"id\": \"recipe1\",\n      \"name\": \"Recipe 1\",\n      \"categories\": [],\n      \"description\": \"\",\n      \"repository\": \"\",\n      \"readme\": \"\",\n      \"backend\": \"tool1\",\n      \"languages\": [\"lang1\", \"lang10\"],\n      \"frameworks\": [\"fw1\", \"fw10\"]\n    },\n    {\n      \"id\": \"recipe2\",\n      \"name\": \"Recipe 2\",\n      \"categories\": [],\n      \"description\": \"\",\n      \"repository\": \"\",\n      \"readme\": \"\",\n      \"backend\": \"tool2\",\n      \"languages\": [\"lang2\", \"lang10\"],\n      \"frameworks\": [\"fw2\", \"fw10\"]\n    },\n    {\n      \"id\": \"recipe3\",\n      \"name\": \"Recipe 3\",\n      \"categories\": [],\n      \"description\": \"\",\n      \"repository\": \"\",\n      \"readme\": \"\",\n      \"backend\": \"tool3\",\n      \"languages\": [\"lang3\", \"lang11\"],\n      \"frameworks\": [\"fw2\", \"fw10\", \"fw11\"]\n    }\n  ],\n  \"models\": [\n    {\n      \"id\": \"llama-2-7b-chat.Q5_K_S\",\n      \"name\": \"Llama-2-7B-Chat-GGUF\",\n      \"description\": \"Llama 2 is a family of state-of-the-art 
open-access large language models released by Meta today, and we’re excited to fully support the launch with comprehensive integration in Hugging Face. Llama 2 is being released with a very permissive community license and is available for commercial use. The code, pretrained models, and fine-tuned models are all being released today 🔥\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"?\",\n      \"url\": \"https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_S.gguf\"\n    },\n    {\n      \"id\": \"albedobase-xl-1.3\",\n      \"name\": \"AlbedoBase XL 1.3\",\n      \"description\": \"Stable Diffusion XL has 6.6 billion parameters, which is about 6.6 times more than the SD v1.5 version. I believe that this is not just a number, but a number that can lead to a significant improvement in performance. It has been a while since we realized that the overall performance of SD v1.5 has improved beyond imagination thanks to the explosive contributions of our community. Therefore, I am working on completing this AlbedoBase XL model in order to optimally reproduce the performance improvement that occurred in v1.5 in this XL version as well. My goal is to directly test the performance of all Checkpoints and LoRAs that are publicly uploaded to Civitai, and merge only the resources that are judged to be optimal after passing through several filters. This will surpass the performance of image-generating AI of companies such as Midjourney. 
As of now, AlbedoBase XL v0.4 has merged exactly 55 selected checkpoints and 138 LoRAs.\",\n      \"registry\": \"Civital\",\n      \"license\": \"openrail++\",\n      \"url\": \"\"\n    },\n    {\n      \"id\": \"sdxl-turbo\",\n      \"name\": \"SDXL Turbo\",\n      \"description\": \"SDXL Turbo achieves state-of-the-art performance with a new distillation technology, enabling single-step image generation with unprecedented quality, reducing the required step count from 50 to just one.\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"sai-c-community\",\n      \"url\": \"\"\n    }\n  ],\n  \"categories\": [\n    {\n      \"id\": \"natural-language-processing\",\n      \"name\": \"Natural Language Processing\",\n      \"description\": \"Models that work with text: classify, summarize, translate, or generate text.\"\n    },\n    {\n      \"id\": \"computer-vision\",\n      \"description\": \"Process images, from classification to object detection and segmentation.\",\n      \"name\": \"Computer Vision\"\n    },\n    {\n      \"id\": \"audio\",\n      \"description\": \"Recognize speech or classify audio with audio models.\",\n      \"name\": \"Audio\"\n    },\n    {\n      \"id\": \"multimodal\",\n      \"description\": \"Stuff about multimodal models goes here omg yes amazing.\",\n      \"name\": \"Multimodal\"\n    }\n  ]\n}\n"
  },
  {
    "path": "packages/backend/src/tests/ai-user-test.json",
    "content": "{\n  \"version\": \"1.0\",\n  \"recipes\": [\n    {\n      \"id\": \"recipe 1\",\n      \"description\" : \"Recipe 1\",\n      \"name\" : \"Recipe 1\",\n      \"repository\": \"https://recipe1.example.com\",\n      \"icon\": \"natural-language-processing\",\n      \"categories\": [\n        \"category1\"\n      ],\n      \"basedir\": \"chatbot\",\n      \"readme\": \"Readme for recipe 1\",\n      \"recommended\": [\n        \"model1\",\n        \"model2\"\n      ]\n    }\n  ],\n  \"models\": [\n    {\n      \"id\": \"model1\",\n      \"name\": \"Model 1\",\n      \"description\": \"Readme for model 1\",\n      \"registry\": \"Hugging Face\",\n      \"license\": \"?\",\n      \"url\": \"https://model1.example.com\",\n      \"memory\": 0\n    },\n    {\n      \"id\": \"model2\",\n      \"name\": \"Model 2\",\n      \"description\": \"Readme for model 2\",\n      \"registry\": \"Civital\",\n      \"license\": \"?\",\n      \"url\": \"https://model2.example.com\",\n      \"memory\": 0\n    }\n  ],\n  \"categories\": [\n    {\n      \"id\": \"category1\",\n      \"name\": \"Category 1\",\n      \"description\" : \"Readme for category 1\"\n    }\n  ]\n}\n"
  },
  {
    "path": "packages/backend/src/tests/utils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nexport class TestEventEmitter {\n  #listeners: ((value: unknown) => void)[] = [];\n\n  event: (listener: (value: unknown) => void) => void;\n\n  constructor() {\n    this.event = (listener): void => {\n      this.#listeners.push(listener);\n    };\n  }\n\n  fire(value: unknown): void {\n    this.#listeners.forEach(listener => listener(value));\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/utils/JsonWatcher.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport { promises, existsSync, mkdirSync } from 'node:fs';\nimport type { FileSystemWatcher } from '@podman-desktop/api';\nimport { EventEmitter, fs } from '@podman-desktop/api';\nimport { JsonWatcher } from './JsonWatcher';\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    EventEmitter: vi.fn(),\n    fs: {\n      createFileSystemWatcher: (): unknown => ({\n        onDidCreate: vi.fn(),\n        onDidDelete: vi.fn(),\n        onDidChange: vi.fn(),\n      }),\n    },\n  };\n});\n\nvi.mock('node:fs', () => {\n  return {\n    existsSync: vi.fn(),\n    mkdirSync: vi.fn(),\n    promises: {\n      readFile: vi.fn(),\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  // Mock event emitters\n  const listeners: ((value: unknown) => void)[] = [];\n  vi.mocked(EventEmitter).mockReturnValue({\n    event: vi.fn().mockImplementation(callback => {\n      listeners.push(callback);\n    }),\n    fire: vi.fn().mockImplementation((content: unknown) => {\n      listeners.forEach(listener => listener(content));\n    }),\n  } as unknown as EventEmitter<unknown>);\n});\n\ntest('should provide 
default value', async () => {\n  vi.mocked(existsSync).mockReturnValue(false);\n  const watcher = new JsonWatcher<string>('dummyPath', 'dummyDefaultvalue');\n  const listener = vi.fn();\n  watcher.onContentUpdated(listener);\n\n  watcher.init();\n\n  await vi.waitFor(() => {\n    expect(listener).toHaveBeenCalledWith('dummyDefaultvalue');\n  });\n  expect(mkdirSync).toHaveBeenCalled();\n  expect(existsSync).toHaveBeenCalledWith('dummyPath');\n  expect(promises.readFile).not.toHaveBeenCalled();\n});\n\ntest('should read file content', async () => {\n  vi.mocked(existsSync).mockReturnValue(true);\n  vi.spyOn(promises, 'readFile').mockResolvedValue('[\"hello\"]');\n  const watcher = new JsonWatcher<string[]>('dummyPath', []);\n  const listener = vi.fn();\n  watcher.onContentUpdated(listener);\n\n  watcher.init();\n\n  await vi.waitFor(() => {\n    expect(listener).toHaveBeenCalledWith(['hello']);\n  });\n  expect(promises.readFile).toHaveBeenCalledWith('dummyPath', 'utf-8');\n});\n\ndescribe('file system watcher events should fire onContentUpdated', () => {\n  let onDidCreateListener: () => void;\n  let onDidDeleteListener: () => void;\n  let onDidChangeListener: () => void;\n  beforeEach(() => {\n    vi.spyOn(fs, 'createFileSystemWatcher').mockReturnValue({\n      onDidCreate: vi.fn().mockImplementation(listener => (onDidCreateListener = listener)),\n      onDidDelete: vi.fn().mockImplementation(listener => (onDidDeleteListener = listener)),\n      onDidChange: vi.fn().mockImplementation(listener => (onDidChangeListener = listener)),\n    } as unknown as FileSystemWatcher);\n  });\n\n  test('onDidCreate', async () => {\n    vi.mocked(existsSync).mockReturnValue(false);\n    const watcher = new JsonWatcher<string>('dummyPath', 'dummyDefaultValue');\n    const listener = vi.fn();\n    watcher.onContentUpdated(listener);\n    watcher.init();\n\n    expect(onDidCreateListener).toBeDefined();\n    onDidCreateListener();\n\n    await vi.waitFor(() => {\n      
expect(listener).toHaveBeenNthCalledWith(2, 'dummyDefaultValue');\n    });\n  });\n\n  test('onDidDeleteListener', async () => {\n    vi.mocked(existsSync).mockReturnValue(false);\n    const watcher = new JsonWatcher<string>('dummyPath', 'dummyDefaultValue');\n    const listener = vi.fn();\n    watcher.onContentUpdated(listener);\n    watcher.init();\n\n    expect(onDidDeleteListener).toBeDefined();\n    onDidDeleteListener();\n\n    await vi.waitFor(() => {\n      expect(listener).toHaveBeenNthCalledWith(2, 'dummyDefaultValue');\n    });\n  });\n\n  test('onDidChangeListener', async () => {\n    vi.mocked(existsSync).mockReturnValue(false);\n    const watcher = new JsonWatcher<string>('dummyPath', 'dummyDefaultValue');\n    const listener = vi.fn();\n    watcher.onContentUpdated(listener);\n    watcher.init();\n\n    expect(onDidChangeListener).toBeDefined();\n    onDidChangeListener();\n\n    await vi.waitFor(() => {\n      expect(listener).toHaveBeenNthCalledWith(2, 'dummyDefaultValue');\n    });\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/utils/JsonWatcher.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { type Disposable, type FileSystemWatcher, fs, EventEmitter, type Event } from '@podman-desktop/api';\nimport { promises, existsSync, mkdirSync } from 'node:fs';\nimport path from 'node:path';\n\nexport class JsonWatcher<T> implements Disposable {\n  #fileSystemWatcher: FileSystemWatcher | undefined;\n\n  private readonly _onEvent = new EventEmitter<T>();\n  readonly onContentUpdated: Event<T> = this._onEvent.event;\n\n  constructor(\n    private path: string,\n    private defaultValue: T,\n  ) {}\n\n  init(): void {\n    try {\n      // we create the parent directory of the watched content\n      // if the parent directory does not exists, the watcher is not initialized properly\n      mkdirSync(path.dirname(this.path), { recursive: true });\n\n      // create file system watcher\n      this.#fileSystemWatcher = fs.createFileSystemWatcher(this.path);\n      // Setup listeners\n      this.#fileSystemWatcher.onDidChange(this.onDidChange.bind(this));\n      this.#fileSystemWatcher.onDidDelete(this.onDidDelete.bind(this));\n      this.#fileSystemWatcher.onDidCreate(this.onDidCreate.bind(this));\n    } catch (err: unknown) {\n      console.error(`unable to watch 
file ${this.path}, changes won't be detected.`, err);\n    }\n    this.requestUpdate();\n  }\n\n  private onDidCreate(): void {\n    this.requestUpdate();\n  }\n\n  private onDidDelete(): void {\n    this.requestUpdate();\n  }\n\n  private onDidChange(): void {\n    this.requestUpdate();\n  }\n\n  private requestUpdate(): void {\n    this.updateContent().catch((err: unknown) => {\n      console.error('Something went wrong in update content', err);\n    });\n  }\n\n  private async updateContent(): Promise<void> {\n    if (!existsSync(this.path)) {\n      this._onEvent.fire(this.defaultValue);\n      return;\n    }\n\n    try {\n      const data = await promises.readFile(this.path, 'utf-8');\n      this._onEvent.fire(JSON.parse(data));\n    } catch (err: unknown) {\n      console.error('Something went wrong JsonWatcher', err);\n    }\n  }\n\n  dispose(): void {\n    this.#fileSystemWatcher?.dispose();\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/utils/Publisher.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { expect, test, vi } from 'vitest';\nimport { Publisher } from './Publisher';\nimport type { RpcExtension } from '@shared/messages/MessageProxy';\nimport { MSG_TASKS_UPDATE } from '@shared/Messages';\nimport type { Task } from '@shared/models/ITask';\n\ntest('ensure publisher properly use getter', async () => {\n  const rpcExtensionMock = { fire: vi.fn().mockResolvedValue(true) } as unknown as RpcExtension;\n  const fakeTasks = ['dummyValue'];\n  const getterMock = vi.fn().mockReturnValue(fakeTasks);\n  const publisher = new Publisher<Task[]>(rpcExtensionMock, MSG_TASKS_UPDATE, getterMock);\n  publisher.notify();\n\n  await vi.waitFor(() => {\n    expect(rpcExtensionMock.fire).toHaveBeenCalledWith(MSG_TASKS_UPDATE, fakeTasks);\n  });\n  expect(getterMock).toHaveBeenCalled();\n});\n"
  },
  {
    "path": "packages/backend/src/utils/Publisher.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { RpcChannel, RpcExtension } from '@shared/messages/MessageProxy';\n\nexport class Publisher<T> {\n  constructor(\n    private rpcExtension: RpcExtension,\n    private channel: RpcChannel<T>,\n    private getter: () => T,\n  ) {}\n\n  notify(): void {\n    this.rpcExtension.fire(this.channel, this.getter()).catch((err: unknown) => {\n      console.error(`Something went wrong while emitting ${this.channel}: ${String(err)}`);\n    });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/utils/RecipeConstants.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport const CONFIG_FILENAME = 'ai-lab.yaml';\n\n// pod labels\nexport const POD_LABEL_RECIPE_ID = 'ai-lab-recipe-id';\nexport const POD_LABEL_MODEL_ID = 'ai-lab-model-id';\nexport const POD_LABEL_MODEL_PORTS = 'ai-lab-model-ports';\nexport const POD_LABEL_APP_PORTS = 'ai-lab-application-ports';\n\n// image labels\nexport const IMAGE_LABEL_RECIPE_ID = 'ai-lab-recipe-id';\nexport const IMAGE_LABEL_APP_PORTS = 'ai-lab-application-ports';\nexport const IMAGE_LABEL_MODEL_SERVICE = 'ai-lab-model-service';\nexport const IMAGE_LABEL_APPLICATION_NAME = 'ai-lab-application-name';\n"
  },
  {
    "path": "packages/backend/src/utils/arch.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { arch } from 'node:os';\n\nconst nodeArch2GoArch = new Map<NodeJS.Architecture, string>([\n  ['ia32', '386'],\n  ['x64', 'amd64'],\n]);\n\nexport function goarch(): string {\n  const localArch = arch();\n  return nodeArch2GoArch.get(localArch) ?? (localArch as string);\n}\n"
  },
  {
    "path": "packages/backend/src/utils/catalogUtils.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { test, expect, describe } from 'vitest';\nimport {\n  CatalogFormat,\n  hasCatalogWrongFormat,\n  isNonNullObject,\n  merge,\n  sanitize,\n  sanitizeCategory,\n  sanitizeModel,\n  sanitizeRecipe,\n} from './catalogUtils';\n\n// Dummy data for testing\nconst validModel = {\n  id: 'model-1',\n  name: 'Test Model',\n  description: 'A test model',\n};\n\nconst validRecipe = {\n  id: 'recipe-1',\n  name: 'Test Recipe',\n  categories: ['category-1'],\n  description: 'A test recipe',\n  repository: 'http://example.com',\n  readme: 'Readme content',\n};\n\nconst validCategory = {\n  id: 'category-1',\n  name: 'Test Category',\n  description: 'A test category',\n};\n\ndescribe('sanitize', () => {\n  test('should adapt object not having any version to CURRENT format', () => {\n    const raw = {\n      recipes: [\n        {\n          id: 'chatbot',\n          description: 'This is a Streamlit chat demo application.',\n          name: 'ChatBot',\n          repository: 'https://github.com/containers/ai-lab-recipes',\n          ref: 'v1.1.3',\n          icon: 'natural-language-processing',\n          categories: ['natural-language-processing'],\n          basedir: 
'recipes/natural_language_processing/chatbot',\n          readme: '',\n          models: ['hf.instructlab.granite-7b-lab-GGUF', 'hf.instructlab.merlinite-7b-lab-GGUF'],\n        },\n      ],\n      models: [\n        {\n          id: 'Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n          name: 'Mistral-7B-Instruct-v0.3-Q4_K_M',\n          description: 'Model imported from path\\\\Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n          hw: 'CPU',\n          file: {\n            path: 'path',\n            file: 'Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n            size: 4372812000,\n            creation: '2024-06-19T12:14:12.489Z',\n          },\n          memory: 4372812000,\n        },\n      ],\n    };\n    expect(hasCatalogWrongFormat(raw)).toBeTruthy();\n    const catalog = sanitize(raw);\n    expect(catalog.version).equals(CatalogFormat.CURRENT);\n    expect(catalog.models[0].backend).equals('llama-cpp');\n    expect(catalog.models[0].name).equals('Mistral-7B-Instruct-v0.3-Q4_K_M');\n  });\n\n  test('should throw if version is different from CURRENT', () => {\n    const raw = {\n      version: '0.5',\n      recipes: [\n        {\n          id: 'chatbot',\n          description: 'This is a Streamlit chat demo application.',\n          name: 'ChatBot',\n          repository: 'https://github.com/containers/ai-lab-recipes',\n          ref: 'v1.1.3',\n          icon: 'natural-language-processing',\n          categories: ['natural-language-processing'],\n          basedir: 'recipes/natural_language_processing/chatbot',\n          readme: '',\n          recommended: ['hf.instructlab.granite-7b-lab-GGUF', 'hf.instructlab.merlinite-7b-lab-GGUF'],\n          backend: 'llama-cpp',\n        },\n      ],\n      models: [\n        {\n          id: 'Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n          name: 'Mistral-7B-Instruct-v0.3-Q4_K_M',\n          description: 'Model imported from path\\\\Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n          hw: 'CPU',\n          file: {\n            
path: 'path',\n            file: 'Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n            size: 4372812000,\n            creation: '2024-06-19T12:14:12.489Z',\n          },\n          memory: 4372812000,\n        },\n      ],\n    };\n\n    expect(hasCatalogWrongFormat(raw)).toBeFalsy();\n    expect(() => sanitize(raw)).toThrowError('the catalog is using an invalid version');\n  });\n\n  test('should return sanitized ApplicationCatalog with valid raw object', () => {\n    const raw = {\n      version: '1.0',\n      recipes: [\n        {\n          id: 'chatbot',\n          description: 'This is a Streamlit chat demo application.',\n          name: 'ChatBot',\n          repository: 'https://github.com/containers/ai-lab-recipes',\n          ref: 'v1.1.3',\n          icon: 'natural-language-processing',\n          categories: ['natural-language-processing'],\n          basedir: 'recipes/natural_language_processing/chatbot',\n          readme: '',\n          recommended: ['hf.instructlab.granite-7b-lab-GGUF', 'hf.instructlab.merlinite-7b-lab-GGUF'],\n          backend: 'llama-cpp',\n          languages: ['lang1'],\n          frameworks: ['fw1'],\n        },\n      ],\n      models: [\n        {\n          id: 'Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n          name: 'Mistral-7B-Instruct-v0.3-Q4_K_M',\n          description: 'Model imported from path\\\\Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n          hw: 'CPU',\n          file: {\n            path: 'path',\n            file: 'Mistral-7B-Instruct-v0.3-Q4_K_M.gguf',\n            size: 4372812000,\n            creation: '2024-06-19T12:14:12.489Z',\n          },\n          memory: 4372812000,\n        },\n      ],\n    };\n    expect(hasCatalogWrongFormat(raw)).toBeFalsy();\n    const catalog = sanitize(raw);\n    expect(catalog.version).equals(CatalogFormat.CURRENT);\n    expect(catalog.models[0].backend).toBeUndefined();\n    expect(catalog.models[0].name).equals('Mistral-7B-Instruct-v0.3-Q4_K_M');\n    
expect(catalog.recipes[0].languages).toStrictEqual(['lang1']);\n    expect(catalog.recipes[0].frameworks).toStrictEqual(['fw1']);\n  });\n});\n\ndescribe('merge', () => {\n  test('should merge catalogs correctly', () => {\n    const catalogA = {\n      version: CatalogFormat.CURRENT,\n      models: [{ id: 'model-1', name: 'Model A', description: 'Description A' }],\n      recipes: [\n        {\n          id: 'recipe-1',\n          name: 'Recipe A',\n          categories: ['cat-1'],\n          description: 'Desc A',\n          repository: 'repo',\n          readme: 'readme',\n        },\n      ],\n      categories: [{ id: 'cat-1', name: 'Category A', description: 'Desc A' }],\n    };\n\n    const catalogB = {\n      version: CatalogFormat.CURRENT,\n      models: [{ id: 'model-2', name: 'Model B', description: 'Description B' }],\n      recipes: [\n        {\n          id: 'recipe-2',\n          name: 'Recipe B',\n          categories: ['cat-2'],\n          description: 'Desc B',\n          repository: 'repo',\n          readme: 'readme',\n        },\n      ],\n      categories: [{ id: 'cat-2', name: 'Category B', description: 'Desc B' }],\n    };\n\n    const merged = merge(catalogA, catalogB);\n\n    expect(merged.models).toHaveLength(2);\n    expect(merged.recipes).toHaveLength(2);\n    expect(merged.categories).toHaveLength(2);\n  });\n\n  test('should throw error on incompatible versions', () => {\n    const catalogA = { version: CatalogFormat.CURRENT, models: [], recipes: [], categories: [] };\n    const catalogB = { version: CatalogFormat.UNKNOWN, models: [], recipes: [], categories: [] };\n\n    expect(() => merge(catalogA, catalogB)).toThrowError('cannot merge incompatible application catalog format.');\n  });\n});\n\ndescribe('isNonNullObject', () => {\n  test('should return true for non-null objects', () => {\n    expect(isNonNullObject({})).toBe(true);\n    expect(isNonNullObject({ key: 'value' })).toBe(true);\n  });\n\n  test('should return false for 
null or non-object values', () => {\n    expect(isNonNullObject(undefined)).toBe(false);\n    expect(isNonNullObject('string')).toBe(false);\n    expect(isNonNullObject(123)).toBe(false);\n  });\n});\n\ndescribe('sanitizeRecipe', () => {\n  test('undefined object', () => {\n    expect(() => sanitizeRecipe(undefined)).toThrowError('invalid recipe format');\n  });\n\n  test('valid recipe object', () => {\n    expect(sanitizeRecipe(validRecipe)).toEqual(validRecipe);\n  });\n\n  test('missing mandatory fields', () => {\n    const invalidRecipe = { ...validRecipe, id: undefined };\n    expect(() => sanitizeRecipe(invalidRecipe)).toThrowError('invalid recipe format');\n  });\n});\n\ndescribe('sanitizeModel', () => {\n  test('undefined object', () => {\n    expect(() => sanitizeModel(undefined)).toThrowError('invalid model format');\n  });\n\n  test('valid model object', () => {\n    expect(sanitizeModel(validModel)).toEqual(validModel);\n  });\n\n  test('missing mandatory fields', () => {\n    const invalidModel = { ...validModel, id: undefined };\n    expect(() => sanitizeModel(invalidModel)).toThrowError('invalid model format');\n  });\n});\n\ndescribe('sanitizeCategory', () => {\n  test('undefined object', () => {\n    expect(() => sanitizeCategory(undefined)).toThrowError('invalid category format');\n  });\n\n  test('valid category object', () => {\n    expect(sanitizeCategory(validCategory)).toEqual(validCategory);\n  });\n\n  test('missing mandatory fields', () => {\n    const invalidCategory = { ...validCategory, id: undefined };\n    expect(() => sanitizeCategory(invalidCategory)).toThrowError('invalid category format');\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/utils/catalogUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { Category } from '@shared/models/ICategory';\nimport type { LocalModelInfo } from '@shared/models/ILocalModelInfo';\n\nexport enum CatalogFormat {\n  CURRENT = '1.0',\n  UNKNOWN = 'unknown',\n}\n\nexport function sanitize(rawObject: object): ApplicationCatalog {\n  // if there is no version in the user catalog, we try to adapt it automatically to the CURRENT format\n  let raw: object & { version: string };\n  if (hasCatalogWrongFormat(rawObject)) {\n    raw = adaptToCurrent(rawObject);\n  } else {\n    raw = rawObject as object & { version: string };\n  }\n\n  // ensure version is valid\n  if (raw.version !== CatalogFormat.CURRENT) throw new Error('the catalog is using an invalid version');\n\n  return {\n    version: raw.version,\n    recipes: 'recipes' in raw && Array.isArray(raw.recipes) ? raw.recipes.map(recipe => sanitizeRecipe(recipe)) : [],\n    models: 'models' in raw && Array.isArray(raw.models) ? 
raw.models.map(model => sanitizeModel(model)) : [],\n    categories:\n      'categories' in raw && Array.isArray(raw.categories)\n        ? raw.categories.map(category => sanitizeCategory(category))\n        : [],\n  };\n}\n\nexport function hasCatalogWrongFormat(raw: object): boolean {\n  return (\n    !('version' in raw) || ('recipes' in raw && Array.isArray(raw.recipes) && !!raw.recipes.find(r => 'models' in r))\n  );\n}\n\nfunction adaptToCurrent(raw: object): object & { version: string } {\n  // for recipes - assume backend is llama-cpp and copy models field as recommended\n  if ('recipes' in raw && Array.isArray(raw.recipes)) {\n    raw.recipes.forEach(recipe => {\n      recipe.backend = recipe.backend ?? 'llama-cpp';\n      recipe.recommended = recipe.recommended ?? recipe.models ?? []; // Copy models to recommended if not present\n      delete recipe.models; // Clear models to avoid duplication\n    });\n  }\n\n  // for models - assume backend is llama-cpp\n  if ('models' in raw && Array.isArray(raw.models)) {\n    raw.models.forEach(model => {\n      model.backend = model.backend ?? 
'llama-cpp';\n    });\n  }\n\n  return {\n    ...raw,\n    version: CatalogFormat.CURRENT,\n  };\n}\n\n/**\n * This method merge catalog A and B, and let the b overwrite a on conflict\n * @param a\n * @param b\n */\nexport function merge(a: ApplicationCatalog, b: ApplicationCatalog): ApplicationCatalog {\n  if (a.version !== b.version) {\n    throw new Error('cannot merge incompatible application catalog format.');\n  }\n\n  return {\n    version: a.version,\n    models: [...a.models.filter(model => !b.models.some(mModel => model.id === mModel.id)), ...b.models] as ModelInfo[],\n    recipes: [...a.recipes.filter(recipe => !b.recipes.some(mRecipe => recipe.id === mRecipe.id)), ...b.recipes],\n    categories: [\n      ...a.categories.filter(category => !b.categories.some(mCategory => category.id === mCategory.id)),\n      ...b.categories,\n    ],\n  };\n}\n\nexport function isNonNullObject(obj: unknown): obj is object {\n  return !!obj && typeof obj === 'object';\n}\n\nexport function isStringRecord(obj: unknown): obj is Record<string, string> {\n  return (\n    isNonNullObject(obj) &&\n    Object.entries(obj).every(([key, value]) => typeof key === 'string' && typeof value === 'string')\n  );\n}\n\nexport function isStringArray(obj: unknown): obj is Array<string> {\n  return Array.isArray(obj) && obj.every(item => typeof item === 'string');\n}\n\nexport function sanitizeRecipe(recipe: unknown): Recipe {\n  if (\n    isNonNullObject(recipe) &&\n    'id' in recipe &&\n    typeof recipe.id === 'string' &&\n    'name' in recipe &&\n    typeof recipe.name === 'string' &&\n    'categories' in recipe &&\n    isStringArray(recipe.categories) &&\n    'description' in recipe &&\n    typeof recipe.description === 'string' &&\n    'repository' in recipe &&\n    typeof recipe.repository === 'string' &&\n    'readme' in recipe &&\n    typeof recipe.readme === 'string'\n  )\n    return {\n      // mandatory fields\n      id: recipe.id,\n      name: recipe.name,\n      categories: 
recipe.categories,\n      description: recipe.description,\n      repository: recipe.repository,\n      readme: recipe.readme,\n      // optional fields\n      ref: 'ref' in recipe && typeof recipe.ref === 'string' ? recipe.ref : undefined,\n      icon: 'icon' in recipe && typeof recipe.icon === 'string' ? recipe.icon : undefined,\n      basedir: 'basedir' in recipe && typeof recipe.basedir === 'string' ? recipe.basedir : undefined,\n      recommended: 'recommended' in recipe && isStringArray(recipe.recommended) ? recipe.recommended : undefined,\n      backend: 'backend' in recipe && typeof recipe.backend === 'string' ? recipe.backend : undefined,\n      languages: 'languages' in recipe && isStringArray(recipe.languages) ? recipe.languages : undefined,\n      frameworks: 'frameworks' in recipe && isStringArray(recipe.frameworks) ? recipe.frameworks : undefined,\n    };\n  throw new Error('invalid recipe format');\n}\n\nexport function isLocalModelInfo(obj: unknown): obj is LocalModelInfo {\n  return (\n    isNonNullObject(obj) &&\n    'file' in obj &&\n    typeof obj.file === 'string' &&\n    'path' in obj &&\n    typeof obj.path === 'string'\n  );\n}\n\nexport function sanitizeModel(model: unknown): ModelInfo {\n  if (\n    isNonNullObject(model) &&\n    'id' in model &&\n    typeof model.id === 'string' &&\n    'name' in model &&\n    typeof model.name === 'string' &&\n    'description' in model &&\n    typeof model.description === 'string'\n  )\n    return {\n      // mandatory fields\n      id: model.id,\n      name: model.name,\n      description: model.description,\n      // optional fields\n      registry: 'registry' in model && typeof model.registry === 'string' ? model.registry : undefined,\n      license: 'license' in model && typeof model.license === 'string' ? model.license : undefined,\n      url: 'url' in model && typeof model.url === 'string' ? model.url : undefined,\n      memory: 'memory' in model && typeof model.memory === 'number' ? 
model.memory : undefined,\n      properties: 'properties' in model && isStringRecord(model.properties) ? model.properties : undefined,\n      sha256: 'sha256' in model && typeof model.sha256 === 'string' ? model.sha256 : undefined,\n      backend: 'backend' in model && typeof model.backend === 'string' ? model.backend : undefined,\n      file:\n        'file' in model && isLocalModelInfo(model.file)\n          ? {\n              ...model.file,\n              creation: new Date(model.file.creation ?? 0),\n            }\n          : undefined,\n    };\n  throw new Error('invalid model format');\n}\n\nexport function sanitizeCategory(category: unknown): Category {\n  if (\n    isNonNullObject(category) &&\n    'id' in category &&\n    typeof category.id === 'string' &&\n    'name' in category &&\n    typeof category.name === 'string' &&\n    'description' in category &&\n    typeof category.description === 'string'\n  )\n    return {\n      // mandatory fields\n      id: category.id,\n      name: category.name,\n      description: category.description,\n    };\n  throw new Error('invalid category format');\n}\n"
  },
  {
    "path": "packages/backend/src/utils/downloader.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { EventEmitter, type Event } from '@podman-desktop/api';\nimport type { BaseEvent } from '../models/baseEvent';\n\nexport abstract class Downloader {\n  protected readonly _onEvent = new EventEmitter<BaseEvent>();\n  readonly onEvent: Event<BaseEvent> = this._onEvent.event;\n\n  completed: boolean = false;\n\n  protected constructor(\n    protected url: string,\n    protected target: string,\n  ) {}\n\n  getTarget(): string {\n    return this.target;\n  }\n\n  abstract perform(id: string): Promise<void>;\n}\n"
  },
  {
    "path": "packages/backend/src/utils/imagesUtils.spec.ts",
    "content": "import { expect, test } from 'vitest';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { ContainerConfig } from '../models/AIConfig';\nimport { getImageTag } from './imagesUtils';\n\ntest('return recipe-container tag if container image prop is not defined', () => {\n  const recipe = {\n    id: 'recipe1',\n  } as Recipe;\n  const container = {\n    name: 'name',\n  } as ContainerConfig;\n  const imageTag = getImageTag(recipe, container);\n  expect(imageTag).equals('recipe1-name:latest');\n});\ntest('return container image prop is defined', () => {\n  const recipe = {\n    id: 'recipe1',\n  } as Recipe;\n  const container = {\n    name: 'name',\n    image: 'quay.io/repo/image:v1',\n  } as ContainerConfig;\n  const imageTag = getImageTag(recipe, container);\n  expect(imageTag).equals('quay.io/repo/image:v1');\n});\ntest('append latest tag to container image prop if it has no tag', () => {\n  const recipe = {\n    id: 'recipe1',\n  } as Recipe;\n  const container = {\n    name: 'name',\n    image: 'quay.io/repo/image',\n  } as ContainerConfig;\n  const imageTag = getImageTag(recipe, container);\n  expect(imageTag).equals('quay.io/repo/image:latest');\n});\n"
  },
  {
    "path": "packages/backend/src/utils/imagesUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { ContainerConfig } from '../models/AIConfig';\n\nexport function getImageTag(recipe: Recipe, container: ContainerConfig): string {\n  let tag = container.image ?? `${recipe.id}-${container.name}`;\n  if (!tag.includes(':')) {\n    tag += ':latest';\n  }\n  return tag;\n}\n"
  },
  {
    "path": "packages/backend/src/utils/inferenceUtils.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { vi, test, expect, describe, beforeEach } from 'vitest';\nimport { withDefaultConfiguration, isTransitioning, parseInferenceType, getInferenceType } from './inferenceUtils';\nimport { getFreeRandomPort } from './ports';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { InferenceServer, InferenceServerStatus } from '@shared/models/IInference';\nimport { InferenceType } from '@shared/models/IInference';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\n\nvi.mock('./ports', () => ({\n  getFreeRandomPort: vi.fn(),\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(getFreeRandomPort).mockResolvedValue(8888);\n});\n\ndescribe('withDefaultConfiguration', () => {\n  test('zero modelsInfo', async () => {\n    await expect(withDefaultConfiguration({ modelsInfo: [] })).rejects.toThrowError(\n      'modelsInfo need to contain at least one element.',\n    );\n  });\n\n  test('expect all default values', async () => {\n    const result = await withDefaultConfiguration({ modelsInfo: [{ id: 'dummyId' } as unknown as ModelInfo] });\n\n    
expect(getFreeRandomPort).toHaveBeenCalledWith('0.0.0.0');\n\n    expect(result.port).toBe(8888);\n    expect(result.image).toBe(undefined);\n    expect(result.labels).toStrictEqual({});\n    expect(result.connection).toBe(undefined);\n  });\n\n  test('expect no default values', async () => {\n    const connectionMock = {\n      name: 'Dummy Connection',\n    } as unknown as ContainerProviderConnectionInfo;\n    const result = await withDefaultConfiguration({\n      modelsInfo: [{ id: 'dummyId' } as unknown as ModelInfo],\n      port: 9999,\n      connection: connectionMock,\n      image: 'random-image',\n      labels: { hello: 'world' },\n    });\n\n    expect(getFreeRandomPort).not.toHaveBeenCalled();\n\n    expect(result.port).toBe(9999);\n    expect(result.image).toBe('random-image');\n    expect(result.labels).toStrictEqual({ hello: 'world' });\n    expect(result.connection).toBe(connectionMock);\n  });\n});\n\ntest.each(['stopping', 'deleting', 'starting'] as InferenceServerStatus[])(\n  '%s should be a transitioning state',\n  status => {\n    expect(\n      isTransitioning({\n        status: status,\n      } as unknown as InferenceServer),\n    ).toBeTruthy();\n  },\n);\n\ntest.each(['running', 'stopped', 'error'] as InferenceServerStatus[])('%s should be a stable state', status => {\n  expect(\n    isTransitioning({\n      status: status,\n    } as unknown as InferenceServer),\n  ).toBeFalsy();\n});\n\ndescribe('parseInferenceType', () => {\n  test('undefined argument should return InferenceType.None', () => {\n    expect(parseInferenceType(undefined)).toBe(InferenceType.NONE);\n  });\n\n  test('llama-cpp should return the proper InferenceType.LLAMA_CPP', () => {\n    expect(parseInferenceType('llama-cpp')).toBe(InferenceType.LLAMA_CPP);\n  });\n});\n\ndescribe('getInferenceType', () => {\n  test('empty array should return InferenceType.None', () => {\n    expect(getInferenceType([])).toBe(InferenceType.NONE);\n  });\n\n  test('single model with undefined 
backend should return InferenceType.None', () => {\n    expect(\n      getInferenceType([\n        {\n          backend: undefined,\n        } as unknown as ModelInfo,\n      ]),\n    ).toBe(InferenceType.NONE);\n  });\n\n  test('single model with llamacpp backend should return InferenceType.LLAMA_CPP', () => {\n    expect(\n      getInferenceType([\n        {\n          backend: 'llama-cpp',\n        } as unknown as ModelInfo,\n      ]),\n    ).toBe(InferenceType.LLAMA_CPP);\n  });\n\n  test('multiple model with llamacpp backend should return InferenceType.LLAMA_CPP', () => {\n    expect(\n      getInferenceType([\n        {\n          backend: 'llama-cpp',\n        },\n        {\n          backend: 'llama-cpp',\n        },\n      ] as unknown as ModelInfo[]),\n    ).toBe(InferenceType.LLAMA_CPP);\n  });\n\n  test('multiple model with different backend should return InferenceType.None', () => {\n    expect(\n      getInferenceType([\n        {\n          backend: 'llama-cpp',\n        },\n        {\n          backend: 'whisper-cpp',\n        },\n      ] as unknown as ModelInfo[]),\n    ).toBe(InferenceType.NONE);\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/utils/inferenceUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport {\n  containerEngine,\n  type ContainerProviderConnection,\n  type ImageInfo,\n  type ListImagesOptions,\n  type PullEvent,\n} from '@podman-desktop/api';\nimport type { CreationInferenceServerOptions, InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport { getFreeRandomPort } from './ports';\nimport { type InferenceServer, InferenceType } from '@shared/models/IInference';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\n\nexport const LABEL_INFERENCE_SERVER: string = 'ai-lab-inference-server';\n\n/**\n * Given an image name, it will return the ImageInspectInfo corresponding. 
Will raise an error if not found.\n * @param connection\n * @param image\n * @param callback\n */\nexport async function getImageInfo(\n  connection: ContainerProviderConnection,\n  image: string,\n  callback: (event: PullEvent) => void,\n): Promise<ImageInfo> {\n  let imageInfo: ImageInfo | undefined;\n  // Get image inspect\n  imageInfo = (\n    await containerEngine.listImages({\n      provider: connection,\n    } as ListImagesOptions)\n  ).find(imageInfo => imageInfo.RepoTags?.some(tag => tag === image));\n  if (!imageInfo) {\n    try {\n      // Pull image\n      await containerEngine.pullImage(connection, image, callback);\n      // Get image inspect\n      imageInfo = (\n        await containerEngine.listImages({\n          provider: connection,\n        } as ListImagesOptions)\n      ).find(imageInfo => imageInfo.RepoTags?.some(tag => tag === image));\n    } catch (err: unknown) {\n      console.warn('Something went wrong while trying to get image inspect', err);\n      throw err;\n    }\n  }\n\n  if (imageInfo === undefined) throw new Error(`image ${image} not found.`);\n\n  return imageInfo;\n}\n\nexport async function withDefaultConfiguration(\n  options: CreationInferenceServerOptions,\n): Promise<InferenceServerConfig> {\n  if (options.modelsInfo.length === 0) throw new Error('modelsInfo need to contain at least one element.');\n\n  return {\n    port: options.port ?? (await getFreeRandomPort('0.0.0.0')),\n    image: options.image,\n    labels: options.labels ?? {},\n    modelsInfo: options.modelsInfo,\n    connection: options.connection,\n    inferenceProvider: options.inferenceProvider,\n    gpuLayers: options.gpuLayers ?? 
999,\n  };\n}\n\nexport function isTransitioning(server: InferenceServer): boolean {\n  switch (server.status) {\n    case 'deleting':\n    case 'stopping':\n    case 'starting':\n      return true;\n    default:\n      break;\n  }\n\n  return false;\n}\n\n/**\n * Given a primitive (string) return the InferenceType enum\n * @param value\n */\nexport function parseInferenceType(value: string | undefined): InferenceType {\n  if (!value) return InferenceType.NONE;\n  return (Object.values(InferenceType) as unknown as string[]).includes(value)\n    ? (value as unknown as InferenceType)\n    : InferenceType.NONE;\n}\n\n/**\n * Let's collect the backend required by the provided models\n * we only support one backend for all the models, if multiple are provided, NONE will be return\n */\nexport function getInferenceType(modelsInfo: ModelInfo[]): InferenceType {\n  const backends: InferenceType[] = modelsInfo.map(info => parseInferenceType(info.backend));\n  if (new Set(backends).size !== 1) return InferenceType.NONE;\n\n  return backends[0];\n}\n"
  },
  {
    "path": "packages/backend/src/utils/mcpUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { experimental_createMCPClient as createMCPClient } from '@ai-sdk/mcp';\nimport { Experimental_StdioMCPTransport as StdioClientTransport } from '@ai-sdk/mcp/mcp-stdio';\nimport { type McpClient, type McpServer, McpServerType } from '@shared/models/McpSettings';\n\nexport async function toMcpClients(...mcpServers: McpServer[]): Promise<McpClient[]> {\n  const clients: McpClient[] = [];\n  for (const server of mcpServers) {\n    switch (server.type) {\n      case McpServerType.SSE:\n        clients.push(\n          await createMCPClient({\n            name: server.name,\n            transport: {\n              type: 'sse',\n              url: server.url,\n              headers: server.headers,\n            },\n          }),\n        );\n        break;\n      case McpServerType.STDIO:\n        clients.push(\n          await createMCPClient({\n            name: server.name,\n            transport: new StdioClientTransport({\n              command: server.command,\n              args: server.args,\n            }),\n          }),\n        );\n        break;\n    }\n  }\n  return clients;\n}\n"
  },
  {
    "path": "packages/backend/src/utils/modelsUtils.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport { process as apiProcess } from '@podman-desktop/api';\nimport {\n  deleteRemoteModel,\n  getLocalModelFile,\n  getMountPath,\n  getRemoteModelFile,\n  isModelUploaded,\n  MACHINE_BASE_FOLDER,\n} from './modelsUtils';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { getPodmanCli } from './podman';\nimport { join, posix } from 'node:path';\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    process: {\n      exec: vi.fn(),\n    },\n  };\n});\n\nvi.mock('./podman', () => ({\n  getPodmanCli: vi.fn(),\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(getPodmanCli).mockReturnValue('dummyPodmanCli');\n});\n\ndescribe('getLocalModelFile', () => {\n  test('file in ModelInfo undefined', () => {\n    expect(() => {\n      getLocalModelFile({\n        id: 'dummyModelId',\n        file: undefined,\n      } as unknown as ModelInfo);\n    }).toThrowError('model is not available locally.');\n  });\n\n  test('should join path with respect to system host', () => {\n    const path = getLocalModelFile({\n      id: 'dummyModelId',\n      file: {\n        path: 
'dummyPath',\n        file: 'dummy.guff',\n      },\n    } as unknown as ModelInfo);\n\n    if (process.platform === 'win32') {\n      expect(path).toBe('dummyPath\\\\dummy.guff');\n    } else {\n      expect(path).toBe('dummyPath/dummy.guff');\n    }\n  });\n});\n\ndescribe('getMountPath', () => {\n  const DUMMY_MODEL: ModelInfo = {\n    id: 'dummyModelId',\n    file: undefined,\n    properties: {},\n    description: '',\n    name: 'dummy-model',\n  };\n\n  const DOWNLOADED_MODEL: ModelInfo & { file: { path: string; file: string } } = {\n    ...DUMMY_MODEL,\n    file: {\n      path: 'dummyPath',\n      file: 'dummy.guff',\n    },\n  };\n\n  const UPLOADED_MODEL: ModelInfo & { file: { path: string; file: string } } = {\n    ...DUMMY_MODEL,\n    file: {\n      path: MACHINE_BASE_FOLDER,\n      file: 'dummy.guff',\n    },\n  };\n\n  test('file in ModelInfo undefined', () => {\n    expect(() => {\n      getMountPath(DUMMY_MODEL);\n    }).toThrowError('model is not available locally.');\n  });\n\n  test('should join path with respect to system host', () => {\n    const path = getMountPath(DOWNLOADED_MODEL);\n    expect(path).toBe(join(DOWNLOADED_MODEL.file.path, DOWNLOADED_MODEL.file.file));\n  });\n\n  test('uploaded model should use posix for join path', () => {\n    const path = getMountPath(UPLOADED_MODEL);\n    expect(path).toBe(posix.join(MACHINE_BASE_FOLDER, UPLOADED_MODEL.file.file));\n  });\n});\n\ndescribe('getRemoteModelFile', () => {\n  test('file in ModelInfo undefined', () => {\n    expect(() => {\n      getRemoteModelFile({\n        id: 'dummyModelId',\n        file: undefined,\n      } as unknown as ModelInfo);\n    }).toThrowError('model is not available locally.');\n  });\n\n  test('should join path using posix', () => {\n    const path = getRemoteModelFile({\n      id: 'dummyModelId',\n      file: {\n        path: 'dummyPath',\n        file: 'dummy.guff',\n      },\n    } as unknown as ModelInfo);\n\n    
expect(path).toBe(posix.join(MACHINE_BASE_FOLDER, 'dummyModelId'));\n  });\n});\n\ndescribe('isModelUploaded', () => {\n  test('execute stat on targeted machine', async () => {\n    expect(\n      await isModelUploaded('dummyMachine', {\n        id: 'dummyModelId',\n        file: {\n          path: 'dummyPath',\n          file: 'dummy.guff',\n        },\n      } as unknown as ModelInfo),\n    ).toBeTruthy();\n\n    expect(getPodmanCli).toHaveBeenCalled();\n    expect(apiProcess.exec).toHaveBeenCalledWith('dummyPodmanCli', [\n      'machine',\n      'ssh',\n      'dummyMachine',\n      'stat',\n      expect.anything(),\n    ]);\n  });\n});\n\ndescribe('deleteRemoteModel', () => {\n  test('execute stat on targeted machine', async () => {\n    await deleteRemoteModel('dummyMachine', {\n      id: 'dummyModelId',\n      file: {\n        path: 'dummyPath',\n        file: 'dummy.guff',\n      },\n    } as unknown as ModelInfo);\n\n    expect(getPodmanCli).toHaveBeenCalled();\n    expect(apiProcess.exec).toHaveBeenCalledWith('dummyPodmanCli', [\n      'machine',\n      'ssh',\n      'dummyMachine',\n      'rm',\n      '-f',\n      expect.anything(),\n    ]);\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/utils/modelsUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { basename, dirname, join, posix } from 'node:path';\nimport { getPodmanCli } from './podman';\nimport { process } from '@podman-desktop/api';\nimport { escapeSpaces } from './pathUtils';\n\nexport const MACHINE_BASE_FOLDER = '/home/user/ai-lab/models';\n\n/**\n * Given a model info object return the path where is it located locally\n * @param modelInfo\n */\nexport function getLocalModelFile(modelInfo: ModelInfo): string {\n  if (modelInfo.file === undefined) throw new Error('model is not available locally.');\n  return join(modelInfo.file.path, modelInfo.file.file);\n}\n\n/**\n * Return the path to mount where the model is located\n * @param modelInfo\n */\nexport function getMountPath(modelInfo: ModelInfo): string {\n  if (modelInfo.file === undefined) throw new Error('model is not available locally.');\n  // if the model is uploaded we need to use posix join\n  if (modelInfo.file.path.startsWith(MACHINE_BASE_FOLDER)) {\n    return posix.join(modelInfo.file.path, modelInfo.file.file);\n  }\n  return join(modelInfo.file.path, modelInfo.file.file);\n}\n\n/**\n * Given a model info object return the 
theoretical path where the model\n * should be in the podman machine\n * @param modelInfo\n */\nexport function getRemoteModelFile(modelInfo: ModelInfo): string {\n  if (modelInfo.file === undefined) throw new Error('model is not available locally.');\n\n  return posix.join(MACHINE_BASE_FOLDER, modelInfo.id);\n}\n\nexport interface ModelMountInfo {\n  mount: string;\n  suffix?: string;\n}\n\nexport function getHuggingFaceModelMountInfo(modelInfo: ModelInfo): ModelMountInfo {\n  const localPath = getLocalModelFile(modelInfo);\n  const mountPath = getMountPath(modelInfo);\n  if (mountPath !== localPath) {\n    return {\n      mount: mountPath,\n    };\n  } else {\n    const snapShotsFolder = dirname(localPath);\n    const commitHash = basename(localPath);\n    const modelFolder = dirname(snapShotsFolder);\n    const snapShots = basename(snapShotsFolder);\n    return {\n      mount: modelFolder,\n      suffix: `${snapShots}/${commitHash}`,\n    };\n  }\n}\n\n/**\n * utility method to determine if a model is already uploaded to the podman machine\n * @param machine\n * @param modelInfo\n */\nexport async function isModelUploaded(machine: string, modelInfo: ModelInfo): Promise<boolean> {\n  try {\n    const remotePath = escapeSpaces(getRemoteModelFile(modelInfo));\n    await process.exec(getPodmanCli(), ['machine', 'ssh', machine, 'stat', remotePath]);\n    return true;\n  } catch (err: unknown) {\n    console.error('Something went wrong while trying to stat remote model path', err);\n    return false;\n  }\n}\n\n/**\n * Given a machine and a modelInfo, delete the corresponding file on the podman machine\n * @param machine the machine to target\n * @param modelInfo the model info\n */\nexport async function deleteRemoteModel(machine: string, modelInfo: ModelInfo): Promise<void> {\n  try {\n    const remotePath = getRemoteModelFile(modelInfo);\n    await process.exec(getPodmanCli(), ['machine', 'ssh', machine, 'rm', '-f', remotePath]);\n  } catch (err: unknown) {\n    
console.error('Something went wrong while trying to delete remote model path', err);\n  }\n}\n\nexport function getModelPropertiesForEnvironment(modelInfo: ModelInfo): string[] {\n  const envs: string[] = [];\n  if (modelInfo.properties) {\n    envs.push(\n      ...Object.entries(modelInfo.properties).map(([key, value]) => {\n        const formattedKey = key.replace(/[A-Z]/g, m => `_${m}`).toUpperCase();\n        return `MODEL_${formattedKey}=${value}`;\n      }),\n    );\n  }\n  return envs;\n}\n"
  },
  {
    "path": "packages/backend/src/utils/pathUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport path from 'node:path';\n\nexport function getParentDirectory(filePath: string): string {\n  // Normalize the path to handle different platform-specific separators\n  const normalizedPath = path.normalize(filePath);\n\n  // Get the directory name using path.dirname\n  return path.dirname(normalizedPath);\n}\n\nexport function escapeSpaces(path: string): string {\n  return path.replace(/ /g, '\\\\ ');\n}\n"
  },
  {
    "path": "packages/backend/src/utils/podman.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, test, describe, vi } from 'vitest';\nimport * as podmanDesktopApi from '@podman-desktop/api';\nimport * as utils from '../utils/podman';\nimport type { ProviderConnectionStatus } from '@podman-desktop/api';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    getConfigurationMock: vi.fn(),\n    getContainerConnectionsMock: vi.fn(),\n  };\n});\n\nconst config: podmanDesktopApi.Configuration = {\n  get: mocks.getConfigurationMock,\n  has: () => true,\n  update: () => Promise.resolve(),\n};\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    env: {\n      isWindows: false,\n      isLinux: false,\n    },\n    configuration: {\n      getConfiguration: (): unknown => config,\n    },\n    containerEngine: {\n      info: vi.fn(),\n    },\n    navigation: {\n      navigateToResources: vi.fn(),\n    },\n    provider: {\n      getContainerConnections: mocks.getContainerConnectionsMock,\n    },\n    process: {\n      exec: vi.fn(),\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ndescribe('getPodmanCli', () => {\n  test('should return custom binary path if setting is set', () => {\n    
mocks.getConfigurationMock.mockReturnValue('binary');\n    const result = utils.getPodmanCli();\n    expect(result).equals('binary');\n  });\n  test('should return exe file if on windows', () => {\n    vi.mocked(podmanDesktopApi.env).isWindows = true;\n    mocks.getConfigurationMock.mockReturnValue(undefined);\n    const result = utils.getPodmanCli();\n    expect(result).equals('podman.exe');\n  });\n  test('should return podman file if not on windows', () => {\n    vi.mocked(podmanDesktopApi.env).isWindows = false;\n    mocks.getConfigurationMock.mockReturnValue(undefined);\n    const result = utils.getPodmanCli();\n    expect(result).equals('podman');\n  });\n});\n\ndescribe('getPodmanConnection', () => {\n  test('throw error if there is no podman connection with name', () => {\n    mocks.getContainerConnectionsMock.mockReturnValue([\n      {\n        connection: {\n          name: 'Podman Machine',\n          status: (): ProviderConnectionStatus => 'started',\n          endpoint: {\n            socketPath: '/endpoint.sock',\n          },\n          type: 'podman',\n        },\n        providerId: 'podman',\n      },\n    ]);\n    expect(() => utils.getPodmanConnection('sample')).toThrowError('no podman connection found with name sample');\n  });\n  test('return connection with specified name', () => {\n    mocks.getContainerConnectionsMock.mockReturnValue([\n      {\n        connection: {\n          name: 'Podman Machine',\n          status: (): ProviderConnectionStatus => 'started',\n          endpoint: {\n            socketPath: '/endpoint.sock',\n          },\n          type: 'podman',\n        },\n        providerId: 'podman',\n      },\n    ]);\n    const engine = utils.getPodmanConnection('Podman Machine');\n    expect(engine).toBeDefined();\n    expect(engine.providerId).equals('podman');\n    expect(engine.connection.name).equals('Podman Machine');\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/utils/podman.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ContainerProviderConnection, ProviderContainerConnection } from '@podman-desktop/api';\nimport { configuration, env, provider } from '@podman-desktop/api';\n\nexport const MIN_CPUS_VALUE = 4;\n\nexport type MachineJSON = {\n  Name: string;\n  CPUs: number;\n  Memory: string;\n  DiskSize: string;\n  Running: boolean;\n  Starting: boolean;\n  Default: boolean;\n  UserModeNetworking?: boolean;\n  VMType?: string;\n};\n\nexport function getPodmanCli(): string {\n  // If we have a custom binary path regardless if we are running Windows or not\n  const customBinaryPath = getCustomBinaryPath();\n  if (customBinaryPath) {\n    return customBinaryPath;\n  }\n\n  if (env.isWindows) {\n    return 'podman.exe';\n  }\n  return 'podman';\n}\n\n// Get the Podman binary path from configuration podman.binary.path\n// return string or undefined\nexport function getCustomBinaryPath(): string | undefined {\n  return configuration.getConfiguration('podman').get('binary.path');\n}\n\n/**\n * In the ${link ContainerProviderConnection.name} property the name is not usage, and we need to transform it\n * @param connection\n */\nexport function getPodmanMachineName(connection: 
ContainerProviderConnection): string {\n  const runningConnectionName = connection.name;\n  if (runningConnectionName.startsWith('Podman Machine')) {\n    const machineName = runningConnectionName.replace(/Podman Machine\\s*/, 'podman-machine-');\n    if (machineName.endsWith('-')) {\n      return `${machineName}default`;\n    }\n    return machineName;\n  } else {\n    return runningConnectionName;\n  }\n}\n\n/**\n * @deprecated uses {@link PodmanConnection.getContainerProviderConnection}\n */\nexport function getPodmanConnection(connectionName: string): ProviderContainerConnection {\n  const engine = provider\n    .getContainerConnections()\n    .filter(connection => connection.connection.type === 'podman')\n    .find(connection => connection.connection.name === connectionName);\n  if (!engine) {\n    throw new Error(`no podman connection found with name ${connectionName}`);\n  }\n  return engine;\n}\n"
  },
  {
    "path": "packages/backend/src/utils/podsUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { PodHealth } from '@shared/models/IApplicationState';\n\nexport function getPodHealth(infos: (string | undefined)[]): PodHealth {\n  const checked = infos.filter(info => !!info && info !== 'none' && info !== '');\n  if (!checked.length) {\n    return 'none';\n  }\n  if (infos.some(info => info === 'unhealthy')) {\n    return 'unhealthy';\n  }\n  if (infos.some(info => info === 'starting')) {\n    return 'starting';\n  }\n  return 'healthy';\n}\n"
  },
  {
    "path": "packages/backend/src/utils/ports.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport * as net from 'node:net';\n\nexport async function getFreeRandomPort(address: string): Promise<number> {\n  const server = net.createServer();\n  return new Promise((resolve, reject) =>\n    server\n      .on('error', (error: NodeJS.ErrnoException) => reject(error))\n      .on('listening', () => {\n        const addr = server.address();\n        if (typeof addr === 'string') {\n          // this should not happen, as it is only for pipes and unix domain sockets\n          server.close(() => reject(new Error('error getting allocated port')));\n        } else if (addr) {\n          // not sure what the call to close will do on the addr value\n          // => the port value is saved before to call close\n          const allocatedPort = addr.port;\n          server.close(() => resolve(allocatedPort));\n        } else {\n          reject(new Error('invalid server address'));\n        }\n      })\n      .listen(0, address),\n  );\n}\n\nexport async function getPortsInfo(portDescriptor: string): Promise<string | undefined> {\n  const localPort = await getPort(portDescriptor);\n  if (!localPort) {\n    return undefined;\n  }\n  return `${localPort}`;\n}\n\nasync 
function getPort(portDescriptor: string): Promise<number | undefined> {\n  let port: number;\n  if (portDescriptor.endsWith('/tcp') || portDescriptor.endsWith('/udp')) {\n    port = parseInt(portDescriptor.substring(0, portDescriptor.length - 4));\n  } else {\n    port = parseInt(portDescriptor);\n  }\n  // invalid port\n  if (isNaN(port)) {\n    return Promise.resolve(undefined);\n  }\n  try {\n    return await getFreeRandomPort('0.0.0.0');\n  } catch (e) {\n    console.error(e);\n    return undefined;\n  }\n}\n\nexport function getPortsFromLabel(labels: { [key: string]: string }, key: string): number[] {\n  if (!(key in labels)) {\n    return [];\n  }\n  const value = labels[key];\n  const portsStr = value.split(',');\n  const result: number[] = [];\n  for (const portStr of portsStr) {\n    const port = parseInt(portStr, 10);\n    if (isNaN(port)) {\n      // malformed label, just ignore it\n      return [];\n    }\n    result.push(port);\n  }\n  return result;\n}\n"
  },
  {
    "path": "packages/backend/src/utils/randomUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport const getRandomString = (): string => {\n  // eslint-disable-next-line sonarjs/pseudo-random\n  return (Math.random() + 1).toString(36).substring(7);\n};\n\nexport function getRandomName(prefix: string): string {\n  return `${prefix ?? ''}-${new Date().getTime()}`;\n}\n"
  },
  {
    "path": "packages/backend/src/utils/sha.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { beforeEach, expect, test, vi, describe } from 'vitest';\nimport * as fs from 'node:fs';\nimport { getHash, hasValidSha } from './sha';\nimport { Readable } from 'node:stream';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('return true if file has same hash of the expected one', async () => {\n  vi.mock('node:fs');\n\n  const readable = Readable.from('test');\n\n  vi.spyOn(fs, 'createReadStream').mockImplementation(() => {\n    return readable as fs.ReadStream;\n  });\n\n  // sha of test => 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n  const isValid = await hasValidSha('file', '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08');\n  expect(isValid).toBeTruthy();\n});\n\ntest('return false if file has different hash of the expected one', async () => {\n  vi.mock('node:fs');\n  const readable = Readable.from('test');\n\n  vi.spyOn(fs, 'createReadStream').mockImplementation(() => {\n    return readable as fs.ReadStream;\n  });\n\n  // sha of test => 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n  const isValid = await hasValidSha('file', 'fakeSha');\n  expect(isValid).toBeFalsy();\n});\n\ndescribe('sha512', () => 
{\n  test('basic string', () => {\n    const result = getHash('hello-world');\n    expect(result).toBe(\n      '6aeefc29122a3962c90ef834f6caad0033bffcd62941b7a6205a695cc39e2767db7778a7ad76d173a083b9e14b210dc0212923f481b285c784ab1fe340d7ff4d',\n    );\n  });\n\n  test('very long string', () => {\n    const result = getHash('x'.repeat(1024));\n    expect(result).toBe(\n      'fa41ec783342d4c23e7b6550f1e96e32a16269e390449e5fdda60f05611ecb08dd56a5b8cde90024b7da934cdb9a9cc8c8a310eb20e25227699bbf6518e23360',\n    );\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/utils/sha.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport crypto from 'node:crypto';\nimport * as fs from 'node:fs';\nimport { promises } from 'node:stream';\n\nexport async function hasValidSha(filePath: string, expectedSha: string): Promise<boolean> {\n  const checkSum = crypto.createHash('sha256');\n  const input = fs.createReadStream(filePath);\n  await promises.pipeline(input, checkSum);\n\n  const actualSha = checkSum.digest('hex');\n  return actualSha === expectedSha;\n}\n\nexport function getHash(content: string): string {\n  return crypto.createHash('sha512').update(content).digest('hex');\n}\n"
  },
  {
    "path": "packages/backend/src/utils/uploader.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, describe, vi, beforeEach } from 'vitest';\nimport { WSLUploader } from '../workers/uploader/WSLUploader';\nimport * as podmanDesktopApi from '@podman-desktop/api';\nimport { Uploader } from './uploader';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { ContainerProviderConnection } from '@podman-desktop/api';\nimport { VMType } from '@shared/models/IPodman';\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    env: {\n      isWindows: false,\n    },\n    process: {\n      exec: vi.fn(),\n    },\n    EventEmitter: vi.fn().mockImplementation(() => {\n      return {\n        fire: vi.fn(),\n      };\n    }),\n  };\n});\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'machine2',\n  type: 'podman',\n  status: () => 'started',\n  vmType: VMType.WSL,\n  endpoint: {\n    socketPath: 'socket.sock',\n  },\n};\n\nconst uploader = new Uploader(connectionMock, {\n  id: 'dummyModelId',\n  file: {\n    file: 'dummyFile.guff',\n    path: 'localpath',\n  },\n} as unknown as ModelInfo);\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ndescribe('perform', () => {\n  test('should return localModelPath if no workers for current system', async () => {\n    vi.mocked(podmanDesktopApi.env).isWindows = false;\n    const result = await uploader.perform('id');\n    expect(result.startsWith('localpath')).toBeTruthy();\n  });\n  test('should return remote path if there is a worker for current system', async () => {\n    vi.spyOn(WSLUploader.prototype, 'perform').mockResolvedValue('remote');\n    vi.mocked(podmanDesktopApi.env).isWindows = true;\n    const result = await uploader.perform('id');\n    expect(result).toBe('remote');\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/utils/uploader.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { EventEmitter, type Event, type ContainerProviderConnection } from '@podman-desktop/api';\nimport { WSLUploader } from '../workers/uploader/WSLUploader';\nimport { getDurationSecondsSince } from './utils';\nimport type { CompletionEvent, BaseEvent } from '../models/baseEvent';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { getLocalModelFile } from './modelsUtils';\nimport type { IWorker } from '../workers/IWorker';\nimport type { UploaderOptions } from '../workers/uploader/UploaderOptions';\n\nexport class Uploader {\n  readonly #_onEvent = new EventEmitter<BaseEvent>();\n  readonly onEvent: Event<BaseEvent> = this.#_onEvent.event;\n  readonly #workers: IWorker<UploaderOptions, string>[] = [];\n\n  constructor(\n    private connection: ContainerProviderConnection,\n    private modelInfo: ModelInfo,\n    private abortSignal?: AbortSignal,\n  ) {\n    this.#workers = [new WSLUploader()];\n  }\n\n  /**\n   * Performing the upload action\n   * @param id tracking id\n   *\n   * @return the path to model after the operation (either on the podman machine or local if not compatible)\n   */\n  async perform(id: string): Promise<string> {\n   
 // Find the uploader for the current operating system\n    const worker: IWorker<UploaderOptions, string> | undefined = this.#workers.find(w => w.enabled());\n\n    // If none are found, we return the current path\n    if (worker === undefined) {\n      console.warn('There is no workers compatible. Using default local mounting');\n      this.#_onEvent.fire({\n        id,\n        status: 'completed',\n        message: `Use local model`,\n      } as CompletionEvent);\n\n      return getLocalModelFile(this.modelInfo);\n    }\n\n    try {\n      // measure performance\n      const startTime = performance.now();\n      // get new path\n      const remotePath = await worker.perform({\n        connection: this.connection,\n        model: this.modelInfo,\n      });\n      // compute full time\n      const durationSeconds = getDurationSecondsSince(startTime);\n      // fire events\n      this.#_onEvent.fire({\n        id,\n        status: 'completed',\n        message: `Duration ${durationSeconds}s.`,\n        duration: durationSeconds,\n      } as CompletionEvent);\n\n      // return the new path on the podman machine\n      return remotePath;\n    } catch (err) {\n      if (!this.abortSignal?.aborted) {\n        this.#_onEvent.fire({\n          id,\n          status: 'error',\n          message: `Something went wrong: ${String(err)}.`,\n        });\n      } else {\n        this.#_onEvent.fire({\n          id,\n          status: 'canceled',\n          message: `Request cancelled: ${String(err)}.`,\n        });\n      }\n      throw new Error(`Unable to upload model. Error: ${String(err)}`);\n    }\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/utils/urldownloader.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { vi, test, expect, beforeEach } from 'vitest';\nimport { EventEmitter } from '@podman-desktop/api';\nimport { createWriteStream, existsSync, type WriteStream } from 'node:fs';\nimport { rename, rm } from 'node:fs/promises';\nimport https, { type RequestOptions } from 'node:https';\nimport type { ClientRequest, IncomingMessage } from 'node:http';\nimport { URLDownloader } from './urldownloader';\n\nvi.mock('@podman-desktop/api', () => {\n  return {\n    EventEmitter: vi.fn(),\n  };\n});\n\nvi.mock('node:https', () => {\n  return {\n    default: {\n      get: vi.fn(),\n    },\n  };\n});\n\nvi.mock('node:fs', () => {\n  return {\n    createWriteStream: vi.fn(),\n    existsSync: vi.fn(),\n  };\n});\n\nvi.mock('node:fs/promises', () => {\n  return {\n    rename: vi.fn(),\n    rm: vi.fn(),\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  const listeners: ((value: unknown) => void)[] = [];\n\n  vi.mocked(EventEmitter).mockReturnValue({\n    event: vi.fn().mockImplementation(callback => {\n      listeners.push(callback);\n    }),\n    fire: vi.fn().mockImplementation((content: unknown) => {\n      listeners.forEach(listener => listener(content));\n    
}),\n  } as unknown as EventEmitter<unknown>);\n\n  vi.mocked(rm).mockResolvedValue(undefined);\n  vi.mocked(rename).mockResolvedValue(undefined);\n});\n\ntest('Downloader constructor', async () => {\n  const downloader = new URLDownloader('dummyUrl', 'dummyTarget');\n  expect(downloader.getTarget()).toBe('dummyTarget');\n});\n\ntest('perform download failed', async () => {\n  const downloader = new URLDownloader('dummyUrl', 'dummyTarget');\n\n  let onResponse: ((msg: IncomingMessage) => void) | undefined;\n  vi.mocked(\n    https.get as (url: string | URL, options: RequestOptions, callback: (_: IncomingMessage) => void) => ClientRequest,\n  ).mockImplementation((_url, _options, callback) => {\n    onResponse = callback;\n    return {} as unknown as ClientRequest;\n  });\n\n  const closeMock = vi.fn();\n  const onMock = vi.fn();\n  vi.mocked(createWriteStream).mockReturnValue({\n    close: closeMock,\n    on: onMock,\n  } as unknown as WriteStream);\n  vi.mocked(existsSync).mockReturnValue(true);\n\n  onMock.mockImplementation((event: string, callback: (err: Error) => void) => {\n    if (event === 'error') {\n      callback(new Error('dummyError'));\n    }\n  });\n  // capture downloader event(s)\n  const listenerMock = vi.fn();\n  downloader.onEvent(listenerMock);\n\n  const rejectSpy = vi.fn();\n\n  // perform download logic (do not wait)\n  downloader.perform('followUpId').catch((e: unknown) => rejectSpy(e));\n\n  // wait for listener to be registered\n  await vi.waitFor(() => {\n    expect(onResponse).toBeDefined();\n  });\n\n  if (onResponse === undefined) throw new Error('onResponse undefined');\n\n  onResponse({\n    pipe: vi.fn(),\n    on: vi.fn(),\n    headers: { location: undefined },\n  } as unknown as IncomingMessage);\n\n  await vi.waitFor(() => {\n    expect(downloader.completed).toBeTruthy();\n  });\n\n  expect(listenerMock).toHaveBeenCalledWith({\n    id: 'followUpId',\n    message: 'Something went wrong: dummyError.',\n    status: 'error',\n  });\n 
 expect(rm).toHaveBeenCalledWith('dummyTarget.tmp');\n\n  expect(rejectSpy).toHaveBeenCalledWith('dummyError');\n});\n\ntest('perform download successfully', async () => {\n  const downloader = new URLDownloader('dummyUrl', 'dummyTarget');\n  let onResponse: ((msg: IncomingMessage) => void) | undefined;\n  vi.mocked(\n    https.get as (url: string | URL, options: RequestOptions, callback: (_: IncomingMessage) => void) => ClientRequest,\n  ).mockImplementation((_url, _options, callback) => {\n    onResponse = callback;\n    return {} as unknown as ClientRequest;\n  });\n\n  const closeMock = vi.fn();\n  const onMock = vi.fn();\n  vi.mocked(createWriteStream).mockReturnValue({\n    close: closeMock,\n    on: onMock,\n  } as unknown as WriteStream);\n  vi.mocked(existsSync).mockReturnValue(true);\n\n  onMock.mockImplementation((event: string, callback: () => void) => {\n    if (event === 'finish') {\n      callback();\n    }\n  });\n\n  // capture downloader event(s)\n  const listenerMock = vi.fn();\n  downloader.onEvent(listenerMock);\n\n  // perform download logic\n  downloader.perform('followUpId').catch((err: unknown) => console.error(err));\n\n  // wait for listener to be registered\n  await vi.waitFor(() => {\n    expect(onResponse).toBeDefined();\n  });\n\n  if (onResponse === undefined) throw new Error('onResponse undefined');\n\n  onResponse({\n    pipe: vi.fn(),\n    on: vi.fn(),\n    headers: { location: undefined },\n  } as unknown as IncomingMessage);\n\n  await vi.waitFor(() => {\n    expect(downloader.completed).toBeTruthy();\n  });\n\n  expect(rename).toHaveBeenCalledWith('dummyTarget.tmp', 'dummyTarget');\n  expect(downloader.completed).toBeTruthy();\n  expect(listenerMock).toHaveBeenCalledWith({\n    id: 'followUpId',\n    duration: expect.anything(),\n    message: expect.anything(),\n    status: 'completed',\n  });\n  expect(rm).not.toHaveBeenCalled();\n});\n\nclass DownloaderTest extends URLDownloader {\n  public override getRedirect(url: string, 
location: string): string {\n    return super.getRedirect(url, location);\n  }\n}\n\nconst SITE_EXAMPLE = 'https://example.com/hello';\nconst SITE_DUMMY = 'https://dummy.com/world';\n\ntest('redirect should use location if parsable', () => {\n  const downloader = new DownloaderTest(SITE_EXAMPLE, '/home/file.guff');\n  const result = downloader.getRedirect(SITE_EXAMPLE, SITE_DUMMY);\n  expect(result).toBe(SITE_DUMMY);\n});\n\ntest('redirect should concat base url and location if not parsable', () => {\n  const downloader = new DownloaderTest(SITE_EXAMPLE, '/home/file.guff');\n  const result = downloader.getRedirect(SITE_EXAMPLE, '/world');\n  expect(result).toBe('https://example.com/world');\n});\n"
  },
  {
    "path": "packages/backend/src/utils/urldownloader.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { getDurationSecondsSince } from './utils';\nimport { createWriteStream, existsSync } from 'node:fs';\nimport { mkdir, rename, rm } from 'node:fs/promises';\nimport { dirname } from 'node:path';\nimport crypto from 'node:crypto';\nimport https from 'node:https';\nimport type { CompletionEvent, ProgressEvent } from '../models/baseEvent';\nimport { Downloader } from './downloader';\n\nexport class URLDownloader extends Downloader {\n  private requestedIdentifier: string | undefined;\n\n  constructor(\n    url: string,\n    target: string,\n    private sha256?: string,\n    private abortSignal?: AbortSignal,\n  ) {\n    super(url, target);\n  }\n\n  async perform(id: string): Promise<void> {\n    //ensure parent folder exists\n    if (!existsSync(dirname(this.target))) {\n      await mkdir(dirname(this.target), { recursive: true });\n    }\n    this.requestedIdentifier = id;\n    const startTime = performance.now();\n\n    try {\n      await this.download(this.url);\n      const durationSeconds = getDurationSecondsSince(startTime);\n      this._onEvent.fire({\n        id: this.requestedIdentifier,\n        status: 'completed',\n        message: `Duration 
${durationSeconds}s.`,\n        duration: durationSeconds,\n      } as CompletionEvent);\n    } catch (err: unknown) {\n      if (!this.abortSignal?.aborted) {\n        this._onEvent.fire({\n          id: this.requestedIdentifier,\n          status: 'error',\n          message: `Something went wrong: ${String(err)}.`,\n        });\n      } else {\n        this._onEvent.fire({\n          id: this.requestedIdentifier,\n          status: 'canceled',\n          message: `Request cancelled: ${String(err)}.`,\n        });\n      }\n      throw err;\n    } finally {\n      this.completed = true;\n    }\n  }\n\n  private download(url: string): Promise<void> {\n    return new Promise((resolve, reject) => {\n      const callback = (result: { ok?: boolean; error?: string }): void => {\n        if (result.ok) {\n          resolve();\n        } else {\n          reject(result.error);\n        }\n      };\n      this.followRedirects(url, callback);\n    });\n  }\n\n  /**\n   * This file takes as argument a location, either a full url or a path\n   * if a path is provided, the url will be used as origin.\n   * @param url\n   * @param location\n   * @protected\n   */\n  protected getRedirect(url: string, location: string): string {\n    if (URL.canParse(location)) return location;\n\n    const origin = new URL(url).origin;\n    if (URL.canParse(location, origin)) return new URL(location, origin).href;\n\n    return location;\n  }\n\n  private followRedirects(url: string, callback: (message: { ok?: boolean; error?: string }) => void): void {\n    const tmpFile = `${this.target}.tmp`;\n\n    let totalFileSize = 0;\n    let progress = 0;\n    let previousProgressValue = -1;\n    let checkSum: crypto.Hash;\n    if (this.sha256) {\n      checkSum = crypto.createHash('sha256');\n    }\n\n    https.get(url, { signal: this.abortSignal }, resp => {\n      // Determine the total size\n      if (resp.headers.location) {\n        const redirect = this.getRedirect(url, 
resp.headers.location);\n        this.followRedirects(redirect, callback);\n        return;\n      }\n\n      if (totalFileSize === 0 && resp.headers['content-length']) {\n        totalFileSize = parseFloat(resp.headers['content-length']);\n      }\n\n      const stream = createWriteStream(tmpFile, {\n        signal: this.abortSignal,\n      });\n\n      // Capture potential errors\n      resp.on('error', (err: Error) => {\n        stream.destroy(err); // propagate to stream\n      });\n\n      // On data\n      resp.on('data', chunk => {\n        if (checkSum) {\n          checkSum.update(chunk);\n        }\n        progress += chunk.length;\n        const progressValue = (progress * 100) / totalFileSize;\n\n        // Only fire events for progress greater than 1\n        if (progressValue === 100 || progressValue - previousProgressValue > 1) {\n          previousProgressValue = progressValue;\n          this._onEvent.fire({\n            id: this.requestedIdentifier,\n            status: 'progress',\n            value: progressValue,\n            total: totalFileSize,\n          } as ProgressEvent);\n        }\n      });\n      // Pipe to stream\n      resp.pipe(stream);\n\n      // Handle error case\n      stream.on('error', (err: Error) => {\n        rm(tmpFile)\n          .then(() => {\n            callback({\n              error: err.message,\n            });\n          })\n          .catch((err: unknown) => {\n            console.error(`Something went wrong while trying to delete ${tmpFile}`, err);\n          });\n      });\n\n      // On close event\n      stream.on('finish', () => {\n        // check if _parent_ is errored\n        if (resp.errored) {\n          return;\n        }\n\n        if (checkSum) {\n          const actualSha = checkSum.digest('hex');\n          if (this.sha256 !== actualSha) {\n            callback({\n              error: `The file's security hash (SHA-256) does not match the expected value. 
The file may have been altered or corrupted during the download process`,\n            });\n            rm(tmpFile).catch((err: unknown) => {\n              console.error(`Something went wrong while trying to delete ${tmpFile}`, err);\n            });\n            return;\n          }\n        }\n\n        // If everything is fine we simply rename the tmp file to the expected one\n        rename(tmpFile, this.target)\n          .then(() => {\n            callback({ ok: true });\n          })\n          .catch((err: unknown) => {\n            callback({ error: `Something went wrong while trying to rename downloaded file: ${String(err)}.` });\n          });\n      });\n    });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/utils/utils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport * as http from 'node:http';\n\nexport async function timeout(time: number): Promise<void> {\n  return new Promise<void>(resolve => {\n    setTimeout(resolve, time);\n  });\n}\n\nexport async function isEndpointAlive(endPoint: string): Promise<boolean> {\n  return new Promise<boolean>(resolve => {\n    const req = http.get(endPoint, res => {\n      res.on('data', () => {\n        // do nothing\n      });\n\n      res.on('end', () => {\n        console.log(res);\n        if (res.statusCode === 200) {\n          resolve(true);\n        } else {\n          resolve(false);\n        }\n      });\n    });\n    req.once('error', err => {\n      console.log('Error while pinging endpoint', err);\n      resolve(false);\n    });\n  });\n}\n\nexport function getDurationSecondsSince(startTimeMs: number): number {\n  return Math.round((performance.now() - startTimeMs) / 1000);\n}\n\nexport const DISABLE_SELINUX_LABEL_SECURITY_OPTION = 'label=disable';\n"
  },
  {
    "path": "packages/backend/src/webviewUtils.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { initWebview } from './webviewUtils';\nimport type { Uri } from '@podman-desktop/api';\nimport { type PathLike, promises } from 'node:fs';\n\nvi.mock('@podman-desktop/api', async () => {\n  return {\n    Uri: class {\n      static readonly joinPath = (): unknown => ({ fsPath: '.' 
});\n    },\n    window: {\n      createWebviewPanel: (): unknown => ({\n        webview: {\n          html: '',\n          onDidReceiveMessage: vi.fn(),\n          postMessage: vi.fn(),\n          asWebviewUri: () => 'dummy-src',\n        },\n        onDidChangeViewState: vi.fn(),\n      }),\n    },\n  };\n});\n\nvi.mock('node:fs', () => ({\n  promises: {\n    readFile: vi.fn(),\n  },\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('panel should have file content as html', async () => {\n  vi.mocked(promises.readFile as (path: PathLike) => Promise<string>).mockImplementation(() => {\n    return Promise.resolve('<html></html>');\n  });\n\n  const panel = await initWebview({} as unknown as Uri);\n  expect(panel.webview.html).toBe('<html></html>');\n});\n\ntest('script src should be replaced with asWebviewUri result', async () => {\n  vi.mocked(promises.readFile as (path: PathLike) => Promise<string>).mockImplementation(() => {\n    return Promise.resolve('<script type=\"module\" crossorigin src=\"./index-RKnfBG18.js\"></script>');\n  });\n\n  const panel = await initWebview({} as unknown as Uri);\n  expect(panel.webview.html).toBe('<script type=\"module\" crossorigin src=\"dummy-src\"></script>');\n});\n\ntest('links src should be replaced with asWebviewUri result', async () => {\n  vi.mocked(promises.readFile as (path: PathLike) => Promise<string>).mockImplementation(() => {\n    return Promise.resolve('<link rel=\"stylesheet\" href=\"./styles.css\">');\n  });\n\n  const panel = await initWebview({} as unknown as Uri);\n  expect(panel.webview.html).toBe('<link rel=\"stylesheet\" href=\"dummy-src\">');\n});\n"
  },
  {
    "path": "packages/backend/src/webviewUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { Uri, type WebviewOptions, type WebviewPanel, window } from '@podman-desktop/api';\nimport { promises } from 'node:fs';\n\nfunction getWebviewOptions(extensionUri: Uri): WebviewOptions {\n  return {\n    // Enable javascript in the webview\n    // enableScripts: true,\n\n    // And restrict the webview to only loading content from our extension's `media` directory.\n    localResourceRoots: [Uri.joinPath(extensionUri, 'media')],\n  };\n}\n\nexport async function initWebview(extensionUri: Uri): Promise<WebviewPanel> {\n  // register webview\n  const panel = window.createWebviewPanel('studio', 'AI Lab', getWebviewOptions(extensionUri));\n\n  // update html\n  const indexHtmlUri = Uri.joinPath(extensionUri, 'media', 'index.html');\n  const indexHtmlPath = indexHtmlUri.fsPath;\n\n  let indexHtml = await promises.readFile(indexHtmlPath, 'utf8');\n\n  // replace links with webView Uri links\n  // in the content <script type=\"module\" crossorigin src=\"./index-RKnfBG18.js\"></script> replace src with webview.asWebviewUri\n  // eslint-disable-next-line sonarjs/slow-regex\n  const scriptLink = indexHtml.match(/<script.*?src=\"(.*?)\".*?>/g);\n  if (scriptLink) {\n  
  scriptLink.forEach(link => {\n      const src = RegExp(/src=\"(.*?)\"/).exec(link);\n      if (src) {\n        const webviewSrc = panel.webview.asWebviewUri(Uri.joinPath(extensionUri, 'media', src[1]));\n        if (!webviewSrc) throw new Error('undefined webviewSrc');\n        indexHtml = indexHtml.replace(src[1], webviewSrc.toString());\n      }\n    });\n  }\n\n  // and now replace for css file as well\n  // eslint-disable-next-line sonarjs/slow-regex\n  const cssLink = indexHtml.match(/<link.*?href=\"(.*?)\".*?>/g);\n  if (cssLink) {\n    cssLink.forEach(link => {\n      const href = RegExp(/href=\"(.*?)\"/).exec(link);\n      if (href) {\n        const webviewHref = panel.webview.asWebviewUri(Uri.joinPath(extensionUri, 'media', href[1]));\n        if (!webviewHref)\n          throw new Error('Something went wrong while replacing links with webView Uri links: undefined webviewHref');\n        indexHtml = indexHtml.replace(href[1], webviewHref.toString());\n      }\n    });\n  }\n\n  panel.webview.html = indexHtml;\n\n  return panel;\n}\n"
  },
  {
    "path": "packages/backend/src/workers/IWorker.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface IWorker<T, R> {\n  enabled(): boolean;\n  perform(args: T): Promise<R>;\n}\n"
  },
  {
    "path": "packages/backend/src/workers/WindowsWorker.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { env } from '@podman-desktop/api';\nimport type { IWorker } from './IWorker';\n\nexport abstract class WindowsWorker<T, R> implements IWorker<T, R> {\n  enabled(): boolean {\n    return env.isWindows;\n  }\n\n  abstract perform(content: T): Promise<R>;\n}\n"
  },
  {
    "path": "packages/backend/src/workers/provider/InferenceProvider.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { type BetterContainerCreateResult, InferenceProvider } from './InferenceProvider';\nimport type { InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport type { ContainerCreateOptions, ContainerProviderConnection, ImageInfo } from '@podman-desktop/api';\nimport { containerEngine } from '@podman-desktop/api';\nimport { getImageInfo } from '../../utils/inferenceUtils';\nimport type { TaskState } from '@shared/models/ITask';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { InferenceType } from '@shared/models/IInference';\n\nvi.mock('../../utils/inferenceUtils', () => ({\n  getImageInfo: vi.fn(),\n  LABEL_INFERENCE_SERVER: 'ai-lab-inference-server',\n}));\n\nvi.mock('@podman-desktop/api', () => ({\n  containerEngine: {\n    createContainer: vi.fn(),\n  },\n}));\n\nconst DummyImageInfo: ImageInfo = {\n  Id: 'dummy-image-id',\n  engineId: 'dummy-engine-id',\n} as unknown as ImageInfo;\n\nconst taskRegistry: TaskRegistry = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n} as 
unknown as TaskRegistry;\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'Dummy Connection',\n  type: 'podman',\n} as unknown as ContainerProviderConnection;\n\nclass TestInferenceProvider extends InferenceProvider {\n  constructor() {\n    super(taskRegistry, InferenceType.NONE, 'test-inference-provider');\n  }\n\n  enabled(): boolean {\n    throw new Error('not implemented');\n  }\n\n  publicPullImage(\n    connection: ContainerProviderConnection,\n    image: string,\n    labels: { [id: string]: string },\n  ): Promise<ImageInfo> {\n    return super.pullImage(connection, image, labels);\n  }\n\n  async publicCreateContainer(\n    engineId: string,\n    containerCreateOptions: ContainerCreateOptions,\n    labels: { [id: string]: string } = {},\n  ): Promise<BetterContainerCreateResult> {\n    const result = await this.createContainer(engineId, containerCreateOptions, labels);\n    return {\n      id: result.id,\n      engineId: engineId,\n    };\n  }\n\n  async perform(_config: InferenceServerConfig): Promise<InferenceServer> {\n    throw new Error('not implemented');\n  }\n  dispose(): void {}\n}\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(getImageInfo).mockResolvedValue(DummyImageInfo);\n  vi.mocked(taskRegistry.createTask).mockImplementation(\n    (name: string, state: TaskState, labels: { [id: string]: string } = {}) => ({\n      id: 'dummy-task-id',\n      name: name,\n      state: state,\n      labels: labels,\n    }),\n  );\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'dummy-container-id',\n    engineId: 'dummy-engine-id',\n  });\n});\n\ndescribe('pullImage', () => {\n  test('should create a task and mark as success on completion', async () => {\n    const provider = new TestInferenceProvider();\n    await provider.publicPullImage(connectionMock, 'dummy-image', {\n      key: 'value',\n    });\n\n    expect(taskRegistry.createTask).toHaveBeenCalledWith('Pulling dummy-image.', 'loading', {\n      
key: 'value',\n    });\n\n    expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n      id: 'dummy-task-id',\n      name: 'Pulling dummy-image.',\n      labels: {\n        key: 'value',\n      },\n      state: 'success',\n    });\n  });\n\n  test('should mark the task as error when pulling failed', async () => {\n    const provider = new TestInferenceProvider();\n    vi.mocked(getImageInfo).mockRejectedValue(new Error('dummy test error'));\n\n    await expect(\n      provider.publicPullImage(connectionMock, 'dummy-image', {\n        key: 'value',\n      }),\n    ).rejects.toThrowError('dummy test error');\n\n    expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n      id: 'dummy-task-id',\n      name: 'Pulling dummy-image.',\n      labels: {\n        key: 'value',\n      },\n      state: 'error',\n      error: 'Something went wrong while pulling dummy-image: Error: dummy test error',\n    });\n  });\n});\n\ndescribe('createContainer', () => {\n  test('should create a task and mark as success on completion', async () => {\n    const provider = new TestInferenceProvider();\n    await provider.publicCreateContainer(\n      'dummy-engine-id',\n      {\n        name: 'dummy-container-name',\n      },\n      {\n        key: 'value',\n      },\n    );\n\n    expect(taskRegistry.createTask).toHaveBeenCalledWith('Creating container.', 'loading', {\n      key: 'value',\n    });\n\n    expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n      id: 'dummy-task-id',\n      name: 'Creating container.',\n      labels: {\n        key: 'value',\n      },\n      state: 'success',\n    });\n  });\n\n  test('should mark the task as error when creation failed', async () => {\n    const provider = new TestInferenceProvider();\n    vi.mocked(containerEngine.createContainer).mockRejectedValue(new Error('dummy test error'));\n\n    await expect(\n      provider.publicCreateContainer(\n        'dummy-provider-id',\n        {\n          name: 'dummy-container-name',\n        },\n 
       {\n          key: 'value',\n        },\n      ),\n    ).rejects.toThrowError('dummy test error');\n\n    expect(taskRegistry.updateTask).toHaveBeenCalledWith({\n      id: 'dummy-task-id',\n      name: 'Creating container.',\n      labels: {\n        key: 'value',\n      },\n      state: 'error',\n      error: 'Something went wrong while creating container: Error: dummy test error',\n    });\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/workers/provider/InferenceProvider.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type {\n  ContainerCreateOptions,\n  ContainerCreateResult,\n  ContainerProviderConnection,\n  Disposable,\n  ImageInfo,\n  PullEvent,\n} from '@podman-desktop/api';\nimport { containerEngine } from '@podman-desktop/api';\nimport type { InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport type { IWorker } from '../IWorker';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { getImageInfo } from '../../utils/inferenceUtils';\nimport type { InferenceServer, InferenceType } from '@shared/models/IInference';\n\nexport type BetterContainerCreateResult = ContainerCreateResult & { engineId: string };\n\nexport abstract class InferenceProvider implements IWorker<InferenceServerConfig, InferenceServer>, Disposable {\n  readonly type: InferenceType;\n  readonly name: string;\n\n  protected constructor(\n    private taskRegistry: TaskRegistry,\n    type: InferenceType,\n    name: string,\n  ) {\n    this.type = type;\n    this.name = name;\n  }\n\n  abstract enabled(): boolean;\n  prePerform(_config: InferenceServerConfig): Promise<void> {\n    return Promise.resolve();\n  }\n  abstract perform(config: 
InferenceServerConfig): Promise<InferenceServer>;\n  abstract dispose(): void;\n\n  protected async createContainer(\n    engineId: string,\n    containerCreateOptions: ContainerCreateOptions,\n    labels: { [id: string]: string },\n  ): Promise<BetterContainerCreateResult> {\n    const containerTask = this.taskRegistry.createTask(`Creating container.`, 'loading', labels);\n\n    try {\n      const result = await containerEngine.createContainer(engineId, containerCreateOptions);\n      // update the task\n      containerTask.state = 'success';\n      containerTask.progress = undefined;\n      // return the ContainerCreateResult\n      return {\n        id: result.id,\n        engineId: engineId,\n      };\n    } catch (err: unknown) {\n      containerTask.state = 'error';\n      containerTask.progress = undefined;\n      containerTask.error = `Something went wrong while creating container: ${String(err)}`;\n      throw err;\n    } finally {\n      this.taskRegistry.updateTask(containerTask);\n    }\n  }\n\n  /**\n   * This method allows to pull the image, while creating a task for the user to follow progress\n   * @param connection\n   * @param image\n   * @param labels\n   * @protected\n   */\n  protected pullImage(\n    connection: ContainerProviderConnection,\n    image: string,\n    labels: { [id: string]: string },\n  ): Promise<ImageInfo> {\n    // Creating a task to follow pulling progress\n    const pullingTask = this.taskRegistry.createTask(`Pulling ${image}.`, 'loading', labels);\n\n    // get the default image info for this provider\n    return getImageInfo(connection, image, (_event: PullEvent) => {})\n      .catch((err: unknown) => {\n        pullingTask.state = 'error';\n        pullingTask.progress = undefined;\n        pullingTask.error = `Something went wrong while pulling ${image}: ${String(err)}`;\n        throw err;\n      })\n      .then(imageInfo => {\n        pullingTask.state = 'success';\n        pullingTask.progress = undefined;\n        
return imageInfo;\n      })\n      .finally(() => {\n        this.taskRegistry.updateTask(pullingTask);\n      });\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/workers/provider/LlamaCppPython.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { LlamaCppPython, SECOND } from './LlamaCppPython';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { getImageInfo, LABEL_INFERENCE_SERVER } from '../../utils/inferenceUtils';\nimport type { ContainerProviderConnection, ImageInfo } from '@podman-desktop/api';\nimport { containerEngine } from '@podman-desktop/api';\nimport type { GPUManager } from '../../managers/GPUManager';\nimport type { PodmanConnection } from '../../managers/podmanConnection';\nimport { VMType } from '@shared/models/IPodman';\nimport type { ConfigurationRegistry } from '../../registries/ConfigurationRegistry';\nimport { GPUVendor } from '@shared/models/IGPUInfo';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { InferenceType } from '@shared/models/IInference';\nimport { llamacpp } from '../../assets/inference-images.json';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { join } from 'node:path';\n\nvi.mock('@podman-desktop/api', () => ({\n  
containerEngine: {\n    createContainer: vi.fn(),\n  },\n}));\n\nvi.mock('../../utils/inferenceUtils', () => ({\n  getProviderContainerConnection: vi.fn(),\n  getImageInfo: vi.fn(),\n  LABEL_INFERENCE_SERVER: 'ai-lab-inference-server',\n}));\n\nconst taskRegistry: TaskRegistry = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst gpuManager: GPUManager = {\n  collectGPUs: vi.fn(),\n} as unknown as GPUManager;\n\nconst DummyModel: ModelInfo = {\n  name: 'dummy model',\n  id: 'dummy-model-id',\n  file: {\n    file: 'dummy-file.guff',\n    path: 'dummy-path',\n  },\n  properties: {},\n  description: 'dummy-desc',\n};\n\nconst dummyConnection: ContainerProviderConnection = {\n  name: 'dummy-provider-connection',\n  type: 'podman',\n  vmType: VMType.WSL,\n  status: () => 'started',\n  endpoint: {\n    socketPath: 'dummy-socket',\n  },\n};\n\nconst DummyImageInfo: ImageInfo = {\n  Id: 'dummy-image-id',\n  engineId: 'dummy-engine-id',\n} as unknown as ImageInfo;\n\nconst podmanConnection: PodmanConnection = {\n  findRunningContainerProviderConnection: vi.fn(),\n  getContainerProviderConnection: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst configurationRegistry: ConfigurationRegistry = {\n  getExtensionConfiguration: vi.fn(),\n} as unknown as ConfigurationRegistry;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n    experimentalGPU: false,\n    modelsPath: 'model-path',\n    apiPort: 10434,\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    modelUploadDisabled: false,\n    showGPUPromotion: false,\n    appearance: 'dark',\n  });\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue(dummyConnection);\n  vi.mocked(podmanConnection.getContainerProviderConnection).mockReturnValue(dummyConnection);\n  vi.mocked(getImageInfo).mockResolvedValue(DummyImageInfo);\n  
vi.mocked(taskRegistry.createTask).mockReturnValue({ id: 'dummy-task-id', name: '', labels: {}, state: 'loading' });\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'dummy-container-id',\n    engineId: 'dummy-engine-id',\n  });\n});\n\ntest('LlamaCppPython being the default, it should always be enable', () => {\n  const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n  expect(provider.enabled()).toBeTruthy();\n});\n\ndescribe('perform', () => {\n  test('config without image should use defined image', async () => {\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(podmanConnection.findRunningContainerProviderConnection).toHaveBeenCalled();\n    expect(getImageInfo).toHaveBeenCalledWith(dummyConnection, llamacpp.default, expect.anything());\n  });\n\n  test('config without models should throw an error', async () => {\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n\n    await expect(\n      provider.perform({\n        port: 8000,\n        image: undefined,\n        labels: {},\n        modelsInfo: [],\n        connection: undefined,\n      }),\n    ).rejects.toThrowError('Need at least one model info to start an inference server.');\n  });\n\n  test('config model without file should throw an error', async () => {\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n\n    await expect(\n      provider.perform({\n        port: 8000,\n        image: undefined,\n        labels: {},\n        modelsInfo: [\n          {\n            id: 'invalid',\n          } as unknown as ModelInfo,\n        ],\n        connection: undefined,\n      }),\n    
).rejects.toThrowError('The model info file provided is undefined');\n  });\n\n  test('valid config should produce expected CreateContainerOptions', async () => {\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n\n    const server = await provider.perform({\n      port: 8888,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(server).toStrictEqual<InferenceServer>({\n      container: {\n        containerId: 'dummy-container-id',\n        engineId: DummyImageInfo.engineId,\n      },\n      labels: {\n        [LABEL_INFERENCE_SERVER]: `[\"${DummyModel.id}\"]`,\n        api: 'http://localhost:8888/v1',\n        docs: 'http://localhost:10434/api-docs/8888',\n      },\n      models: [DummyModel],\n      status: 'running',\n      type: InferenceType.LLAMA_CPP,\n      connection: {\n        port: 8888,\n      },\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(DummyImageInfo.engineId, {\n      Cmd: [],\n      Detach: true,\n      Env: ['MODEL_PATH=/models/dummy-file.guff', 'HOST=0.0.0.0', 'PORT=8000'],\n      ExposedPorts: {\n        '8888': {},\n      },\n      HealthCheck: {\n        Interval: SECOND * 5,\n        Retries: 20,\n        Test: ['CMD-SHELL', 'curl -sSf localhost:8000 > /dev/null'],\n      },\n      HostConfig: {\n        AutoRemove: false,\n        Mounts: [\n          {\n            Source: join('dummy-path', 'dummy-file.guff'),\n            Target: '/models/dummy-file.guff',\n            Type: 'bind',\n          },\n        ],\n        DeviceRequests: [],\n        Devices: [],\n        PortBindings: {\n          '8000/tcp': [\n            {\n              HostPort: '8888',\n            },\n          ],\n        },\n        SecurityOpt: ['label=disable'],\n      },\n      Image: DummyImageInfo.Id,\n      Labels: {\n        [LABEL_INFERENCE_SERVER]: `[\"${DummyModel.id}\"]`,\n        api: 
'http://localhost:8888/v1',\n        docs: 'http://localhost:10434/api-docs/8888',\n      },\n    });\n  });\n\n  test('model properties should be made uppercased', async () => {\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [\n        {\n          ...DummyModel,\n          properties: {\n            basicProp: 'basicProp',\n            lotOfCamelCases: 'lotOfCamelCases',\n            lowercase: 'lowercase',\n            chatFormat: 'dummyChatFormat',\n          },\n        },\n      ],\n      connection: undefined,\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(DummyImageInfo.engineId, {\n      Env: expect.arrayContaining([\n        'MODEL_BASIC_PROP=basicProp',\n        'MODEL_LOT_OF_CAMEL_CASES=lotOfCamelCases',\n        'MODEL_LOWERCASE=lowercase',\n        'MODEL_CHAT_FORMAT=dummyChatFormat',\n      ]),\n      Cmd: expect.anything(),\n      HealthCheck: expect.anything(),\n      HostConfig: expect.anything(),\n      ExposedPorts: expect.anything(),\n      Labels: expect.anything(),\n      Image: DummyImageInfo.Id,\n      Detach: true,\n    });\n  });\n\n  test('gpu experimental should collect GPU data', async () => {\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'nvidia',\n        vendor: GPUVendor.NVIDIA,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    const server = await 
provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(\n      DummyImageInfo.engineId,\n      expect.objectContaining({\n        Cmd: [\n          '-c',\n          '/usr/bin/ln -sfn /usr/lib/wsl/lib/* /usr/lib64/ && PATH=\"${PATH}:/usr/lib/wsl/lib/\" && /usr/bin/llama-server.sh',\n        ],\n      }),\n    );\n    expect(gpuManager.collectGPUs).toHaveBeenCalled();\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.cuda, expect.any(Function));\n    expect('gpu' in server.labels).toBeTruthy();\n    expect(server.labels['gpu']).toBe('nvidia');\n  });\n\n  test('gpu experimental should collect GPU data and find first supported gpu - entry 1 supported', async () => {\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'dummy-model',\n        vendor: GPUVendor.UNKNOWN,\n      },\n      {\n        vram: 1024,\n        model: 'nvidia',\n        vendor: GPUVendor.NVIDIA,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    const server = await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(\n      DummyImageInfo.engineId,\n      expect.objectContaining({\n        Cmd: [\n          '-c',\n          '/usr/bin/ln -sfn /usr/lib/wsl/lib/* /usr/lib64/ && 
PATH=\"${PATH}:/usr/lib/wsl/lib/\" && /usr/bin/llama-server.sh',\n        ],\n      }),\n    );\n    expect(gpuManager.collectGPUs).toHaveBeenCalled();\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.cuda, expect.any(Function));\n    expect('gpu' in server.labels).toBeTruthy();\n    expect(server.labels['gpu']).toBe('nvidia');\n  });\n\n  test('gpu experimental should collect GPU data and find first supported gpu - entry 0 supported', async () => {\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'nvidia',\n        vendor: GPUVendor.NVIDIA,\n      },\n      {\n        vram: 1024,\n        model: 'dummy-model',\n        vendor: GPUVendor.UNKNOWN,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    const server = await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(\n      DummyImageInfo.engineId,\n      expect.objectContaining({\n        Cmd: [\n          '-c',\n          '/usr/bin/ln -sfn /usr/lib/wsl/lib/* /usr/lib64/ && PATH=\"${PATH}:/usr/lib/wsl/lib/\" && /usr/bin/llama-server.sh',\n        ],\n      }),\n    );\n    expect(gpuManager.collectGPUs).toHaveBeenCalled();\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.cuda, expect.any(Function));\n    expect('gpu' in server.labels).toBeTruthy();\n    expect(server.labels['gpu']).toBe('nvidia');\n  });\n\n  test('unknown gpu 
on unsupported vmtype should not provide gpu labels', async () => {\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'dummy-model',\n        vendor: GPUVendor.UNKNOWN,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    const server = await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(gpuManager.collectGPUs).toHaveBeenCalled();\n    expect('gpu' in server.labels).toBeFalsy();\n  });\n\n  test('LIBKRUN vmtype should uses llamacpp.default image with gpu layers 999', async () => {\n    vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n      ...dummyConnection,\n      vmType: VMType.LIBKRUN,\n    });\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'dummy-model',\n        vendor: GPUVendor.APPLE,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    const server = await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n  
    connection: undefined,\n    });\n\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.default, expect.any(Function));\n    expect(gpuManager.collectGPUs).toHaveBeenCalled();\n    expect('gpu' in server.labels).toBeTruthy();\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(\n      DummyImageInfo.engineId,\n      expect.objectContaining({\n        Env: expect.arrayContaining(['GPU_LAYERS=999']),\n      }),\n    );\n  });\n\n  test('UNKNOWN vmtype should use llamacpp.default image - if not gpu accelerated', async () => {\n    vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n      ...dummyConnection,\n      vmType: VMType.UNKNOWN,\n    });\n\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'dummy-model',\n        vendor: GPUVendor.UNKNOWN,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n\n    const server = await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.default, expect.any(Function));\n    expect(gpuManager.collectGPUs).toHaveBeenCalled();\n    expect('gpu' in server.labels).toBeFalsy();\n  });\n\n  test('UNKNOWN vmtype should use llamacpp.cuda image - if gpu accelerated and cdi configured', async () => {\n    vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n      ...dummyConnection,\n      vmType: 
VMType.UNKNOWN,\n    });\n\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'dummy-model',\n        vendor: GPUVendor.NVIDIA,\n      },\n    ]);\n\n    class CDILlamaCppPython extends LlamaCppPython {\n      override isNvidiaCDIConfigured(): boolean {\n        return true;\n      }\n    }\n\n    const provider = new CDILlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    const server = await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.cuda, expect.any(Function));\n    expect('gpu' in server.labels).toBeTruthy();\n  });\n\n  test('WSL vmtype with Intel GPU should use llamacpp.intel image and no custom entrypoint', async () => {\n    vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n      ...dummyConnection,\n      vmType: VMType.WSL,\n    });\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'intel-gpu',\n        vendor: GPUVendor.INTEL,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, 
configurationRegistry);\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.intel, expect.any(Function));\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(\n      DummyImageInfo.engineId,\n      expect.objectContaining({\n        Entrypoint: undefined,\n        Cmd: [],\n        Env: expect.arrayContaining(['ZES_ENABLE_SYSMAN=1']),\n      }),\n    );\n  });\n\n  test('UNKNOWN vmtype with Intel GPU should use llamacpp.intel image and no custom entrypoint', async () => {\n    vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n      ...dummyConnection,\n      vmType: VMType.UNKNOWN,\n    });\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'intel-gpu',\n        vendor: GPUVendor.INTEL,\n      },\n    ]);\n\n    const provider = new LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.intel, expect.any(Function));\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(\n      DummyImageInfo.engineId,\n      expect.objectContaining({\n        Entrypoint: undefined,\n        User: '0',\n        Cmd: [],\n        HostConfig: expect.objectContaining({\n          
DeviceRequests: [],\n          Devices: expect.arrayContaining([\n            expect.objectContaining({\n              PathOnHost: '/dev/dri',\n            }),\n          ]),\n        }),\n      }),\n    );\n  });\n\n  test('UNKNOWN vmtype should use llamacpp.default image - if gpu but cdi not configured', async () => {\n    vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue({\n      ...dummyConnection,\n      vmType: VMType.UNKNOWN,\n    });\n\n    vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n      experimentalGPU: true,\n      modelsPath: '',\n      apiPort: 10434,\n      inferenceRuntime: 'llama-cpp',\n      experimentalTuning: false,\n      modelUploadDisabled: false,\n      showGPUPromotion: false,\n      appearance: 'dark',\n    });\n\n    vi.mocked(gpuManager.collectGPUs).mockResolvedValue([\n      {\n        vram: 1024,\n        model: 'dummy-model',\n        vendor: GPUVendor.NVIDIA,\n      },\n    ]);\n\n    class NoCDILlamaCppPython extends LlamaCppPython {\n      override isNvidiaCDIConfigured(): boolean {\n        return false;\n      }\n    }\n    const provider = new NoCDILlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n    const server = await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(getImageInfo).toHaveBeenCalledWith(expect.anything(), llamacpp.default, expect.any(Function));\n    expect(gpuManager.collectGPUs).toHaveBeenCalled();\n    expect('gpu' in server.labels).toBeFalsy();\n  });\n\n  test('provided connection should be used for pulling the image', async () => {\n    const connection: ContainerProviderConnectionInfo = {\n      name: 'Dummy Podman',\n      type: 'podman',\n      vmType: VMType.WSL,\n      status: 'started',\n      providerId: 'fakeProviderId',\n    };\n    const provider = new 
LlamaCppPython(taskRegistry, podmanConnection, gpuManager, configurationRegistry);\n\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: connection,\n    });\n\n    expect(podmanConnection.getContainerProviderConnection).toHaveBeenCalledWith(connection);\n    expect(podmanConnection.findRunningContainerProviderConnection).not.toHaveBeenCalled();\n    expect(getImageInfo).toHaveBeenCalledWith(dummyConnection, llamacpp.default, expect.anything());\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/workers/provider/LlamaCppPython.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type {\n  ContainerCreateOptions,\n  ContainerProviderConnection,\n  DeviceRequest,\n  ImageInfo,\n  MountConfig,\n} from '@podman-desktop/api';\nimport type { InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport { InferenceProvider } from './InferenceProvider';\nimport { getModelPropertiesForEnvironment, getMountPath } from '../../utils/modelsUtils';\nimport { DISABLE_SELINUX_LABEL_SECURITY_OPTION } from '../../utils/utils';\nimport { LABEL_INFERENCE_SERVER } from '../../utils/inferenceUtils';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { type InferenceServer, InferenceType } from '@shared/models/IInference';\nimport type { GPUManager } from '../../managers/GPUManager';\nimport { GPUVendor, type IGPUInfo } from '@shared/models/IGPUInfo';\nimport { VMType } from '@shared/models/IPodman';\nimport type { PodmanConnection } from '../../managers/podmanConnection';\nimport type { ConfigurationRegistry } from '../../registries/ConfigurationRegistry';\nimport { llamacpp } from '../../assets/inference-images.json';\nimport * as fs from 'node:fs';\n\nexport const SECOND: number = 1_000_000_000;\n\ninterface 
Device {\n  PathOnHost: string;\n  PathInContainer: string;\n  CgroupPermissions: string;\n}\n\nexport class LlamaCppPython extends InferenceProvider {\n  constructor(\n    taskRegistry: TaskRegistry,\n    private podmanConnection: PodmanConnection,\n    private gpuManager: GPUManager,\n    private configurationRegistry: ConfigurationRegistry,\n  ) {\n    super(taskRegistry, InferenceType.LLAMA_CPP, 'LLama-cpp');\n  }\n\n  dispose(): void {}\n\n  public enabled = (): boolean => true;\n\n  protected async getContainerCreateOptions(\n    config: InferenceServerConfig,\n    imageInfo: ImageInfo,\n    vmType: VMType,\n    gpu?: IGPUInfo,\n  ): Promise<ContainerCreateOptions> {\n    if (config.modelsInfo.length === 0) throw new Error('Need at least one model info to start an inference server.');\n\n    if (config.modelsInfo.length > 1) {\n      throw new Error('Currently the inference server does not support multiple models serving.');\n    }\n\n    const modelInfo = config.modelsInfo[0];\n\n    if (modelInfo.file === undefined) {\n      throw new Error('The model info file provided is undefined');\n    }\n\n    const labels: Record<string, string> = {\n      ...config.labels,\n      [LABEL_INFERENCE_SERVER]: JSON.stringify(config.modelsInfo.map(model => model.id)),\n    };\n\n    // get model mount settings\n    const filename = getMountPath(modelInfo);\n    const target = `/models/${modelInfo.file.file}`;\n\n    // mount the file directory to avoid adding other files to the containers\n    const mounts: MountConfig = [\n      {\n        Target: target,\n        Source: filename,\n        Type: 'bind',\n      },\n    ];\n\n    // provide envs\n    const envs: string[] = [`MODEL_PATH=${target}`, 'HOST=0.0.0.0', 'PORT=8000'];\n    envs.push(...getModelPropertiesForEnvironment(modelInfo));\n\n    const deviceRequests: DeviceRequest[] = [];\n    const devices: Device[] = [];\n    let entrypoint: string | undefined = undefined;\n    let cmd: string[] = [];\n    let user: 
string | undefined = undefined;\n\n    if (gpu) {\n      let supported: boolean = false;\n      switch (vmType) {\n        case VMType.WSL:\n          if (gpu.vendor === GPUVendor.NVIDIA) {\n            supported = true;\n            mounts.push({\n              Target: '/usr/lib/wsl',\n              Source: '/usr/lib/wsl',\n              Type: 'bind',\n            });\n\n            devices.push({\n              PathOnHost: '/dev/dxg',\n              PathInContainer: '/dev/dxg',\n              CgroupPermissions: 'r',\n            });\n\n            user = '0';\n\n            entrypoint = '/usr/bin/sh';\n            cmd = [\n              '-c',\n              '/usr/bin/ln -sfn /usr/lib/wsl/lib/* /usr/lib64/ && PATH=\"${PATH}:/usr/lib/wsl/lib/\" && /usr/bin/llama-server.sh',\n            ];\n          } else if (gpu.vendor === GPUVendor.INTEL) {\n            supported = true;\n            mounts.push({\n              Target: '/usr/lib/wsl',\n              Source: '/usr/lib/wsl',\n              Type: 'bind',\n            });\n\n            devices.push({\n              PathOnHost: '/dev/dxg',\n              PathInContainer: '/dev/dxg',\n              CgroupPermissions: 'r',\n            });\n\n            user = '0';\n          }\n          break;\n        case VMType.LIBKRUN:\n        case VMType.LIBKRUN_LABEL:\n          supported = true;\n          devices.push({\n            PathOnHost: '/dev/dri',\n            PathInContainer: '/dev/dri',\n            CgroupPermissions: '',\n          });\n          break;\n        case VMType.UNKNOWN:\n          // This is linux with podman locally installed\n\n          // Linux GPU support currently requires NVIDIA GPU with CDI configured\n          if (this.isNvidiaCDIConfigured(gpu)) {\n            supported = true;\n            devices.push({\n              PathOnHost: 'nvidia.com/gpu=all',\n              PathInContainer: '',\n              CgroupPermissions: '',\n            });\n\n            user = '0';\n          } 
else if (gpu.vendor === GPUVendor.INTEL) {\n            // Intel GPU support via /dev/dri device passthrough\n            supported = true;\n            devices.push({\n              PathOnHost: '/dev/dri',\n              PathInContainer: '/dev/dri',\n              CgroupPermissions: 'rwm',\n            });\n\n            user = '0';\n          }\n\n          break;\n      }\n\n      // adding gpu capabilities in supported architectures\n      if (supported) {\n        if (gpu.vendor !== GPUVendor.INTEL || vmType !== VMType.UNKNOWN) {\n          deviceRequests.push({\n            Capabilities: [['gpu']],\n            Count: -1, // -1: all\n          });\n        }\n\n        // label the container\n        labels['gpu'] = gpu.model;\n        envs.push(`GPU_LAYERS=${config.gpuLayers ?? 999}`);\n\n        // Add Intel-specific environment variables\n        if (gpu.vendor === GPUVendor.INTEL) {\n          envs.push('ZES_ENABLE_SYSMAN=1');\n          // Add the library path for the Unified Memory Framework (UMF) which is required for the Level Zero adapter\n          // This is a workaround for the missing LD_LIBRARY_PATH in the ramalama image\n          envs.push('LD_LIBRARY_PATH=/opt/intel/oneapi/umf/0.11/lib/');\n        }\n      } else {\n        console.warn(`gpu ${gpu.model} is not supported on ${vmType}.`);\n      }\n    }\n\n    // add the link to our openAPI instance using the instance as the host\n    const aiLabPort = this.configurationRegistry.getExtensionConfiguration().apiPort;\n    // add in the URL the port of the inference server\n    const aiLabDocsLink = `http://localhost:${aiLabPort}/api-docs/${config.port}`;\n    // adding labels to inference server\n    labels['docs'] = aiLabDocsLink;\n    labels['api'] = `http://localhost:${config.port}/v1`;\n\n    return {\n      Image: imageInfo.Id,\n      Detach: true,\n      Entrypoint: entrypoint,\n      User: user,\n      ExposedPorts: { [`${config.port}`]: {} },\n      HostConfig: {\n        AutoRemove: 
false,\n        Devices: devices,\n        Mounts: mounts,\n        DeviceRequests: deviceRequests,\n        SecurityOpt: [DISABLE_SELINUX_LABEL_SECURITY_OPTION],\n        PortBindings: {\n          '8000/tcp': [\n            {\n              HostPort: `${config.port}`,\n            },\n          ],\n        },\n      },\n      HealthCheck: {\n        // must be the port INSIDE the container not the exposed one\n        Test: ['CMD-SHELL', `curl -sSf localhost:8000 > /dev/null`],\n        Interval: SECOND * 5,\n        Retries: 4 * 5,\n      },\n      Labels: labels,\n      Env: envs,\n      Cmd: cmd,\n    };\n  }\n\n  async perform(config: InferenceServerConfig): Promise<InferenceServer> {\n    if (!this.enabled()) throw new Error('not enabled');\n\n    let gpu: IGPUInfo | undefined = undefined;\n\n    // get the first GPU if option is enabled\n    if (this.configurationRegistry.getExtensionConfiguration().experimentalGPU) {\n      const gpus: IGPUInfo[] = await this.gpuManager.collectGPUs();\n      if (gpus.length === 0) throw new Error('no gpu was found.');\n\n      // Look for a GPU that is of a known type, use the first one found.\n      // Fall back to the first one if no GPUs are of known type.\n      gpu = gpus.find(({ vendor }) => vendor !== GPUVendor.UNKNOWN) ?? gpus[0];\n    }\n\n    let connection: ContainerProviderConnection | undefined = undefined;\n    if (config.connection) {\n      connection = this.podmanConnection.getContainerProviderConnection(config.connection);\n    } else {\n      connection = this.podmanConnection.findRunningContainerProviderConnection();\n    }\n\n    if (!connection) throw new Error('no running connection could be found');\n\n    const vmType: VMType = (connection.vmType ?? VMType.UNKNOWN) as VMType;\n\n    // pull the image\n    const imageInfo: ImageInfo = await this.pullImage(\n      connection,\n      config.image ?? 
this.getLlamaCppInferenceImage(vmType, gpu),\n      config.labels,\n    );\n\n    // Get the container creation options\n    const containerCreateOptions: ContainerCreateOptions = await this.getContainerCreateOptions(\n      config,\n      imageInfo,\n      vmType,\n      gpu,\n    );\n\n    // Create the container\n    const { engineId, id } = await this.createContainer(imageInfo.engineId, containerCreateOptions, config.labels);\n\n    return {\n      container: {\n        engineId: engineId,\n        containerId: id,\n      },\n      connection: {\n        port: config.port,\n      },\n      status: 'running',\n      models: config.modelsInfo,\n      type: InferenceType.LLAMA_CPP,\n      labels: containerCreateOptions.Labels ?? {},\n    };\n  }\n\n  protected getLlamaCppInferenceImage(vmType: VMType, gpu?: IGPUInfo): string {\n    switch (vmType) {\n      case VMType.WSL:\n        if (gpu?.vendor === GPUVendor.NVIDIA) return llamacpp.cuda;\n        if (gpu?.vendor === GPUVendor.INTEL) return llamacpp.intel;\n        return llamacpp.default;\n      case VMType.LIBKRUN:\n      case VMType.LIBKRUN_LABEL:\n        return llamacpp.default;\n      // no GPU support\n      case VMType.UNKNOWN:\n        if (this.isNvidiaCDIConfigured(gpu)) return llamacpp.cuda;\n        if (gpu?.vendor === GPUVendor.INTEL) return llamacpp.intel;\n        return llamacpp.default;\n      default:\n        return llamacpp.default;\n    }\n  }\n\n  protected isNvidiaCDIConfigured(gpu?: IGPUInfo): boolean {\n    // NVIDIA cdi must be set up to use GPU acceleration on Linux.\n    // Check the known locations for the configuration file\n    const knownLocations = [\n      '/etc/cdi/nvidia.yaml', // Fedora\n    ];\n\n    if (gpu?.vendor !== GPUVendor.NVIDIA) return false;\n\n    let cdiSetup = false;\n    for (const location of knownLocations) {\n      if (fs.existsSync(location)) {\n        cdiSetup = true;\n        break;\n      }\n    }\n    return cdiSetup;\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/workers/provider/OpenVINO.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { getImageInfo, LABEL_INFERENCE_SERVER } from '../../utils/inferenceUtils';\nimport type { ContainerProviderConnection, ImageInfo } from '@podman-desktop/api';\nimport { containerEngine } from '@podman-desktop/api';\nimport type { PodmanConnection } from '../../managers/podmanConnection';\nimport { VMType } from '@shared/models/IPodman';\nimport type { ConfigurationRegistry } from '../../registries/ConfigurationRegistry';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { InferenceType } from '@shared/models/IInference';\nimport { openvino } from '../../assets/inference-images.json';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { OpenVINO, SECOND } from './OpenVINO';\nimport type { ModelsManager } from '../../managers/modelsManager';\n\nvi.mock('@podman-desktop/api', () => ({\n  containerEngine: {\n    createContainer: vi.fn(),\n  },\n}));\n\nvi.mock('../../utils/inferenceUtils', 
() => ({\n  getProviderContainerConnection: vi.fn(),\n  getImageInfo: vi.fn(),\n  LABEL_INFERENCE_SERVER: 'ai-lab-inference-server',\n}));\n\nconst taskRegistry: TaskRegistry = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst DummyModel: ModelInfo = {\n  name: 'dummy model',\n  id: 'dummy-model-id',\n  file: {\n    file: '',\n    path: 'dummy-path/snapshots/032c17573f64eacffe8514e7ee47cc0e532ed9a2',\n  },\n  properties: {},\n  description: 'dummy-desc',\n};\n\nconst dummyConnection: ContainerProviderConnection = {\n  name: 'dummy-provider-connection',\n  type: 'podman',\n  vmType: VMType.WSL,\n  status: () => 'started',\n  endpoint: {\n    socketPath: 'dummy-socket',\n  },\n};\n\nconst DummyImageInfo: ImageInfo = {\n  Id: 'dummy-image-id',\n  engineId: 'dummy-engine-id',\n} as unknown as ImageInfo;\n\nconst podmanConnection: PodmanConnection = {\n  findRunningContainerProviderConnection: vi.fn(),\n  getContainerProviderConnection: vi.fn(),\n} as unknown as PodmanConnection;\n\nconst configurationRegistry: ConfigurationRegistry = {\n  getExtensionConfiguration: vi.fn(),\n} as unknown as ConfigurationRegistry;\n\nconst modelsManager: ModelsManager = {\n  getModelInfo: vi.fn(),\n} as unknown as ModelsManager;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(configurationRegistry.getExtensionConfiguration).mockReturnValue({\n    experimentalGPU: false,\n    modelsPath: 'model-path',\n    apiPort: 10434,\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    modelUploadDisabled: false,\n    showGPUPromotion: false,\n    appearance: 'dark',\n  });\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue(dummyConnection);\n  vi.mocked(podmanConnection.getContainerProviderConnection).mockReturnValue(dummyConnection);\n  vi.mocked(getImageInfo).mockResolvedValue(DummyImageInfo);\n  vi.mocked(taskRegistry.createTask).mockReturnValue({ id: 'dummy-task-id', name: '', labels: {}, 
state: 'loading' });\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'dummy-container-id',\n    engineId: 'dummy-engine-id',\n  });\n});\n\ntest('OpenVINO being the default, it should always be enable', () => {\n  const provider = new OpenVINO(taskRegistry, podmanConnection, modelsManager, configurationRegistry);\n  expect(provider.enabled()).toBeTruthy();\n});\n\ndescribe('perform', () => {\n  test('config without image should use defined image', async () => {\n    const provider = new OpenVINO(taskRegistry, podmanConnection, modelsManager, configurationRegistry);\n\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(podmanConnection.findRunningContainerProviderConnection).toHaveBeenCalled();\n    expect(getImageInfo).toHaveBeenCalledWith(dummyConnection, openvino.default, expect.anything());\n  });\n\n  test('config without models should throw an error', async () => {\n    const provider = new OpenVINO(taskRegistry, podmanConnection, modelsManager, configurationRegistry);\n\n    await expect(\n      provider.perform({\n        port: 8000,\n        image: undefined,\n        labels: {},\n        modelsInfo: [],\n        connection: undefined,\n      }),\n    ).rejects.toThrowError('Need at least one model info to start an inference server.');\n  });\n\n  test('config model without file should throw an error', async () => {\n    const provider = new OpenVINO(taskRegistry, podmanConnection, modelsManager, configurationRegistry);\n\n    await expect(\n      provider.perform({\n        port: 8000,\n        image: undefined,\n        labels: {},\n        modelsInfo: [\n          {\n            id: 'invalid',\n          } as unknown as ModelInfo,\n        ],\n        connection: undefined,\n      }),\n    ).rejects.toThrowError('The model info file provided is undefined');\n  });\n\n  test('valid config should produce 
expected CreateContainerOptions', async () => {\n    const provider = new OpenVINO(taskRegistry, podmanConnection, modelsManager, configurationRegistry);\n\n    vi.mocked(modelsManager.getModelInfo).mockReturnValue(DummyModel);\n\n    const server = await provider.perform({\n      port: 8888,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: undefined,\n    });\n\n    expect(server).toStrictEqual<InferenceServer>({\n      container: {\n        containerId: 'dummy-container-id',\n        engineId: DummyImageInfo.engineId,\n      },\n      labels: {\n        [LABEL_INFERENCE_SERVER]: `[\"${DummyModel.id}\"]`,\n        api: 'http://localhost:8888/v3',\n        docs: 'http://localhost:10434/api-docs/8888',\n      },\n      models: [DummyModel],\n      status: 'running',\n      type: InferenceType.OPENVINO,\n      connection: {\n        port: 8888,\n      },\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(DummyImageInfo.engineId, {\n      Cmd: [\n        'ovms',\n        '--rest_port',\n        '8000',\n        '--config_path',\n        '/model/snapshots/032c17573f64eacffe8514e7ee47cc0e532ed9a2/config-all.json',\n        '--metrics_enable',\n      ],\n      Detach: true,\n      Env: ['MODEL_PATH=/model', 'HOST=0.0.0.0', 'PORT=8000'],\n      ExposedPorts: {\n        '8888': {},\n      },\n      HealthCheck: {\n        Interval: SECOND * 5,\n        Retries: 20,\n        Test: ['CMD-SHELL', 'curl -sSf localhost:8000/metrics > /dev/null'],\n      },\n      HostConfig: {\n        AutoRemove: false,\n        Mounts: [\n          {\n            Source: 'dummy-path',\n            Target: '/model',\n            Type: 'bind',\n          },\n        ],\n        PortBindings: {\n          '8000/tcp': [\n            {\n              HostPort: '8888',\n            },\n          ],\n        },\n        SecurityOpt: ['label=disable'],\n      },\n      Image: DummyImageInfo.Id,\n      Labels: {\n        
[LABEL_INFERENCE_SERVER]: `[\"${DummyModel.id}\"]`,\n        api: 'http://localhost:8888/v3',\n        docs: 'http://localhost:10434/api-docs/8888',\n      },\n    });\n  });\n\n  test('model properties should be made uppercased', async () => {\n    const provider = new OpenVINO(taskRegistry, podmanConnection, modelsManager, configurationRegistry);\n\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [\n        {\n          ...DummyModel,\n          properties: {\n            basicProp: 'basicProp',\n            lotOfCamelCases: 'lotOfCamelCases',\n            lowercase: 'lowercase',\n            chatFormat: 'dummyChatFormat',\n          },\n        },\n      ],\n      connection: undefined,\n    });\n\n    expect(containerEngine.createContainer).toHaveBeenCalledWith(DummyImageInfo.engineId, {\n      Env: expect.arrayContaining([\n        'MODEL_BASIC_PROP=basicProp',\n        'MODEL_LOT_OF_CAMEL_CASES=lotOfCamelCases',\n        'MODEL_LOWERCASE=lowercase',\n        'MODEL_CHAT_FORMAT=dummyChatFormat',\n      ]),\n      Cmd: expect.anything(),\n      HealthCheck: expect.anything(),\n      HostConfig: expect.anything(),\n      ExposedPorts: expect.anything(),\n      Labels: expect.anything(),\n      Image: DummyImageInfo.Id,\n      Detach: true,\n    });\n  });\n\n  test('provided connection should be used for pulling the image', async () => {\n    const connection: ContainerProviderConnectionInfo = {\n      name: 'Dummy Podman',\n      type: 'podman',\n      vmType: VMType.WSL,\n      status: 'started',\n      providerId: 'fakeProviderId',\n    };\n    const provider = new OpenVINO(taskRegistry, podmanConnection, modelsManager, configurationRegistry);\n\n    await provider.perform({\n      port: 8000,\n      image: undefined,\n      labels: {},\n      modelsInfo: [DummyModel],\n      connection: connection,\n    });\n\n    
expect(podmanConnection.getContainerProviderConnection).toHaveBeenCalledWith(connection);\n    expect(podmanConnection.findRunningContainerProviderConnection).not.toHaveBeenCalled();\n    expect(getImageInfo).toHaveBeenCalledWith(dummyConnection, openvino.default, expect.anything());\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/workers/provider/OpenVINO.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ContainerCreateOptions, ContainerProviderConnection, ImageInfo, MountConfig } from '@podman-desktop/api';\nimport type { InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport { InferenceProvider } from './InferenceProvider';\nimport { getHuggingFaceModelMountInfo, getModelPropertiesForEnvironment } from '../../utils/modelsUtils';\nimport { DISABLE_SELINUX_LABEL_SECURITY_OPTION } from '../../utils/utils';\nimport { LABEL_INFERENCE_SERVER } from '../../utils/inferenceUtils';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { type InferenceServer, InferenceType } from '@shared/models/IInference';\nimport { VMType } from '@shared/models/IPodman';\nimport type { PodmanConnection } from '../../managers/podmanConnection';\nimport type { ConfigurationRegistry } from '../../registries/ConfigurationRegistry';\nimport { openvino } from '../../assets/inference-images.json';\nimport { existsSync } from 'node:fs';\nimport { writeFile } from 'node:fs/promises';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { ModelsManager } from '../../managers/modelsManager';\n\nexport const SECOND: number 
= 1_000_000_000;\n\nconst CONFIG_FILE_NAME = `config-all.json`;\n\nconst GRAPH_CONTENT = `input_stream: \"HTTP_REQUEST_PAYLOAD:input\"\noutput_stream: \"HTTP_RESPONSE_PAYLOAD:output\"\n\nnode: {\n  name: \"LLMExecutor\"\n  calculator: \"HttpLLMCalculator\"\n  input_stream: \"LOOPBACK:loopback\"\n  input_stream: \"HTTP_REQUEST_PAYLOAD:input\"\n  input_side_packet: \"LLM_NODE_RESOURCES:llm\"\n  output_stream: \"LOOPBACK:loopback\"\n  output_stream: \"HTTP_RESPONSE_PAYLOAD:output\"\n  input_stream_info: {\n    tag_index: 'LOOPBACK:0',\n    back_edge: true\n  }\n  node_options: {\n      [type.googleapis.com / mediapipe.LLMCalculatorOptions]: {\n          models_path: \"./\",\n          plugin_config: '{ \"KV_CACHE_PRECISION\": \"u8\"}',\n          enable_prefix_caching: false,\n          cache_size: 10,\n          max_num_seqs: 256,\n          device: \"CPU\",\n      }\n  }\n  input_stream_handler {\n    input_stream_handler: \"SyncSetInputStreamHandler\",\n    options {\n      [mediapipe.SyncSetInputStreamHandlerOptions.ext] {\n        sync_set {\n          tag_index: \"LOOPBACK:0\"\n        }\n      }\n    }\n  }\n}`;\n\nexport class OpenVINO extends InferenceProvider {\n  constructor(\n    taskRegistry: TaskRegistry,\n    private podmanConnection: PodmanConnection,\n    private modelsManager: ModelsManager,\n    private configurationRegistry: ConfigurationRegistry,\n  ) {\n    super(taskRegistry, InferenceType.OPENVINO, 'OpenVINO');\n  }\n\n  dispose(): void {}\n\n  public enabled = (): boolean => true;\n\n  protected async getContainerCreateOptions(\n    config: InferenceServerConfig,\n    imageInfo: ImageInfo,\n    modelInfo: ModelInfo,\n  ): Promise<ContainerCreateOptions> {\n    const labels: Record<string, string> = {\n      ...config.labels,\n      [LABEL_INFERENCE_SERVER]: JSON.stringify(config.modelsInfo.map(model => model.id)),\n    };\n\n    // get model mount settings\n    const mountInfo = getHuggingFaceModelMountInfo(modelInfo);\n    const target = 
`/model`;\n\n    // mount the file directory to avoid adding other files to the containers\n    const mounts: MountConfig = [\n      {\n        Target: target,\n        Source: mountInfo.mount,\n        Type: 'bind',\n      },\n    ];\n    const configFilePath = mountInfo.suffix\n      ? `/model/${mountInfo.suffix}/${CONFIG_FILE_NAME}`\n      : `/model/${CONFIG_FILE_NAME}`;\n\n    // provide envs\n    const envs: string[] = [`MODEL_PATH=${target}`, 'HOST=0.0.0.0', 'PORT=8000'];\n    envs.push(...getModelPropertiesForEnvironment(modelInfo));\n\n    const cmd: string[] = ['ovms', '--rest_port', '8000', '--config_path', configFilePath, '--metrics_enable'];\n\n    // add the link to our openAPI instance using the instance as the host\n    const aiLabPort = this.configurationRegistry.getExtensionConfiguration().apiPort;\n    // add in the URL the port of the inference server\n    const aiLabDocsLink = `http://localhost:${aiLabPort}/api-docs/${config.port}`;\n    // adding labels to inference server\n    labels['docs'] = aiLabDocsLink;\n    labels['api'] = `http://localhost:${config.port}/v3`;\n\n    return {\n      Image: imageInfo.Id,\n      Detach: true,\n      ExposedPorts: { [`${config.port}`]: {} },\n      HostConfig: {\n        AutoRemove: false,\n        Mounts: mounts,\n        SecurityOpt: [DISABLE_SELINUX_LABEL_SECURITY_OPTION],\n        PortBindings: {\n          '8000/tcp': [\n            {\n              HostPort: `${config.port}`,\n            },\n          ],\n        },\n      },\n      HealthCheck: {\n        // must be the port INSIDE the container not the exposed one\n        Test: ['CMD-SHELL', `curl -sSf localhost:8000/metrics > /dev/null`],\n        Interval: SECOND * 5,\n        Retries: 4 * 5,\n      },\n      Labels: labels,\n      Env: envs,\n      Cmd: cmd,\n    };\n  }\n\n  override async prePerform(config: InferenceServerConfig): Promise<void> {\n    const modelInfo = this.validateAndGetModelInfo(config);\n\n    if (modelInfo.file === 
undefined) {\n      throw new Error('The model info file provided is undefined');\n    }\n\n    await this.ensureGraphFile(modelInfo.file.path);\n\n    await this.ensureConfigFile(modelInfo);\n  }\n\n  async perform(config: InferenceServerConfig): Promise<InferenceServer> {\n    const modelInfo = this.validateAndGetModelInfo(config);\n\n    if (modelInfo.file === undefined) {\n      throw new Error('The model info file provided is undefined');\n    }\n\n    let connection: ContainerProviderConnection | undefined = undefined;\n    if (config.connection) {\n      connection = this.podmanConnection.getContainerProviderConnection(config.connection);\n    } else {\n      connection = this.podmanConnection.findRunningContainerProviderConnection();\n    }\n\n    if (!connection) throw new Error('no running connection could be found');\n\n    const vmType: VMType = (connection.vmType ?? VMType.UNKNOWN) as VMType;\n\n    // pull the image\n    const imageInfo: ImageInfo = await this.pullImage(\n      connection,\n      config.image ?? this.getOpenVINOInferenceImage(vmType),\n      config.labels,\n    );\n\n    // Get the container creation options\n    const containerCreateOptions: ContainerCreateOptions = await this.getContainerCreateOptions(\n      config,\n      imageInfo,\n      modelInfo,\n    );\n\n    // Create the container\n    const { engineId, id } = await this.createContainer(imageInfo.engineId, containerCreateOptions, config.labels);\n\n    return {\n      container: {\n        engineId: engineId,\n        containerId: id,\n      },\n      connection: {\n        port: config.port,\n      },\n      status: 'running',\n      models: config.modelsInfo.map(model => this.modelsManager.getModelInfo(model.id)),\n      type: InferenceType.OPENVINO,\n      labels: containerCreateOptions.Labels ?? 
{},\n    };\n  }\n\n  private validateAndGetModelInfo(config: InferenceServerConfig): ModelInfo {\n    if (!this.enabled()) throw new Error('not enabled');\n\n    if (config.modelsInfo.length === 0) throw new Error('Need at least one model info to start an inference server.');\n\n    if (config.modelsInfo.length > 1) {\n      throw new Error('Currently the inference server does not support multiple models serving.');\n    }\n\n    return config.modelsInfo[0];\n  }\n\n  private async ensureGraphFile(modelFolder: string): Promise<string> {\n    // check if the file exists\n    const graphFile = `${modelFolder}/graph.pbtxt`;\n    // check if the graph file exists\n    if (!existsSync(graphFile)) {\n      // create the graph file\n      await writeFile(graphFile, GRAPH_CONTENT);\n    }\n    return graphFile;\n  }\n\n  private async ensureConfigFile(modelInfo: ModelInfo): Promise<string> {\n    const configFile = `${modelInfo.file?.path}/${CONFIG_FILE_NAME}`;\n    if (!existsSync(configFile)) {\n      const config = {\n        mediapipe_config_list: [\n          {\n            name: modelInfo.name,\n            base_path: '.',\n          },\n        ],\n        model_config_list: [],\n      };\n      await writeFile(configFile, JSON.stringify(config));\n    }\n    return configFile;\n  }\n\n  protected getOpenVINOInferenceImage(_vmType: VMType): string {\n    return openvino.default;\n  }\n}\n"
  },
  {
    "path": "packages/backend/src/workers/provider/WhisperCpp.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { vi, test, expect, beforeEach } from 'vitest';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport { WhisperCpp } from './WhisperCpp';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { InferenceType } from '@shared/models/IInference';\nimport type { ContainerProviderConnection, ImageInfo } from '@podman-desktop/api';\nimport { containerEngine } from '@podman-desktop/api';\nimport { getImageInfo } from '../../utils/inferenceUtils';\nimport type { PodmanConnection } from '../../managers/podmanConnection';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { VMType } from '@shared/models/IPodman';\nimport { join } from 'node:path';\n\nvi.mock('@podman-desktop/api', () => ({\n  containerEngine: {\n    createContainer: vi.fn(),\n  },\n}));\n\nvi.mock('../../utils/inferenceUtils', () => ({\n  getProviderContainerConnection: vi.fn(),\n  getImageInfo: vi.fn(),\n  LABEL_INFERENCE_SERVER: 'ai-lab-inference-server',\n}));\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'dummy-provider-connection',\n  type: 'podman',\n} as unknown as 
ContainerProviderConnection;\n\nconst DummyImageInfo: ImageInfo = {\n  Id: 'dummy-image-id',\n  engineId: 'dummy-engine-id',\n} as unknown as ImageInfo;\n\nconst taskRegistry: TaskRegistry = {\n  createTask: vi.fn(),\n  updateTask: vi.fn(),\n} as unknown as TaskRegistry;\n\nconst podmanConnection: PodmanConnection = {\n  findRunningContainerProviderConnection: vi.fn(),\n  getContainerProviderConnection: vi.fn(),\n} as unknown as PodmanConnection;\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(podmanConnection.findRunningContainerProviderConnection).mockReturnValue(connectionMock);\n  vi.mocked(podmanConnection.getContainerProviderConnection).mockReturnValue(connectionMock);\n  vi.mocked(taskRegistry.createTask).mockReturnValue({ id: 'dummy-task-id', name: '', labels: {}, state: 'loading' });\n\n  vi.mocked(getImageInfo).mockResolvedValue(DummyImageInfo);\n  vi.mocked(containerEngine.createContainer).mockResolvedValue({\n    id: 'dummy-container-id',\n    engineId: 'dummy-engine-id',\n  });\n});\n\ntest('provider requires at least one model', async () => {\n  const provider = new WhisperCpp(taskRegistry, podmanConnection);\n\n  await expect(() => {\n    return provider.perform({\n      port: 8888,\n      labels: {},\n      modelsInfo: [],\n    });\n  }).rejects.toThrowError('Need at least one model info to start an inference server.');\n});\n\ntest('provider requires a downloaded model', async () => {\n  const provider = new WhisperCpp(taskRegistry, podmanConnection);\n\n  await expect(() => {\n    return provider.perform({\n      port: 8888,\n      labels: {},\n      modelsInfo: [\n        {\n          id: 'whisper-cpp',\n          name: 'Whisper',\n          properties: {},\n          description: 'whisper desc',\n        },\n      ],\n    });\n  }).rejects.toThrowError('The model info file provided is undefined');\n});\n\ntest('provider requires a model with backend type Whisper', async () => {\n  const provider = new WhisperCpp(taskRegistry, 
podmanConnection);\n\n  await expect(() => {\n    return provider.perform({\n      port: 8888,\n      labels: {},\n      modelsInfo: [\n        {\n          id: 'whisper-cpp',\n          name: 'Whisper',\n          properties: {},\n          description: 'whisper desc',\n          file: {\n            file: 'random-file',\n            path: 'path-to-file',\n          },\n          backend: InferenceType.LLAMA_CPP,\n        },\n      ],\n    });\n  }).rejects.toThrowError(\n    `Whisper requires models with backend type ${InferenceType.WHISPER_CPP} got ${InferenceType.LLAMA_CPP}.`,\n  );\n});\n\ntest('custom image in inference server config should overwrite default', async () => {\n  const provider = new WhisperCpp(taskRegistry, podmanConnection);\n\n  const model = {\n    id: 'whisper-cpp',\n    name: 'Whisper',\n    properties: {},\n    description: 'whisper desc',\n    file: {\n      file: 'random-file',\n      path: 'path-to-file',\n    },\n    backend: InferenceType.WHISPER_CPP,\n  };\n\n  await provider.perform({\n    port: 8888,\n    labels: {\n      hello: 'world',\n    },\n    image: 'localhost/whisper-cpp:custom',\n    modelsInfo: [model],\n  });\n\n  expect(getImageInfo).toHaveBeenCalledWith(connectionMock, 'localhost/whisper-cpp:custom', expect.any(Function));\n});\n\ntest('provider should propagate labels', async () => {\n  const provider = new WhisperCpp(taskRegistry, podmanConnection);\n\n  const model = {\n    id: 'whisper-cpp',\n    name: 'Whisper',\n    properties: {},\n    description: 'whisper desc',\n    file: {\n      file: 'random-file',\n      path: 'path-to-file',\n    },\n    backend: InferenceType.WHISPER_CPP,\n  };\n\n  const server: InferenceServer = await provider.perform({\n    port: 8888,\n    labels: {\n      hello: 'world',\n    },\n    modelsInfo: [model],\n  });\n\n  expect(server).toStrictEqual({\n    connection: {\n      port: 8888,\n    },\n    container: {\n      containerId: 'dummy-container-id',\n      engineId: 
'dummy-engine-id',\n    },\n    labels: {\n      'ai-lab-inference-server': '[\"whisper-cpp\"]',\n      api: 'http://localhost:8888/inference',\n      hello: 'world',\n    },\n    models: [model],\n    status: 'running',\n    type: InferenceType.WHISPER_CPP,\n  });\n});\n\ntest('provided connection should be used for pulling the image', async () => {\n  const connection: ContainerProviderConnectionInfo = {\n    name: 'Dummy Podman',\n    type: 'podman',\n    vmType: VMType.WSL,\n    status: 'started',\n    providerId: 'fakeProviderId',\n  };\n  const provider = new WhisperCpp(taskRegistry, podmanConnection);\n\n  const model = {\n    id: 'whisper-cpp',\n    name: 'Whisper',\n    properties: {},\n    description: 'whisper desc',\n    file: {\n      file: 'random-file',\n      path: 'path-to-file',\n    },\n    backend: InferenceType.WHISPER_CPP,\n  };\n\n  await provider.perform({\n    connection: connection,\n    port: 8888,\n    labels: {\n      hello: 'world',\n    },\n    image: 'localhost/whisper-cpp:custom',\n    modelsInfo: [model],\n  });\n\n  expect(getImageInfo).toHaveBeenCalledWith(connectionMock, 'localhost/whisper-cpp:custom', expect.any(Function));\n  expect(podmanConnection.getContainerProviderConnection).toHaveBeenCalledWith(connection);\n  expect(podmanConnection.findRunningContainerProviderConnection).not.toHaveBeenCalled();\n  // ensure the create container is called with appropriate arguments\n  expect(containerEngine.createContainer).toHaveBeenCalledWith('dummy-engine-id', {\n    Detach: true,\n    Env: ['MODEL_PATH=/models/random-file', 'HOST=0.0.0.0', 'PORT=8000'],\n    HostConfig: {\n      AutoRemove: false,\n      Mounts: [\n        {\n          Source: join('path-to-file', 'random-file'),\n          Target: '/models/random-file',\n          Type: 'bind',\n        },\n      ],\n      PortBindings: {\n        '8000/tcp': [\n          {\n            HostPort: '8888',\n          },\n        ],\n      },\n      SecurityOpt: ['label=disable'],\n  
  },\n    Image: 'dummy-image-id',\n    Labels: {\n      'ai-lab-inference-server': '[\"whisper-cpp\"]',\n      api: 'http://localhost:8888/inference',\n      hello: 'world',\n    },\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/workers/provider/WhisperCpp.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { InferenceProvider } from './InferenceProvider';\nimport type { TaskRegistry } from '../../registries/TaskRegistry';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { InferenceType } from '@shared/models/IInference';\nimport type { InferenceServerConfig } from '@shared/models/InferenceServerConfig';\nimport { LABEL_INFERENCE_SERVER } from '../../utils/inferenceUtils';\nimport type { ContainerProviderConnection, MountConfig } from '@podman-desktop/api';\nimport { DISABLE_SELINUX_LABEL_SECURITY_OPTION } from '../../utils/utils';\nimport { whispercpp } from '../../assets/inference-images.json';\nimport type { PodmanConnection } from '../../managers/podmanConnection';\nimport { getMountPath } from '../../utils/modelsUtils';\n\nexport class WhisperCpp extends InferenceProvider {\n  constructor(\n    taskRegistry: TaskRegistry,\n    private podmanConnection: PodmanConnection,\n  ) {\n    super(taskRegistry, InferenceType.WHISPER_CPP, 'Whisper-cpp');\n  }\n\n  override enabled(): boolean {\n    return true;\n  }\n\n  override async perform(config: InferenceServerConfig): Promise<InferenceServer> {\n    if (config.modelsInfo.length === 0) 
throw new Error('Need at least one model info to start an inference server.');\n\n    const modelInfo = config.modelsInfo[0];\n\n    if (modelInfo.file === undefined) {\n      throw new Error('The model info file provided is undefined');\n    }\n\n    if (modelInfo.backend !== InferenceType.WHISPER_CPP) {\n      throw new Error(\n        `Whisper requires models with backend type ${InferenceType.WHISPER_CPP} got ${modelInfo.backend}.`,\n      );\n    }\n\n    const labels: Record<string, string> = {\n      ...config.labels,\n      [LABEL_INFERENCE_SERVER]: JSON.stringify(config.modelsInfo.map(model => model.id)),\n    };\n\n    let connection: ContainerProviderConnection | undefined = undefined;\n    if (config.connection) {\n      connection = this.podmanConnection.getContainerProviderConnection(config.connection);\n    } else {\n      connection = this.podmanConnection.findRunningContainerProviderConnection();\n    }\n\n    if (!connection) throw new Error('no running connection could be found');\n\n    // get model mount settings\n    const filename = getMountPath(modelInfo);\n    const target = `/models/${modelInfo.file.file}`;\n\n    // mount the file directory to avoid adding other files to the containers\n    const mounts: MountConfig = [\n      {\n        Target: target,\n        Source: filename,\n        Type: 'bind',\n      },\n    ];\n\n    const imageInfo = await this.pullImage(connection, config.image ?? 
whispercpp.default, labels);\n    const envs: string[] = [`MODEL_PATH=${target}`, 'HOST=0.0.0.0', 'PORT=8000'];\n\n    labels['api'] = `http://localhost:${config.port}/inference`;\n\n    const containerInfo = await this.createContainer(\n      imageInfo.engineId,\n      {\n        Image: imageInfo.Id,\n        Detach: true,\n        Labels: labels,\n        HostConfig: {\n          AutoRemove: false,\n          Mounts: mounts,\n          PortBindings: {\n            '8000/tcp': [\n              {\n                HostPort: `${config.port}`,\n              },\n            ],\n          },\n          SecurityOpt: [DISABLE_SELINUX_LABEL_SECURITY_OPTION],\n        },\n        Env: envs,\n      },\n      labels,\n    );\n\n    return {\n      models: [modelInfo],\n      status: 'running',\n      connection: {\n        port: config.port,\n      },\n      container: {\n        containerId: containerInfo.id,\n        engineId: containerInfo.engineId,\n      },\n      type: InferenceType.WHISPER_CPP,\n      labels: labels,\n    };\n  }\n  override dispose(): void {}\n}\n"
  },
  {
    "path": "packages/backend/src/workers/uploader/UploaderOptions.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { ContainerProviderConnection } from '@podman-desktop/api';\n\nexport interface UploaderOptions {\n  model: ModelInfo;\n  connection: ContainerProviderConnection;\n}\n"
  },
  {
    "path": "packages/backend/src/workers/uploader/WSLUploader.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, describe, vi, beforeEach } from 'vitest';\nimport { WSLUploader } from './WSLUploader';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { configuration, env, process, type ContainerProviderConnection, type RunResult } from '@podman-desktop/api';\nimport { VMType } from '@shared/models/IPodman';\n\nvi.mock('@podman-desktop/api', () => ({\n  env: {\n    isWindows: false,\n  },\n  process: {\n    exec: vi.fn(),\n  },\n  configuration: {\n    getConfiguration: vi.fn(),\n  },\n}));\n\nconst connectionMock: ContainerProviderConnection = {\n  name: 'machine2',\n  type: 'podman',\n  status: () => 'started',\n  vmType: VMType.WSL,\n  endpoint: {\n    socketPath: 'socket.sock',\n  },\n};\n\nconst wslUploader = new WSLUploader();\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(configuration.getConfiguration).mockReturnValue({\n    get: () => 'podman.exe',\n    has: vi.fn(),\n    update: vi.fn(),\n  });\n});\n\ndescribe('canUpload', () => {\n  test('should return false if system is not windows', () => {\n    vi.mocked(env).isWindows = false;\n    const result = wslUploader.enabled();\n    expect(result).toBeFalsy();\n  
});\n  test('should return true if system is windows', () => {\n    vi.mocked(env).isWindows = true;\n    const result = wslUploader.enabled();\n    expect(result).toBeTruthy();\n  });\n});\n\ndescribe('upload', () => {\n  test('throw if localpath is not defined', async () => {\n    await expect(\n      wslUploader.perform({\n        connection: connectionMock,\n        model: {\n          file: undefined,\n        } as unknown as ModelInfo,\n      }),\n    ).rejects.toThrowError('model is not available locally.');\n  });\n\n  test('non-WSL VMType should return the original path', async () => {\n    vi.mocked(process.exec).mockRejectedValueOnce('error');\n    const result = await wslUploader.perform({\n      connection: {\n        ...connectionMock,\n        vmType: VMType.UNKNOWN,\n      },\n      model: {\n        id: 'dummyId',\n        file: { path: 'C:\\\\Users\\\\podman\\\\folder', file: 'dummy.guff' },\n      } as unknown as ModelInfo,\n    });\n    expect(process.exec).not.toHaveBeenCalled();\n    expect(result.startsWith('C:\\\\Users\\\\podman\\\\folder')).toBeTruthy();\n  });\n\n  test('copy model if not exists on podman machine', async () => {\n    vi.mocked(process.exec).mockRejectedValueOnce('error');\n    await wslUploader.perform({\n      connection: connectionMock,\n      model: {\n        id: 'dummyId',\n        file: { path: 'C:\\\\Users\\\\podman\\\\folder', file: 'dummy.guff' },\n      } as unknown as ModelInfo,\n    });\n    expect(process.exec).toBeCalledWith('podman.exe', [\n      'machine',\n      'ssh',\n      'machine2',\n      'stat',\n      '/home/user/ai-lab/models/dummyId',\n    ]);\n    expect(process.exec).toBeCalledWith('podman.exe', [\n      'machine',\n      'ssh',\n      'machine2',\n      'mkdir',\n      '-p',\n      '/home/user/ai-lab/models',\n    ]);\n    expect(process.exec).toBeCalledWith('podman.exe', [\n      'machine',\n      'ssh',\n      'machine2',\n      'cp',\n      '-r',\n      '-L',\n      
'/mnt/c/Users/podman/folder/dummy.guff',\n      '/home/user/ai-lab/models/dummyId',\n    ]);\n  });\n\n  test('copy model if not exists on podman machine with space handling', async () => {\n    vi.mocked(process.exec).mockRejectedValueOnce('error');\n    await wslUploader.perform({\n      connection: connectionMock,\n      model: {\n        id: 'dummyId',\n        file: { path: 'C:\\\\Users\\\\podman folder', file: 'dummy.guff' },\n      } as unknown as ModelInfo,\n    });\n    expect(process.exec).toBeCalledWith('podman.exe', [\n      'machine',\n      'ssh',\n      'machine2',\n      'stat',\n      '/home/user/ai-lab/models/dummyId',\n    ]);\n    expect(process.exec).toBeCalledWith('podman.exe', [\n      'machine',\n      'ssh',\n      'machine2',\n      'mkdir',\n      '-p',\n      '/home/user/ai-lab/models',\n    ]);\n    expect(process.exec).toBeCalledWith('podman.exe', [\n      'machine',\n      'ssh',\n      'machine2',\n      'cp',\n      '-r',\n      '-L',\n      '/mnt/c/Users/podman\\\\ folder/dummy.guff',\n      '/home/user/ai-lab/models/dummyId',\n    ]);\n  });\n\n  test('do not copy model if it exists on podman machine', async () => {\n    vi.mocked(process.exec).mockResolvedValue({} as RunResult);\n    await wslUploader.perform({\n      connection: connectionMock,\n      model: {\n        id: 'dummyId',\n        file: { path: 'C:\\\\Users\\\\podman\\\\folder', file: 'dummy.guff' },\n      } as unknown as ModelInfo,\n    });\n    expect(process.exec).toBeCalledWith('podman.exe', [\n      'machine',\n      'ssh',\n      'machine2',\n      'stat',\n      '/home/user/ai-lab/models/dummyId',\n    ]);\n    expect(process.exec).toBeCalledTimes(1);\n  });\n});\n"
  },
  {
    "path": "packages/backend/src/workers/uploader/WSLUploader.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport * as podmanDesktopApi from '@podman-desktop/api';\nimport { getPodmanCli, getPodmanMachineName } from '../../utils/podman';\nimport { getLocalModelFile, getRemoteModelFile, isModelUploaded } from '../../utils/modelsUtils';\nimport { WindowsWorker } from '../WindowsWorker';\nimport { VMType } from '@shared/models/IPodman';\nimport type { UploaderOptions } from './UploaderOptions';\nimport { escapeSpaces } from '../../utils/pathUtils';\nimport { dirname } from 'node:path';\n\nexport class WSLUploader extends WindowsWorker<UploaderOptions, string> {\n  async perform(options: UploaderOptions): Promise<string> {\n    const localPath = getLocalModelFile(options.model);\n\n    // ensure the connection type is WSL\n    if (options.connection.vmType !== VMType.WSL) {\n      console.warn('cannot upload on non-WSL machine');\n      return localPath;\n    }\n\n    // the connection name cannot be used as it is\n    const machineName = getPodmanMachineName(options.connection);\n\n    const driveLetter = localPath.charAt(0);\n    const convertToMntPath = escapeSpaces(\n      localPath.replace(`${driveLetter}:\\\\`, `/mnt/${driveLetter.toLowerCase()}/`).replace(/\\\\/g, 
'/'),\n    );\n\n    // check if model already loaded on the podman machine\n    const existsRemote = await isModelUploaded(machineName, options.model);\n    const remoteFile = escapeSpaces(getRemoteModelFile(options.model));\n    const baseFolder = dirname(remoteFile);\n\n    // if not exists remotely it copies it from the local path\n    if (!existsRemote) {\n      await podmanDesktopApi.process.exec(getPodmanCli(), ['machine', 'ssh', machineName, 'mkdir', '-p', baseFolder]);\n      await podmanDesktopApi.process.exec(getPodmanCli(), [\n        'machine',\n        'ssh',\n        machineName,\n        'cp',\n        '-r',\n        '-L',\n        convertToMntPath,\n        remoteFile,\n      ]);\n    }\n\n    return remoteFile;\n  }\n}\n"
  },
  {
    "path": "packages/backend/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"esnext\",\n    \"module\": \"esnext\",\n    \"moduleResolution\": \"bundler\",\n    \"resolveJsonModule\": true,\n    \"lib\": [\"ES2022\", \"webworker\", \"dom\"],\n    \"sourceMap\": true,\n    \"outDir\": \"dist\",\n    \"allowSyntheticDefaultImports\": true,\n    \"skipLibCheck\": true,\n    \"types\": [\"node\"],\n    \"paths\": {\n      \"@shared/*\": [\"../shared/src/*\"]\n    },\n    \"strict\": true,\n    \"noImplicitOverride\": true,\n    \"noImplicitReturns\": true,\n    \"noUnusedLocals\": true\n  },\n  \"include\": [\"src\", \"types/*.d.ts\", \"../../types/*.d.ts\", \"../shared/*.ts\", \"../shared/**/*.ts\"]\n}\n"
  },
  {
    "path": "packages/backend/vite.config.js",
    "content": "/**********************************************************************\n * Copyright (C) 2023 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { join, resolve } from 'node:path';\nimport { builtinModules } from 'module';\nimport { existsSync } from 'node:fs';\nimport replace from '@rollup/plugin-replace';\nimport { cp, mkdir } from 'node:fs/promises';\n\nconst PACKAGE_ROOT = __dirname;\n\n/**\n * @type {import('vite').UserConfig}\n * @see https://vitejs.dev/config/\n */\nconst config = {\n  mode: process.env.MODE,\n  root: PACKAGE_ROOT,\n  envDir: process.cwd(),\n  resolve: {\n    alias: {\n      '/@/': join(PACKAGE_ROOT, 'src') + '/',\n      '/@gen/': join(PACKAGE_ROOT, 'src-generated') + '/',\n      '@shared/': join(PACKAGE_ROOT, '../shared', 'src') + '/',\n    },\n    mainFields: ['module', 'jsnext:main', 'jsnext'], //https://github.com/vitejs/vite/issues/16444\n  },\n  build: {\n    sourcemap: 'inline',\n    target: 'esnext',\n    outDir: 'dist',\n    assetsDir: '.',\n    minify: process.env.MODE === 'production' ? 
'esbuild' : false,\n    lib: {\n      entry: 'src/extension.ts',\n      formats: ['cjs'],\n    },\n    rollupOptions: {\n      external: ['@podman-desktop/api', ...builtinModules.flatMap(p => [p, `node:${p}`])],\n      output: {\n        entryFileNames: '[name].cjs',\n      },\n      plugins: [\n        {\n          // copy the swagger-ui-dist files to the dist folder as we need the files to be served\n          name: 'copy-swagger-ui',\n          async buildStart() {\n            const start = performance.now();\n            const source = resolve('../../node_modules/swagger-ui-dist');\n            const destination = resolve('dist/swagger-ui');\n\n            // Ensure destination directory exists\n            if (!existsSync(destination)) {\n              await mkdir(destination, { recursive: true });\n            }\n\n            // Copy files\n            await cp(source, destination, {\n              recursive: true,\n              filter: source => !source.includes('.map'),\n            });\n            console.info(`Swagger UI files copied in ${Math.round(performance.now() - start)}ms to dist/swagger-ui`);\n          },\n        },\n      ],\n    },\n    emptyOutDir: false,\n    reportCompressedSize: false,\n  },\n  plugins: [\n    // This is to apply the patch https://github.com/JS-DevTools/ono/pull/20\n    // can be removed when the patch is merged\n    replace({\n      delimiters: ['', ''],\n      preventAssignment: true,\n      values: {\n        'if (typeof module === \"object\" && typeof module.exports === \"object\") {':\n          'if (typeof module === \"object\" && typeof module.exports === \"object\" && typeof module.exports.default === \"object\") {',\n      },\n    }),\n  ],\n};\n\nexport default config;\n"
  },
  {
    "path": "packages/backend/vitest.config.js",
    "content": "/**********************************************************************\n * Copyright (C) 2023 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport path from 'node:path';\nimport { join } from 'path';\n\nconst PACKAGE_ROOT = __dirname;\n\nconst config = {\n  test: {\n    include: ['**/*.{test,spec}.?(c|m)[jt]s?(x)', '../shared/**/*.{test,spec}.?(c|m)[jt]s?(x)'],\n    coverage: {\n      provider: 'v8',\n      reporter: ['lcov', 'text'],\n      extension: '.ts',\n    },\n  },\n  resolve: {\n    alias: {\n      '@podman-desktop/api': path.resolve(__dirname, '__mocks__/@podman-desktop/api.js'),\n      '/@/': join(PACKAGE_ROOT, 'src') + '/',\n      '/@gen/': join(PACKAGE_ROOT, 'src-generated') + '/',\n      '@shared/': join(PACKAGE_ROOT, '../shared', 'src') + '/',\n    },\n  },\n};\n\nexport default config;\n"
  },
  {
    "path": "packages/frontend/index.html",
    "content": "<!DOCTYPE html>\n<html class=\"fixed\" lang=\"en\">\n  <head>\n    <meta charset=\"UTF-8\" />\n    <link rel=\"icon\" href=\"/favicon.ico\" />\n    <meta name=\"viewport\" content=\"width=device-width, height=device-height, initial-scale=1.0\" />\n    <title>Podman Desktop</title>\n  </head>\n  <body class=\"overflow-hidden text-white\">\n    <div id=\"app\"></div>\n    <script type=\"module\" src=\"./src/main.ts\"></script>\n  </body>\n</html>\n"
  },
  {
    "path": "packages/frontend/package.json",
    "content": "{\n  \"name\": \"frontend-app\",\n  \"displayName\": \"UI for AI Lab\",\n  \"version\": \"1.10.0-next\",\n  \"type\": \"module\",\n  \"license\": \"Apache-2.0\",\n  \"scripts\": {\n    \"preview\": \"vite preview\",\n    \"build\": \"vite build\",\n    \"test\": \"vitest run --coverage\",\n    \"test:watch\": \"vitest watch --coverage\",\n    \"watch\": \"vite --mode development build -w\"\n  },\n  \"dependencies\": {\n    \"@fortawesome/fontawesome-free\": \"^7.2.0\",\n    \"@fortawesome/free-brands-svg-icons\": \"^7.2.0\",\n    \"@fortawesome/free-solid-svg-icons\": \"^7.2.0\",\n    \"@fortawesome/free-regular-svg-icons\": \"^7.2.0\",\n    \"@podman-desktop/ui-svelte\": \"1.21.0\",\n    \"tinro\": \"^0.6.12\",\n    \"filesize\": \"^11.0.17\",\n    \"humanize-duration\": \"^3.33.2\",\n    \"moment\": \"^2.30.1\",\n    \"semver\": \"^7.7.4\"\n  },\n  \"devDependencies\": {\n    \"@sveltejs/vite-plugin-svelte\": \"6.2.4\",\n    \"@tailwindcss/typography\": \"^0.5.19\",\n    \"@tailwindcss/vite\": \"^4.2.4\",\n    \"@testing-library/dom\": \"^10.4.1\",\n    \"@testing-library/jest-dom\": \"^6.9.1\",\n    \"@testing-library/svelte\": \"^5.3.1\",\n    \"@testing-library/user-event\": \"^14.6.1\",\n    \"@tsconfig/svelte\": \"^5.0.8\",\n    \"@types/humanize-duration\": \"^3.27.4\",\n    \"@typescript-eslint/eslint-plugin\": \"8.59.1\",\n    \"jsdom\": \"^29.1.0\",\n    \"monaco-editor\": \"^0.55.1\",\n    \"postcss\": \"^8.5.12\",\n    \"postcss-load-config\": \"^6.0.1\",\n    \"svelte\": \"5.55.5\",\n    \"svelte-fa\": \"^4.0.4\",\n    \"svelte-select\": \"^5.8.3\",\n    \"svelte-markdown\": \"^0.4.1\",\n    \"svelte-preprocess\": \"^6.0.3\",\n    \"tailwindcss\": \"^4.2.4\",\n    \"vitest\": \"^3.0.5\"\n  }\n}\n"
  },
  {
    "path": "packages/frontend/src/App.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { vi, beforeEach, test, expect } from 'vitest';\nimport { render } from '@testing-library/svelte';\nimport App from '/@/App.svelte';\nimport { getRouterState, rpcBrowser } from '/@/utils/client';\nimport { configuration } from '/@/stores/extensionConfiguration';\nimport { MSG_NAVIGATION_ROUTE_UPDATE } from '@shared/Messages';\n\nvi.mock('tinro', () => ({\n  router: {\n    goto: vi.fn(),\n    mode: {\n      hash: vi.fn(),\n    },\n    location: {\n      query: new Map(),\n    },\n  },\n}));\n// mock monaco\nvi.mock('/@/lib/monaco-editor/MonacoEditor.svelte');\n\nvi.mock('./stores/extensionConfiguration.ts', () => ({\n  configuration: {\n    subscribe: vi.fn(),\n  },\n}));\n\nvi.mock('/@/lib/RecipeCardTags', () => ({\n  isDarkMode: vi.fn().mockReturnValue(false),\n}));\n\nvi.mock('./utils/client', async () => ({\n  studioClient: {\n    getExtensionConfiguration: vi.fn(),\n  },\n  instructlabClient: {},\n  rpcBrowser: {\n    subscribe: vi.fn(),\n  },\n  getRouterState: vi.fn(),\n  saveRouterState: vi.fn(),\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(getRouterState).mockResolvedValue({ url: '/' 
});\n  vi.mocked(rpcBrowser.subscribe).mockReturnValue({ unsubscribe: vi.fn() });\n  vi.mocked(configuration.subscribe).mockReturnValue(vi.fn());\n});\n\ntest('should subscribe to navigation update route on mount', async () => {\n  render(App, {});\n\n  await vi.waitFor(() => {\n    expect(rpcBrowser.subscribe).toHaveBeenCalledWith(MSG_NAVIGATION_ROUTE_UPDATE, expect.any(Function));\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/App.svelte",
    "content": "<script lang=\"ts\">\nimport './app.css';\nimport '@fortawesome/fontawesome-free/css/all.min.css';\nimport { router } from 'tinro';\nimport Route from '/@/Route.svelte';\nimport Navigation from '/@/lib/Navigation.svelte';\nimport Dashboard from '/@/pages/Dashboard.svelte';\nimport Recipes from '/@/pages/Recipes.svelte';\nimport Applications from './pages/Applications.svelte';\nimport Preferences from '/@/pages/Preferences.svelte';\nimport Models from '/@/pages/Models.svelte';\nimport Recipe from '/@/pages/Recipe.svelte';\nimport Model from './pages/Model.svelte';\nimport { onDestroy, onMount } from 'svelte';\nimport { getRouterState, rpcBrowser } from '/@/utils/client';\nimport CreateService from '/@/pages/CreateService.svelte';\nimport Services from '/@/pages/InferenceServers.svelte';\nimport ServiceDetails from '/@/pages/InferenceServerDetails.svelte';\nimport Playgrounds from './pages/Playgrounds.svelte';\nimport Playground from './pages/Playground.svelte';\nimport PlaygroundCreate from './pages/PlaygroundCreate.svelte';\nimport ImportModels from './pages/ImportModel.svelte';\nimport StartRecipe from '/@/pages/StartRecipe.svelte';\nimport TuneSessions from './pages/TuneSessions.svelte';\nimport { configuration } from './stores/extensionConfiguration';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport type { Unsubscriber } from 'svelte/store';\nimport { MSG_NAVIGATION_ROUTE_UPDATE } from '@shared/Messages';\nimport GPUPromotion from '/@/lib/notification/GPUPromotion.svelte';\nimport NewInstructLabSession from '/@/pages/NewInstructLabSession.svelte';\nimport LocalServer from './pages/server-information/LocalServer.svelte';\nimport AboutInstructLab from './pages/instructlab/AboutInstructLab.svelte';\nimport StartInstructLabContainer from '/@/pages/instructlab/StartInstructLabContainer.svelte';\nimport StartLlamaStackContainer from 
'./pages/llama-stack/StartLlamaStackContainer.svelte';\n\nrouter.mode.hash();\n\nlet isMounted = false;\n\nlet experimentalTuning: boolean = false;\nconst unsubscribers: Unsubscriber[] = [];\n\nonMount(async () => {\n  // Load router state on application startup\n  const state = await getRouterState();\n  router.goto(state.url);\n  isMounted = true;\n\n  unsubscribers.push(\n    configuration.subscribe((val: ExtensionConfiguration | undefined) => {\n      experimentalTuning = val?.experimentalTuning ?? false;\n    }),\n  );\n\n  unsubscribers.push(\n    rpcBrowser.subscribe(MSG_NAVIGATION_ROUTE_UPDATE, location => {\n      router.goto(location);\n    }).unsubscribe,\n  );\n});\n\nonDestroy(() => {\n  unsubscribers.forEach(unsubscriber => unsubscriber());\n});\n</script>\n\n<Route path=\"/*\" isAppMounted={isMounted} let:meta>\n  <main class=\"flex flex-col w-screen h-screen overflow-hidden bg-[var(--pd-content-bg)] text-base\">\n    <div class=\"flex flex-row w-full h-full overflow-hidden\">\n      <Navigation meta={meta} />\n\n      <div class=\"flex flex-col w-full h-full min-w-0\">\n        <GPUPromotion />\n\n        <!-- Dashboard -->\n        <Route path=\"/\">\n          <Dashboard />\n        </Route>\n\n        <!-- Recipes Catalog -->\n        <Route path=\"/recipes\">\n          <Recipes />\n        </Route>\n\n        <!-- Applications -->\n        <Route path=\"/applications\">\n          <Applications />\n        </Route>\n\n        <!-- Playgrounds -->\n        <Route path=\"/playgrounds\">\n          <Playgrounds />\n        </Route>\n        <Route path=\"/playground/:id/*\" let:meta>\n          {#if meta.params.id === 'create'}\n            <PlaygroundCreate />\n          {:else}\n            <Playground playgroundId={meta.params.id} />\n          {/if}\n        </Route>\n        <Route path=\"/llamastack/*\" firstmatch>\n          <Route path=\"/try\">\n            <StartLlamaStackContainer />\n          </Route>\n        </Route>\n        {#if 
experimentalTuning}\n          <!-- Tune with InstructLab -->\n          <Route path=\"/tune/*\" firstmatch>\n            <Route path=\"/start\">\n              <NewInstructLabSession />\n            </Route>\n            <Route path=\"/*\">\n              <TuneSessions />\n            </Route>\n          </Route>\n        {/if}\n        <Route path=\"/about-instructlab\">\n          <AboutInstructLab />\n        </Route>\n        <Route path=\"/instructlab/*\" firstmatch>\n          <Route path=\"/try\">\n            <StartInstructLabContainer />\n          </Route>\n        </Route>\n        <!-- Preferences -->\n        <Route path=\"/preferences\">\n          <Preferences />\n        </Route>\n\n        <!-- Local Server -->\n        <Route path=\"/local-server\">\n          <LocalServer />\n        </Route>\n\n        <!-- Recipes -->\n        <Route path=\"/recipe/:id/*\" firstmatch let:meta>\n          <Route path=\"/start\">\n            <StartRecipe recipeId={meta.params.id} trackingId={meta.query.trackingId} />\n          </Route>\n          <Route path=\"/*\">\n            <Recipe recipeId={meta.params.id} />\n          </Route>\n        </Route>\n\n        <!-- Models -->\n        <Route path=\"/models/*\" firstmatch>\n          <Route path=\"/import\">\n            <ImportModels />\n          </Route>\n          <Route path=\"/*\">\n            <Models />\n          </Route>\n        </Route>\n\n        <Route path=\"/model/:id/*\" let:meta>\n          <Model modelId={decodeURIComponent(meta.params.id)} />\n        </Route>\n\n        <!-- services -->\n        <Route path=\"/services/*\">\n          <Services />\n        </Route>\n\n        <Route path=\"/service/:id/*\" let:meta>\n          {#if meta.params.id === 'create'}\n            <CreateService trackingId={meta.query.trackingId} />\n          {:else}\n            <ServiceDetails containerId={meta.params.id} />\n          {/if}\n        </Route>\n      </div>\n    </div>\n  </main>\n</Route>\n"
  },
  {
    "path": "packages/frontend/src/Route.svelte",
    "content": "<script lang=\"ts\">\nimport { createRouteObject } from 'tinro/dist/tinro_lib';\nimport type { TinroRouteMeta } from 'tinro';\nimport { saveRouterState } from '/@/utils/client';\n\nexport let path = '/*';\nexport let fallback = false;\nexport let redirect = false;\nexport let firstmatch = false;\n\nexport let isAppMounted: boolean = false;\n\nlet showContent = false;\nlet params: Record<string, string> = {};\nlet meta: TinroRouteMeta = {} as TinroRouteMeta;\n\nconst route = createRouteObject({\n  fallback,\n  onShow() {\n    showContent = true;\n  },\n  onHide() {\n    showContent = false;\n  },\n  onMeta(newMeta: TinroRouteMeta) {\n    meta = newMeta;\n    params = meta.params;\n\n    if (isAppMounted) {\n      saveRouterState({ url: newMeta.url });\n    }\n  },\n});\n\n$: route.update({\n  path,\n  redirect,\n  firstmatch,\n});\n</script>\n\n{#if showContent}\n  <slot params={params} meta={meta} />\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/app.css",
    "content": "@import 'tailwindcss';\n@config '../tailwind.config.cjs';\n"
  },
  {
    "path": "packages/frontend/src/index.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n\t<head>\n\t\t<meta charset=\"utf-8\" />\n\t\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n    <title>AI Lab</title>\n\n\t</head>\n\t<body>\n    <div id=\"app\"></div>\n    <script type=\"module\" src=\"/src/main.ts\"></script>\n\t</body>\n</html>\n"
  },
  {
    "path": "packages/frontend/src/lib/ApplicationActions.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { expect, test, vi, beforeEach, describe } from 'vitest';\n\nimport { render, screen, fireEvent } from '@testing-library/svelte';\nimport { studioClient } from '../utils/client';\nimport ApplicationActions from '/@/lib/ApplicationActions.svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport { router } from 'tinro';\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {\n    requestStopApplication: vi.fn(),\n    requestStartApplication: vi.fn(),\n    requestRemoveApplication: vi.fn(),\n    requestRestartApplication: vi.fn(),\n    requestOpenApplication: vi.fn(),\n  },\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(studioClient.requestStopApplication).mockResolvedValue(undefined);\n  vi.mocked(studioClient.requestStartApplication).mockResolvedValue(undefined);\n  vi.mocked(studioClient.requestRemoveApplication).mockResolvedValue(undefined);\n  vi.mocked(studioClient.requestRestartApplication).mockResolvedValue(undefined);\n  vi.mocked(studioClient.requestOpenApplication).mockResolvedValue(undefined);\n});\n\ntest('deletion action should call 
requestRemoveApplication', async () => {\n  render(ApplicationActions, {\n    object: {\n      pod: {\n        Containers: [],\n      },\n    } as unknown as ApplicationState,\n    recipeId: 'dummy-recipe-id',\n    modelId: 'dummy-model-id',\n  });\n\n  const deleteBtn = screen.getByTitle('Delete AI App');\n  expect(deleteBtn).toBeVisible();\n\n  await fireEvent.click(deleteBtn);\n  expect(studioClient.requestRemoveApplication).toHaveBeenCalledWith('dummy-recipe-id', 'dummy-model-id');\n});\n\ndescribe('open action', () => {\n  test('open action should call requestOpenApplication', async () => {\n    render(ApplicationActions, {\n      object: {\n        pod: {\n          Containers: [\n            {\n              Status: 'running',\n            },\n          ],\n        },\n      } as unknown as ApplicationState,\n      recipeId: 'dummy-recipe-id',\n      modelId: 'dummy-model-id',\n    });\n\n    const openBtn = screen.getByTitle('Open AI App');\n    expect(openBtn).toBeVisible();\n\n    await fireEvent.click(openBtn);\n    expect(studioClient.requestOpenApplication).toHaveBeenCalledWith('dummy-recipe-id', 'dummy-model-id');\n  });\n\n  test('open action should not be visible when all container exited', async () => {\n    render(ApplicationActions, {\n      object: {\n        pod: {\n          Containers: [\n            {\n              Status: 'exited',\n            },\n          ],\n        },\n      } as unknown as ApplicationState,\n      recipeId: 'dummy-recipe-id',\n      modelId: 'dummy-model-id',\n    });\n\n    const openBtn = screen.queryByTitle('Open AI App');\n    expect(openBtn).toBeNull();\n  });\n});\n\ndescribe('start action', () => {\n  test('start action should be visible when all container exited', async () => {\n    render(ApplicationActions, {\n      object: {\n        pod: {\n          Containers: [\n            {\n              Status: 'exited',\n            },\n          ],\n        },\n      } as unknown as ApplicationState,\n      
recipeId: 'dummy-recipe-id',\n      modelId: 'dummy-model-id',\n    });\n\n    const startBtn = screen.getByTitle('Start AI App');\n    expect(startBtn).toBeDefined();\n\n    await fireEvent.click(startBtn);\n    expect(studioClient.requestStartApplication).toHaveBeenCalledWith('dummy-recipe-id', 'dummy-model-id');\n  });\n\n  test('start action should be hidden when one container is not exited', async () => {\n    render(ApplicationActions, {\n      object: {\n        pod: {\n          Containers: [\n            {\n              Status: 'exited',\n            },\n            {\n              Status: 'running',\n            },\n          ],\n        },\n      } as unknown as ApplicationState,\n      recipeId: 'dummy-recipe-id',\n      modelId: 'dummy-model-id',\n    });\n\n    const startBtn = screen.queryByTitle('Start AI App');\n    expect(startBtn).toBeNull();\n  });\n});\n\ntest('restart action should call requestRestartApplication', async () => {\n  render(ApplicationActions, {\n    object: {\n      pod: {\n        Containers: [],\n      },\n    } as unknown as ApplicationState,\n    recipeId: 'dummy-recipe-id',\n    modelId: 'dummy-model-id',\n  });\n\n  const restartBtn = screen.getByTitle('Restart AI App');\n  expect(restartBtn).toBeVisible();\n\n  await fireEvent.click(restartBtn);\n  expect(studioClient.requestRestartApplication).toHaveBeenCalledWith('dummy-recipe-id', 'dummy-model-id');\n});\n\ntest('open recipe action should redirect to recipe page', async () => {\n  const routerSpy = vi.spyOn(router, 'goto');\n  render(ApplicationActions, {\n    object: {\n      pod: {\n        Containers: [],\n      },\n    } as unknown as ApplicationState,\n    recipeId: 'dummy-recipe-id',\n    modelId: 'dummy-model-id',\n    enableGoToRecipeAction: true,\n  });\n\n  const openRecipeBtn = screen.getByTitle('Open Recipe');\n  expect(openRecipeBtn).toBeVisible();\n\n  await fireEvent.click(openRecipeBtn);\n  
expect(routerSpy).toHaveBeenCalledWith('/recipe/dummy-recipe-id');\n});\n\ntest('open recipe action should not be visible by default', async () => {\n  render(ApplicationActions, {\n    object: {\n      pod: {\n        Containers: [],\n      },\n    } as unknown as ApplicationState,\n    recipeId: 'dummy-recipe-id',\n    modelId: 'dummy-model-id',\n  });\n\n  const openRecipeBtn = screen.getByTitle('Open Recipe');\n  expect(openRecipeBtn).toHaveClass('hidden');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/ApplicationActions.svelte",
    "content": "<script lang=\"ts\">\nimport {\n  faRotateForward,\n  faArrowUpRightFromSquare,\n  faTrash,\n  faBookOpen,\n  faStop,\n  faPlay,\n} from '@fortawesome/free-solid-svg-icons';\nimport ListItemButtonIcon from '/@/lib/button/ListItemButtonIcon.svelte';\nimport { studioClient } from '/@/utils/client';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport { router } from 'tinro';\nimport { DropdownMenu } from '@podman-desktop/ui-svelte';\nimport FlatMenu from './FlatMenu.svelte';\n\nexport let object: ApplicationState | undefined;\nexport let recipeId: string;\nexport let modelId: string;\nexport let dropdownMenu = false;\nexport let enableGoToRecipeAction = false;\n\nfunction deleteApplication(): void {\n  studioClient.requestRemoveApplication(recipeId, modelId).catch(err => {\n    console.error(`Something went wrong while trying to delete AI App: ${String(err)}.`);\n  });\n}\n\nfunction startApplication(): void {\n  studioClient.requestStartApplication(recipeId, modelId).catch(err => {\n    console.error(`Something went wrong while trying to start AI App: ${String(err)}.`);\n  });\n}\n\nfunction stopApplication(): void {\n  studioClient.requestStopApplication(recipeId, modelId).catch(err => {\n    console.error(`Something went wrong while trying to delete AI App: ${String(err)}.`);\n  });\n}\n\nfunction restartApplication(): void {\n  studioClient.requestRestartApplication(recipeId, modelId).catch(err => {\n    console.error(`Something went wrong while trying to restart AI App: ${String(err)}.`);\n  });\n}\n\nfunction openApplication(): void {\n  studioClient.requestOpenApplication(recipeId, modelId).catch(err => {\n    console.error(`Something went wrong while trying to open AI App: ${String(err)}.`);\n  });\n}\n\nfunction redirectToRecipe(): void {\n  router.goto(`/recipe/${recipeId}`);\n}\n\nlet actionsStyle: typeof DropdownMenu | typeof FlatMenu;\nif (dropdownMenu) {\n  actionsStyle = DropdownMenu;\n} else {\n  
actionsStyle = FlatMenu;\n}\n\nlet exited: boolean | undefined = undefined;\n\n$: {\n  exited = object?.pod?.Containers?.every(container => container.Status === 'exited');\n}\n</script>\n\n{#if object?.pod !== undefined}\n  {#if exited}\n    <ListItemButtonIcon icon={faPlay} onClick={startApplication} title=\"Start AI App\" />\n  {:else}\n    <ListItemButtonIcon icon={faStop} onClick={stopApplication} title=\"Stop AI App\" />\n    <ListItemButtonIcon icon={faArrowUpRightFromSquare} onClick={openApplication} title=\"Open AI App\" />\n  {/if}\n\n  <svelte:component this={actionsStyle}>\n    <ListItemButtonIcon\n      icon={faRotateForward}\n      onClick={restartApplication}\n      title=\"Restart AI App\"\n      menu={dropdownMenu} />\n\n    <ListItemButtonIcon\n      icon={faBookOpen}\n      onClick={redirectToRecipe}\n      title=\"Open Recipe\"\n      hidden={!enableGoToRecipeAction}\n      menu={dropdownMenu} />\n\n    <ListItemButtonIcon icon={faTrash} onClick={deleteApplication} title=\"Delete AI App\" menu={dropdownMenu} />\n  </svelte:component>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/Badge.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { expect, test } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport Badge from './Badge.svelte';\nimport { faTrash } from '@fortawesome/free-solid-svg-icons';\n\ntest('print Badge with custom text and default background', async () => {\n  render(Badge, { icon: faTrash, content: 'custom-text' });\n\n  const badgeContent = screen.getByText('custom-text');\n  expect(badgeContent).toBeInTheDocument();\n  expect(badgeContent).toHaveClass('bg-[var(--pd-label-bg)]');\n});\n\ntest('print Badge with custom text and custom background', async () => {\n  render(Badge, { icon: faTrash, content: 'custom-text', class: 'bg-[var(--pd-label-text)]' });\n\n  const badgeContent = screen.getByText('custom-text');\n  expect(badgeContent).toBeInTheDocument();\n  expect(badgeContent).toHaveClass('bg-[var(--pd-label-text)]');\n  expect(badgeContent).not.toHaveClass('bg-[var(--pd-label-bg)]');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/Badge.svelte",
    "content": "<script lang=\"ts\">\nimport type { IconDefinition } from '@fortawesome/free-regular-svg-icons';\nimport Fa from 'svelte-fa';\n\ninterface Props {\n  icon?: IconDefinition;\n  content: string;\n  class?: string;\n}\n\nlet { icon, content, class: classes = 'bg-[var(--pd-label-bg)] text-[var(--pd-label-text)]' }: Props = $props();\n</script>\n\n<div class=\"{classes} rounded-md px-2 py-1 flex flex-row w-min h-min text-nowrap items-center text-sm\">\n  {#if icon}\n    <Fa class=\"mr-2\" icon={icon} />\n  {/if}\n  {content}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/Card.svelte",
    "content": "<script lang=\"ts\">\nimport Fa from 'svelte-fa';\nimport type { IconDefinition } from '@fortawesome/free-regular-svg-icons';\nimport { createEventDispatcher } from 'svelte';\n\nconst dispatch = createEventDispatcher();\n\nexport let title: string | undefined = undefined;\nexport let description: string | undefined = undefined;\nexport let classes: string = '';\n\nexport let href: string | undefined = undefined;\n\nexport let icon: IconDefinition | undefined = undefined;\n\nexport let primaryBackground: string = 'bg-charcoal-800';\n\nfunction handleClick(): void {\n  dispatch('click');\n}\n</script>\n\n<a class=\"no-underline\" href={href}>\n  <div class=\"{classes} rounded-md flex-nowrap overflow-hidden\" role=\"region\" aria-label={title ?? 'Card'}>\n    <div class=\"flex flex-row\">\n      <div class=\"flex flex-row items-start\">\n        {#if icon}\n          <button\n            on:click={handleClick}\n            class=\"{primaryBackground} rounded-full min-w-7 min-h-7 w-7 h-7 flex items-center justify-center mr-3\">\n            <Fa size=\"1x\" class=\"text-purple-500 cursor-pointer\" icon={icon} />\n          </button>\n        {/if}\n        <div\n          class=\"flex flex-col text-[var(--pd-content-card-text)] whitespace-normal space-y-2\"\n          aria-label=\"context-name\">\n          {#if title}\n            <div>\n              {title}\n            </div>\n          {/if}\n          {#if description}\n            <div>\n              {description}\n            </div>\n          {/if}\n        </div>\n      </div>\n    </div>\n    <div class=\"flex overflow-hidden\" role=\"region\" aria-label=\"content\">\n      <slot name=\"content\" />\n    </div>\n  </div>\n</a>\n"
  },
  {
    "path": "packages/frontend/src/lib/ContentDetailsLayout.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { expect, test } from 'vitest';\nimport ContentDetailsLayoutTest from './ContentDetailsLayoutTest.svelte';\nimport { render, screen } from '@testing-library/svelte';\nimport userEvent from '@testing-library/user-event';\n\ntest('should open/close details panel when clicking on toggle button', async () => {\n  render(ContentDetailsLayoutTest);\n\n  const panelOpenDetails = screen.getByLabelText('toggle a label');\n  expect(panelOpenDetails).toHaveClass('hidden');\n  const panelAppDetails = screen.getByLabelText('a label panel');\n  expect(panelAppDetails).toHaveClass('block');\n\n  const btnShowPanel = screen.getByRole('button', { name: 'show a label' });\n  const btnHidePanel = screen.getByRole('button', { name: 'hide a label' });\n\n  await userEvent.click(btnHidePanel);\n\n  expect(panelAppDetails).toHaveClass('hidden');\n  expect(panelOpenDetails).toHaveClass('block');\n\n  await userEvent.click(btnShowPanel);\n\n  expect(panelAppDetails).toHaveClass('block');\n  expect(panelOpenDetails).toHaveClass('hidden');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/ContentDetailsLayout.svelte",
    "content": "<script lang=\"ts\">\nexport let detailsTitle: string;\nexport let detailsLabel: string;\nexport let detailsSummary: string = '';\nlet open: boolean = true;\n\nconst toggle = (): void => {\n  open = !open;\n};\n</script>\n\n<div class=\"flex flex-col w-full overflow-y-auto\">\n  <slot name=\"header\" />\n  <div class=\"grid w-full lg:grid-cols-[1fr_auto] max-lg:grid-cols-[auto]\">\n    <div class=\"p-5 inline-grid\">\n      <slot name=\"content\" />\n    </div>\n    <div class=\"inline-grid max-lg:order-first\">\n      <div class=\"max-lg:w-full max-lg:min-w-full\" class:w-[375px]={open} class:min-w-[375px]={open}>\n        <div\n          class:hidden={!open}\n          class:block={open}\n          class=\"h-fit lg:bg-[var(--pd-content-card-bg)] text-[var(--pd-content-card-title)] lg:rounded-l-md lg:mt-5 lg:py-4 max-lg:block\"\n          aria-label={`${detailsLabel} panel`}>\n          <div class=\"flex flex-col lg:px-4 space-y-4 mx-auto\">\n            <div class=\"w-full flex flex-row justify-between max-lg:hidden\">\n              <span>{detailsTitle}</span>\n              <button on:click={toggle} aria-label={`hide ${detailsLabel}`}\n                ><i class=\"fas fa-angle-right text-[var(--pd-content-card-icon)]\"></i></button>\n            </div>\n            <slot name=\"details\" />\n          </div>\n        </div>\n        <div\n          class:hidden={open}\n          class:block={!open}\n          class=\"bg-[var(--pd-content-card-bg)] mt-5 p-4 rounded-md h-fit max-lg:hidden\"\n          aria-label={`toggle ${detailsLabel}`}>\n          <button on:click={toggle} aria-label={`show ${detailsLabel}`} title={detailsSummary}\n            ><i class=\"fas fa-angle-left text-[var(--pd-content-card-icon)]\"></i></button>\n        </div>\n      </div>\n    </div>\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/ContentDetailsLayoutTest.svelte",
    "content": "<script>\nimport ContentDetailsLayout from './ContentDetailsLayout.svelte';\n</script>\n\n<ContentDetailsLayout detailsTitle=\"a title\" detailsLabel=\"a label\" detailsSummary=\"a summary\">\n  <svelte:fragment slot=\"content\">A Content</svelte:fragment>\n  <svelte:fragment slot=\"details\">Details...</svelte:fragment>\n</ContentDetailsLayout>\n"
  },
  {
    "path": "packages/frontend/src/lib/ExpandableMessage.svelte",
    "content": "<script lang=\"ts\">\nexport let message: string | undefined = undefined;\nexport let title: string = 'Expand';\nlet showMessage: boolean = false;\n\nfunction handleClick(): void {\n  showMessage = !showMessage;\n}\n</script>\n\n{#if message}\n  <div role=\"note\" class:hidden={!showMessage} class=\"my-2 break-words\">{message}</div>\n  <div class=\"flex flex-col w-full items-end\">\n    <button on:click={handleClick} class=\"text-[var(--pd-link)] text-xs\">\n      {title}\n      {#if showMessage}\n        <i class=\"fas fa-chevron-up\"></i>\n      {:else}\n        <i class=\"fas fa-chevron-down\"></i>\n      {/if}\n    </button>\n  </div>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/FlatMenu.svelte",
    "content": "<slot />\n"
  },
  {
    "path": "packages/frontend/src/lib/Navigation.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, vi } from 'vitest';\nimport { screen, render } from '@testing-library/svelte';\nimport Navigation from './Navigation.svelte';\nimport type { TinroRouteMeta } from 'tinro';\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {\n    getExtensionConfiguration: vi.fn().mockResolvedValue({}),\n  },\n  rpcBrowser: {\n    subscribe: (): unknown => {\n      return {\n        unsubscribe: (): void => {},\n      };\n    },\n  },\n}));\n\ntest('Expect panel to have correct styling', async () => {\n  render(Navigation, { meta: { url: 'test' } as TinroRouteMeta });\n\n  const panel = screen.getByLabelText('PreferencesNavigation');\n  expect(panel).toBeInTheDocument();\n  expect(panel).toHaveClass('bg-[var(--pd-secondary-nav-bg)]');\n  expect(panel).toHaveClass('border-[var(--pd-global-nav-bg-border)]');\n  expect(panel).toHaveClass('border-r-[1px]');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/Navigation.svelte",
    "content": "<script lang=\"ts\">\nimport type { TinroRouteMeta } from 'tinro';\nimport Fa from 'svelte-fa';\nimport {\n  faBookOpen,\n  faBrain,\n  faGaugeHigh,\n  faMessage,\n  faRocket,\n  faServer,\n  faHouse,\n  faGear,\n  faCircleDown,\n} from '@fortawesome/free-solid-svg-icons';\nimport InstructLabIcon from '/@/lib/icons/InstructLabIcon.svelte';\nimport { SettingsNavItem } from '@podman-desktop/ui-svelte';\nimport { onDestroy, onMount } from 'svelte';\nimport { configuration } from '../stores/extensionConfiguration';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport type { Unsubscriber } from 'svelte/store';\nimport type { IconDefinition } from '@fortawesome/free-regular-svg-icons';\n\nexport let meta: TinroRouteMeta;\nlet experimentalTuning: boolean = false;\nlet cfgUnsubscribe: Unsubscriber;\n\n// By default, faBookOpen is 576x512, but we want it to be 512x512\n// we cannot modify the width and height in SettingsNavItem so just modify the icon instead\nlet copyFaBookOpenIcon: IconDefinition | undefined = undefined;\n\nonMount(() => {\n  copyFaBookOpenIcon = structuredClone(faBookOpen);\n  copyFaBookOpenIcon.icon[0] = 512;\n\n  cfgUnsubscribe = configuration.subscribe((val: ExtensionConfiguration | undefined) => {\n    experimentalTuning = val?.experimentalTuning ?? 
false;\n  });\n});\n\nonDestroy(() => {\n  cfgUnsubscribe?.();\n});\n</script>\n\n<nav\n  class=\"z-1 w-leftsidebar min-w-leftsidebar shadow-xs flex-col justify-between flex transition-all duration-500 ease-in-out bg-[var(--pd-secondary-nav-bg)] border-[var(--pd-global-nav-bg-border)] border-r-[1px]\"\n  aria-label=\"PreferencesNavigation\">\n  <div class=\"flex items-center\">\n    <div class=\"pt-4 pl-3 px-5 mb-10 flex items-center ml-[4px]\">\n      <Fa size=\"1.5x\" class=\"text-purple-500 cursor-pointer mr-4\" icon={faBrain} />\n      <p class=\"text-xl first-letter:uppercase text-[color:var(--pd-secondary-nav-header-text)]\">AI Lab</p>\n    </div>\n  </div>\n  <div class=\"h-full overflow-hidden hover:overflow-y-auto\" style=\"margin-bottom:auto\">\n    <SettingsNavItem icon={faHouse} title=\"Dashboard\" selected={meta.url === '/'} href=\"/\" />\n    <!-- AI Apps -->\n    <div class=\"pl-3 mt-2 ml-[4px]\">\n      <span class=\"text-[color:var(--pd-secondary-nav-header-text)]\">AI APPS</span>\n    </div>\n    <SettingsNavItem\n      icon={copyFaBookOpenIcon}\n      title=\"Recipe Catalog\"\n      selected={meta.url === '/recipes'}\n      href=\"/recipes\" />\n    <SettingsNavItem icon={faServer} title=\"Running\" selected={meta.url === '/applications'} href=\"/applications\" />\n\n    <!-- Models -->\n    <div class=\"pl-3 mt-2 ml-[4px]\">\n      <span class=\"text-[color:var(--pd-secondary-nav-header-text)]\">MODELS</span>\n    </div>\n    <SettingsNavItem icon={copyFaBookOpenIcon} title=\"Catalog\" selected={meta.url === '/models'} href=\"/models\" />\n    <SettingsNavItem icon={faRocket} title=\"Services\" selected={meta.url === '/services'} href=\"/services\" />\n    <SettingsNavItem icon={faMessage} title=\"Playgrounds\" selected={meta.url === '/playgrounds'} href=\"/playgrounds\" />\n    <SettingsNavItem\n      icon={faRocket}\n      title=\"Llama Stack\"\n      selected={meta.url.startsWith('/llamastack/try')}\n      href=\"/llamastack/try\" />\n\n    
<!-- Server Information -->\n    <div class=\"pl-3 mt-2 ml-[4px]\">\n      <span class=\"text-[color:var(--pd-secondary-nav-header-text)]\">SERVER INFORMATION</span>\n    </div>\n    <SettingsNavItem icon={faGear} title=\"Local Server\" selected={meta.url === '/local-server'} href=\"/local-server\" />\n\n    <!-- Tuning -->\n    <div class=\"pl-3 mt-2 ml-[4px]\">\n      <span class=\"text-[color:var(--pd-secondary-nav-header-text)]\">TUNING</span>\n    </div>\n    <SettingsNavItem\n      icon={InstructLabIcon}\n      title=\"About InstructLab\"\n      selected={meta.url.startsWith('/about-instructlab')}\n      href=\"/about-instructlab\" />\n    {#if experimentalTuning}\n      <SettingsNavItem\n        icon={faGaugeHigh}\n        title=\"Tune with InstructLab\"\n        selected={meta.url.startsWith('/tune')}\n        href=\"/tune\" />\n    {/if}\n    <SettingsNavItem\n      icon={faCircleDown}\n      title=\"Try InstructLab\"\n      selected={meta.url.startsWith('/instructlab/try')}\n      href=\"/instructlab/try\" />\n  </div>\n</nav>\n"
  },
  {
    "path": "packages/frontend/src/lib/RangeInput.svelte",
    "content": "<script lang=\"ts\">\nexport let name: string;\nexport let min: string;\nexport let max: string;\nexport let step: string;\nexport let value: number;\n</script>\n\n<div>\n  <div class=\"flex flex-row items-center\">\n    <span class=\"w-full uppercase text-[var(--pd-content-card-text)]\">{name}</span>\n    <input\n      bind:value={value}\n      type=\"number\"\n      name={`${name}-input`}\n      min={min}\n      max={max}\n      step={step}\n      placeholder={name}\n      class=\"p-2 w-24 text-right outline-hidden text-sm bg-[var(--pd-content-card-bg)] rounded-xs text-[var(--pd-content-card-text)] placeholder-[var(--pd-content-card-text)]\" />\n  </div>\n  <div class=\"w-full\">\n    <input\n      type=\"range\"\n      name={`${name}-range`}\n      min={min}\n      max={max}\n      step={step}\n      placeholder={name}\n      bind:value={value}\n      aria-label={`${name} slider`}\n      class=\"w-full h-1 bg-[var(--pd-button-primary-bg)] rounded-lg appearance-none accent-[var(--pd-button-primary-bg)] cursor-pointer range-xs mt-2\" />\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeCard.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { vi, test, expect, beforeEach, beforeAll } from 'vitest';\nimport { screen, render } from '@testing-library/svelte';\nimport { findLocalRepositoryByRecipeId } from '/@/utils/localRepositoriesUtils';\nimport RecipeCard from './RecipeCard.svelte';\nimport { writable, type Writable } from 'svelte/store';\nimport type { LocalRepository } from '@shared/models/ILocalRepository';\nimport { localRepositories } from '../stores/localRepositories';\n\nvi.mock('/@/utils/localRepositoriesUtils', () => ({\n  findLocalRepositoryByRecipeId: vi.fn(),\n}));\n\nvi.mock('../stores/localRepositories', () => ({\n  localRepositories: {\n    subscribe: vi.fn(),\n    unsubscribe: vi.fn(),\n  },\n}));\n\nvi.mock('../utils/client', async () => {\n  return {\n    studioClient: {},\n  };\n});\n\nconst mockLocalRepositories: Writable<LocalRepository[]> = writable([]);\n\nconst recipe = {\n  id: 'recipe 1',\n  name: 'Recipe 1',\n  readme: 'readme 1',\n  categories: [],\n  recommended: ['model1', 'model2'],\n  description: 'description 1',\n  repository: 'repo 1',\n};\n\nclass ResizeObserver {\n  observe = vi.fn();\n  disconnect = vi.fn();\n  
unobserve = vi.fn();\n}\n\nbeforeAll(() => {\n  Object.defineProperty(window, 'ResizeObserver', { value: ResizeObserver });\n});\n\nvi.mock('/@/lib/RecipeCardTags', () => ({\n  isDarkMode: vi.fn().mockReturnValue(false),\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(localRepositories).subscribe.mockImplementation(run => mockLocalRepositories.subscribe(run));\n});\n\ntest('recipe name and description', async () => {\n  // eslint-disable-next-line sonarjs/publicly-writable-directories\n  vi.mocked(findLocalRepositoryByRecipeId).mockReturnValue({ path: 'recipe1', sourcePath: '/tmp/recipe1', labels: {} });\n  render(RecipeCard, {\n    recipe,\n  });\n\n  const name = screen.queryByLabelText('Recipe 1 name');\n  expect(name).toBeInTheDocument();\n\n  const description = screen.queryByLabelText('Recipe 1 description');\n  expect(description).toBeInTheDocument();\n\n  const reference = screen.queryByLabelText('Recipe 1 ref');\n  expect(reference).not.toBeInTheDocument();\n});\n\ntest('recipe name, description and reference', async () => {\n  // eslint-disable-next-line sonarjs/publicly-writable-directories\n  vi.mocked(findLocalRepositoryByRecipeId).mockReturnValue({ path: 'recipe1', sourcePath: '/tmp/recipe1', labels: {} });\n  render(RecipeCard, {\n    recipe: { ...recipe, ref: 'myref' },\n  });\n\n  const name = screen.queryByLabelText('Recipe 1 name');\n  expect(name).toBeInTheDocument();\n\n  const description = screen.queryByLabelText('Recipe 1 description');\n  expect(description).toBeInTheDocument();\n\n  const reference = screen.queryByLabelText('Recipe 1 ref');\n  expect(reference).toBeInTheDocument();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeCard.svelte",
    "content": "<script lang=\"ts\">\nimport type { Recipe } from '@shared/models/IRecipe';\nimport { router } from 'tinro';\nimport { faArrowUpRightFromSquare } from '@fortawesome/free-solid-svg-icons';\nimport Fa from 'svelte-fa';\nimport { localRepositories } from '../stores/localRepositories';\nimport { findLocalRepositoryByRecipeId } from '/@/utils/localRepositoriesUtils';\nimport type { LocalRepository } from '@shared/models/ILocalRepository';\nimport RecipeStatus from '/@/lib/RecipeStatus.svelte';\nimport RecipeCardTags from '/@/lib/RecipeCardTags.svelte';\n\nexport let recipe: Recipe;\n\nlet localPath: LocalRepository | undefined = undefined;\n$: localPath = findLocalRepositoryByRecipeId($localRepositories, recipe.id);\n\nfunction handleClick(): void {\n  router.goto(`/recipe/${recipe.id}`);\n}\n</script>\n\n<div class=\"no-underline\">\n  <div\n    class=\"bg-[var(--pd-content-card-bg)] hover:bg-[var(--pd-content-card-hover-bg)] grow p-4 h-full rounded-md flex-nowrap flex flex-col\"\n    role=\"region\"\n    aria-label={recipe.name}>\n    <!-- body -->\n    <div class=\"flex flex-col grow\">\n      <div class=\"flex flex-row text-base\">\n        <!-- left column -->\n        <div class=\"flex flex-col\">\n          <span class=\"text-[var(--pd-content-card-header-text)]\" aria-label=\"{recipe.name} name\">{recipe.name}</span>\n          <span class=\"text-sm text-[var(--pd-content-card-text)]\" aria-label=\"{recipe.name} description\"\n            >{recipe.description}</span>\n        </div>\n\n        <!-- right column -->\n        <RecipeStatus recipe={recipe} localRepository={localPath} />\n      </div>\n\n      <!-- tags -->\n      <div class=\"flex flex-row gap-2 py-2 items-center\">\n        <RecipeCardTags recipe={recipe} />\n      </div>\n    </div>\n\n    <!-- footer -->\n    <div class=\"flex flex-row\">\n      <!-- version -->\n      <div class=\"grow text-[var(--pd-content-card-text)] opacity-50 whitespace-nowrap overflow-x-hidden 
text-ellipsis\">\n        {#if recipe.ref}\n          <span aria-label=\"{recipe.name} ref\">{recipe.ref}</span>\n        {/if}\n      </div>\n\n      <!-- more details -->\n      <button on:click={handleClick}>\n        <div class=\"flex flex-row items-center text-[var(--pd-link)]\">\n          <Fa class=\"mr-2\" icon={faArrowUpRightFromSquare} />\n          <span> More details </span>\n        </div>\n      </button>\n    </div>\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeCardTags.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { screen, render } from '@testing-library/svelte';\nimport { beforeAll, expect, test, vi } from 'vitest';\nimport RecipeCardTags from '/@/lib/RecipeCardTags.svelte';\nimport userEvent from '@testing-library/user-event';\n\nconst recipe = {\n  id: 'recipe1',\n  name: 'recipe1',\n  description: 'description',\n  repository: 'repository',\n  readme: 'readme',\n  categories: ['natural-language-processing', 'audio'],\n  languages: ['java', 'python'],\n  frameworks: ['langchain', 'vectordb'],\n  backend: 'whisper-cpp',\n};\n\nclass ResizeObserver {\n  observe = vi.fn();\n  disconnect = vi.fn();\n  unobserve = vi.fn();\n}\n\nvi.mock('/@/lib/RecipeCardTags', () => ({\n  getBGColor: vi.fn((_: string) => 'bg-purple-200'),\n  getTextColor: vi.fn((_: string) => 'text-purple-200'),\n  FRAMEWORKS: ['langchain', 'vectordb'],\n  TOOLS: ['whisper-cpp'],\n}));\n\nbeforeAll(() => {\n  Object.defineProperty(window, 'ResizeObserver', { value: ResizeObserver });\n});\n\ntest('Should render tags', () => {\n  render(RecipeCardTags, { recipe: recipe });\n\n  const category1 = screen.getByText('Natural Language Processing');\n  
expect(category1).toBeVisible();\n\n  const category2 = screen.getByText('Audio');\n  expect(category2).toBeVisible();\n\n  const language1 = screen.getByText('Java');\n  expect(language1).toBeVisible();\n\n  const language2 = screen.getByText('Python');\n  expect(language2).toBeVisible();\n\n  const framework1 = screen.getByText('langchain');\n  expect(framework1).toBeVisible();\n\n  const framework2 = screen.getByText('vectordb');\n  expect(framework2).toBeVisible();\n\n  const backend = screen.getByText('whisper-cpp');\n  expect(backend).toBeVisible();\n});\n\ntest('Button should be visible with \"+ X more\"', () => {\n  render(RecipeCardTags, { recipe: recipe });\n\n  const button = screen.getByRole('button');\n  expect(button).toBeVisible();\n  expect(button).toHaveTextContent('more');\n});\n\ntest('Clicking on button should show all the tags', async () => {\n  render(RecipeCardTags, { recipe: recipe });\n\n  const button = screen.getByRole('button');\n  expect(button).toBeVisible();\n  expect(button).toHaveTextContent('more');\n\n  // Clicking on the button\n  await userEvent.click(button);\n  expect(button).toHaveTextContent('Show less');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeCardTags.svelte",
    "content": "<script lang=\"ts\">\nimport type { Recipe } from '@shared/models/IRecipe';\nimport Badge from './Badge.svelte';\nimport { onDestroy, onMount } from 'svelte';\nimport { TOOLS, FRAMEWORKS, getBGColor, getTextColor } from '/@/lib/RecipeCardTags';\n\ninterface Props {\n  recipe: Recipe;\n}\n\nlet { recipe }: Props = $props();\n\nconst TAGS: string[] = [\n  ...recipe.categories,\n  ...(recipe.backend !== undefined ? [recipe.backend] : []),\n  ...(recipe.frameworks ?? []),\n  ...(recipe.languages ?? []),\n];\n\n// gap-2 = 8px\nconst PADDING = 8;\nlet expanded: boolean = $state(false);\nlet toggleButton: HTMLDivElement | undefined = $state();\nlet visibleTags: string[] = $state(TAGS);\nlet divTags: HTMLDivElement[] = $state([]);\nlet recipeCardWidth: number = $state(0);\n\nfunction updateContent(tag: string): string {\n  let updatedTag = tag;\n  if (tag === 'natural-language-processing' || tag === 'computer-vision') {\n    updatedTag = tag.replaceAll('-', ' ');\n  }\n\n  // Make first character uppercase only on use cases and languages\n  if (FRAMEWORKS.includes(updatedTag) || TOOLS.includes(updatedTag)) {\n    return updatedTag;\n  }\n\n  return updatedTag.replace(/\\b\\w/g, char => char.toUpperCase());\n}\n\nfunction updateVisibleTags(): void {\n  if (expanded) {\n    visibleTags = TAGS;\n    return;\n  }\n\n  if (!toggleButton) {\n    return;\n  }\n\n  if (TAGS.length !== visibleTags.length) {\n    toggleButton.classList.remove('hidden');\n  }\n\n  removeHiddenFromTags();\n\n  // default value is button for toggle\n  let totalWidth = PADDING + toggleButton.clientWidth;\n  visibleTags = [];\n  for (let i = 0; i < TAGS.length; i++) {\n    const tag = divTags[i];\n    const tagWidth = tag.clientWidth + PADDING;\n\n    if (totalWidth + tagWidth >= recipeCardWidth) {\n      addHiddenToTags(TAGS[i]);\n      return;\n    } else {\n      totalWidth += tagWidth;\n      visibleTags.push(TAGS[i]);\n    }\n  }\n\n  if (TAGS.length === visibleTags.length) {\n    
toggleButton.classList.add('hidden');\n  }\n}\n\n// Adds hidden class to all tags after afterTag\nfunction addHiddenToTags(afterTag: string): void {\n  let isAfterTag = false;\n  for (let i = 0; i < TAGS.length; i++) {\n    const divTag = divTags[i];\n    if (isAfterTag) {\n      divTag.classList.add('hidden');\n    } else if (TAGS[i] === afterTag) {\n      divTag.classList.add('hidden');\n      isAfterTag = true;\n    }\n  }\n}\n\n// Remove the hidden class from all the tags (this will result in div element having clientWidth)\nfunction removeHiddenFromTags(): void {\n  divTags.forEach(tag => {\n    tag.classList.remove('hidden');\n  });\n  visibleTags = TAGS;\n}\n\nonMount((): void => {\n  updateVisibleTags();\n  window.addEventListener('resize', updateVisibleTags);\n});\n\nonDestroy((): void => window.removeEventListener('resize', updateVisibleTags));\n\nfunction toggleExpanded(): void {\n  removeHiddenFromTags();\n  expanded = !expanded;\n  updateVisibleTags();\n}\n</script>\n\n<div\n  bind:clientWidth={recipeCardWidth}\n  class=\"w-full flex flex-row gap-2 py-2\"\n  class:overflow-hidden={!expanded}\n  class:flex-wrap={expanded}>\n  {#each TAGS as tag, i (i)}\n    <div bind:this={divTags[i]}>\n      <Badge class=\"{getBGColor(tag)} {getTextColor(tag)}\" content={updateContent(tag)} />\n    </div>\n  {/each}\n\n  <div bind:this={toggleButton}>\n    <button onclick={toggleExpanded}>\n      <Badge\n        class=\"bg-transparent text-[var(--pd-link)]\"\n        content={expanded ? 'Show less' : `+${TAGS.length - visibleTags.length} more`} />\n    </button>\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeCardTags.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport { studioClient } from '/@/utils/client';\nimport { gte } from 'semver';\n\nconst USE_CASES = ['natural-language-processing', 'audio', 'computer-vision'];\nconst LANGUAGES = ['java', 'javascript', 'python'];\nexport const FRAMEWORKS = ['langchain', 'langchain4j', 'quarkus', 'react', 'streamlit', 'vectordb', 'llama-stack-sdk'];\nexport const TOOLS = ['none', 'llama-cpp', 'whisper-cpp', 'llama-stack'];\n\n// Defaulting to Podman Desktop min version we need to run\nlet version: string = '1.8.0';\nlet configuration: ExtensionConfiguration;\nlet isDark = true;\n\nasync function setupProps(): Promise<void> {\n  configuration = await studioClient.getExtensionConfiguration();\n  version = (await studioClient.getPodmanDesktopVersion()).toString().replace(/-next/g, '');\n\n  if (configuration.appearance === 'dark') isDark = true;\n  else if (configuration.appearance === 'light') isDark = false;\n  else if (configuration.appearance === 'system') {\n    const app = document.getElementById('app');\n    if (!app) throw new Error('cannot found app element');\n    const style = 
window.getComputedStyle(app);\n    const color = style.getPropertyValue('--pd-terminal-background').trim();\n    isDark = color === '#000';\n  }\n}\n\nsetupProps().catch((e: unknown) => {\n  throw new Error(`Got an error when setting up props: ${e}`);\n});\n\nfunction getColor(pdColor: string, darkColor: string, lightColor: string): string {\n  if (gte(version, '1.17.0')) {\n    return pdColor;\n  } else {\n    if (isDark) return darkColor;\n    return lightColor;\n  }\n}\n\nfunction createBGColorMap(): Map<string, string> {\n  return new Map<string, string>([\n    ...USE_CASES.map(\n      useCase =>\n        [useCase, getColor('bg-[var(--pd-label-primary-bg)]', 'bg-purple-700', 'bg-purple-300')] as [string, string],\n    ),\n    ...LANGUAGES.map(\n      useCase =>\n        [useCase, getColor('bg-[var(--pd-label-secondary-bg)]', 'bg-sky-900', 'bg-sky-200')] as [string, string],\n    ),\n    ...FRAMEWORKS.map(\n      useCase =>\n        [useCase, getColor('bg-[var(--pd-label-tertiary-bg)]', 'bg-green-900', 'bg-green-200')] as [string, string],\n    ),\n    ...TOOLS.map(\n      useCase =>\n        [useCase, getColor('bg-[var(--pd-label-quaternary-bg)]', 'bg-amber-800', 'bg-amber-100')] as [string, string],\n    ),\n  ]);\n}\n\nfunction createTextColorMap(): Map<string, string> {\n  return new Map<string, string>([\n    ...USE_CASES.map(\n      useCase =>\n        [useCase, getColor('text-[var(--pd-label-primary-text)]', 'text-purple-300', 'text-purple-700')] as [\n          string,\n          string,\n        ],\n    ),\n    ...LANGUAGES.map(\n      useCase =>\n        [useCase, getColor('text-[var(--pd-label-secondary-text)]', 'text-sky-200', 'text-sky-900')] as [\n          string,\n          string,\n        ],\n    ),\n    ...FRAMEWORKS.map(\n      useCase =>\n        [useCase, getColor('text-[var(--pd-label-tertiary-text)]', 'text-green-200', 'text-green-900')] as [\n          string,\n          string,\n        ],\n    ),\n    ...TOOLS.map(\n      useCase =>\n  
      [useCase, getColor('text-[var(--pd-label-quaternary-text)]', 'text-amber-400', 'text-amber-900')] as [\n          string,\n          string,\n        ],\n    ),\n  ]);\n}\n\nexport function getBGColor(tag: string): string {\n  const color =\n    createBGColorMap().get(tag) ?? getColor('bg-[var(--pd-label-primary-bg)]', 'bg-purple-700', 'bg-purple-300');\n  return color;\n}\n\nexport function getTextColor(tag: string): string {\n  const color =\n    createTextColorMap().get(tag) ??\n    getColor('text-[var(--pd-label-primary-text)]', 'text-purple-300', 'text-purple-700');\n  return color;\n}\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeDetails.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { vi, test, expect, beforeEach } from 'vitest';\nimport { screen, render } from '@testing-library/svelte';\nimport userEvent from '@testing-library/user-event';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport * as catalogStore from '/@/stores/catalog';\nimport { readable, writable } from 'svelte/store';\nimport RecipeDetails from './RecipeDetails.svelte';\nimport * as tasksStore from '../stores/tasks';\nimport type { Task } from '@shared/models/ITask';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    getLocalRepositoriesMock: vi.fn(),\n    getTasksMock: vi.fn(),\n    openFileMock: vi.fn(),\n    requestDeleteLocalRepositoryMock: vi.fn(),\n  };\n});\n\nvi.mock('../utils/client', async () => {\n  return {\n    studioClient: {\n      openFile: mocks.openFileMock,\n      requestDeleteLocalRepository: mocks.requestDeleteLocalRepositoryMock,\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  };\n});\n\nvi.mock('/@/stores/tasks', async () => {\n  return {\n    tasks: vi.fn(),\n  
};\n});\n\nvi.mock('/@/stores/catalog', async () => {\n  return {\n    catalog: vi.fn(),\n  };\n});\n\nvi.mock('../stores/localRepositories', () => ({\n  localRepositories: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f(mocks.getLocalRepositoriesMock());\n      return (): void => {};\n    },\n  },\n}));\n\nconst initialCatalog: ApplicationCatalog = {\n  categories: [],\n  models: [],\n  recipes: [\n    {\n      id: 'recipe 1',\n      name: 'Recipe 1',\n      readme: 'readme 1',\n      categories: [],\n      recommended: ['model1', 'model2'],\n      description: 'description 1',\n      repository: 'repo 1',\n    },\n    {\n      id: 'recipe 2',\n      name: 'Recipe 2',\n      readme: 'readme 2',\n      categories: [],\n      description: 'description 2',\n      repository: 'repo 2',\n    },\n  ],\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  mocks.getLocalRepositoriesMock.mockReturnValue([]);\n  const tasksList = writable<Task[]>([]);\n  vi.mocked(tasksStore).tasks = tasksList;\n  mocks.openFileMock.mockReturnValue(Promise.resolve());\n  mocks.requestDeleteLocalRepositoryMock.mockReturnValue(Promise.resolve());\n});\n\ntest('button vs code should be visible if local repository is not empty', async () => {\n  mocks.getLocalRepositoriesMock.mockReturnValue([\n    {\n      path: 'random-path',\n      labels: {\n        'recipe-id': 'recipe 1',\n      },\n    },\n  ]);\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(RecipeDetails, {\n    recipeId: 'recipe 1',\n  });\n\n  const button = screen.getByTitle('Open in VS Code Desktop');\n  expect(button).toBeDefined();\n});\n\ntest('local clone and delete local clone buttons should be visible if local repository is not empty', async () => {\n  mocks.getLocalRepositoriesMock.mockReturnValue([\n    {\n      path: 'random-path',\n      labels: {\n        'recipe-id': 'recipe 1',\n      },\n    },\n  ]);\n  vi.mocked(catalogStore).catalog = 
readable<ApplicationCatalog>(initialCatalog);\n  render(RecipeDetails, {\n    recipeId: 'recipe 1',\n  });\n\n  const buttonLocalClone = screen.getByRole('button', { name: 'Local clone' });\n  expect(buttonLocalClone).toBeDefined();\n  expect(buttonLocalClone).toBeInTheDocument();\n  await userEvent.click(buttonLocalClone);\n\n  expect(mocks.openFileMock).toBeCalled();\n\n  const buttonDeleteClone = screen.getByTitle('Delete local clone');\n  expect(buttonDeleteClone).toBeDefined();\n  expect(buttonDeleteClone).toBeInTheDocument();\n  await userEvent.click(buttonDeleteClone);\n\n  expect(mocks.requestDeleteLocalRepositoryMock).toBeCalled();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeDetails.svelte",
    "content": "<script lang=\"ts\">\nimport { faFolderOpen, faTrash } from '@fortawesome/free-solid-svg-icons';\nimport { faGithub } from '@fortawesome/free-brands-svg-icons';\nimport { getDisplayName } from '/@/utils/versionControlUtils';\nimport Fa from 'svelte-fa';\nimport { studioClient } from '/@/utils/client';\nimport { catalog } from '/@/stores/catalog';\nimport VSCodeIcon from '/@/lib/images/VSCodeIcon.svelte';\nimport { localRepositories } from '../stores/localRepositories';\nimport { findLocalRepositoryByRecipeId } from '/@/utils/localRepositoriesUtils';\nimport { Button } from '@podman-desktop/ui-svelte';\n\nexport let recipeId: string;\n\n$: recipe = $catalog.recipes.find(r => r.id === recipeId);\n\n$: localPath = findLocalRepositoryByRecipeId($localRepositories, recipeId);\n\nconst onClickRepository = (): void => {\n  if (!recipe) return;\n\n  studioClient.openURL(recipe.repository).catch((err: unknown) => {\n    console.error('Something went wrong while opening url', err);\n  });\n};\n\nconst openVSCode = (): void => {\n  if (localPath) {\n    studioClient.openVSCode(localPath.sourcePath, recipe?.id).catch(err => console.error('Error opening VSCode:', err));\n  }\n};\n\nconst openLocalClone = (): void => {\n  if (localPath) {\n    studioClient.openFile(localPath.path).catch(err => console.error('Error opening local clone:', err));\n  }\n};\n\nconst deleteLocalClone = (): void => {\n  if (localPath) {\n    studioClient\n      .requestDeleteLocalRepository(localPath.path)\n      .catch(err => console.error(`Error deleting local repository ${localPath.path}:`, err));\n  }\n};\n</script>\n\n<div class=\"flex flex-col w-full space-y-4 rounded-md bg-[var(--pd-content-bg)] p-4\">\n  <div class=\"flex flex-col w-full space-y-2 w-[45px]\">\n    <div class=\"text-lg\">Repository</div>\n    <div class=\"cursor-pointer flex flex-col w-full space-y-2 text-nowrap\">\n      <button on:click={onClickRepository}>\n        <div class=\"flex flex-row p-0 m-0 
bg-transparent items-center space-x-2\">\n          <Fa size=\"lg\" icon={faGithub} />\n          <span>{getDisplayName(recipe?.repository)}</span>\n        </div>\n      </button>\n      {#if localPath}\n        <div class=\"flex flex-row w-full justify-between\">\n          <button on:click={openLocalClone} aria-label=\"Local clone\">\n            <div class=\"flex flex-row p-0 m-0 bg-transparent items-center space-x-2\">\n              <Fa size=\"lg\" icon={faFolderOpen} />\n              <span>Local clone</span>\n            </div>\n          </button>\n          <Button title=\"Delete local clone\" class=\"text-sm\" on:click={deleteLocalClone} icon={faTrash} />\n        </div>\n      {/if}\n    </div>\n  </div>\n  {#if localPath}\n    <Button type=\"secondary\" on:click={openVSCode} title=\"Open in VS Code Desktop\" icon={VSCodeIcon}\n      >Open in VSCode</Button>\n  {/if}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeStatus.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { fireEvent, render, screen } from '@testing-library/svelte';\nimport { expect, test, vi } from 'vitest';\nimport RecipeStatus from '/@/lib/RecipeStatus.svelte';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport { studioClient } from '/@/utils/client';\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {\n    cloneApplication: vi.fn(),\n  },\n}));\n\ntest('download icon should be visible when localPath is undefined', async () => {\n  render(RecipeStatus, {\n    recipe: {} as unknown as Recipe,\n    localRepository: undefined,\n  });\n\n  const icon = screen.getByLabelText('download icon');\n  expect(icon).toBeDefined();\n});\n\ntest('chevron down icon should be visible when localPath is defined', async () => {\n  render(RecipeStatus, {\n    recipe: {} as unknown as Recipe,\n    localRepository: {\n      labels: {},\n      path: 'random-path',\n      sourcePath: 'random-source-path',\n    },\n  });\n\n  const icon = screen.getByLabelText('chevron down icon');\n  expect(icon).toBeDefined();\n});\n\ntest('click on download icon should call cloneApplication', async () => {\n  
vi.mocked(studioClient.cloneApplication).mockResolvedValue(undefined);\n\n  render(RecipeStatus, {\n    recipe: {\n      id: 'dummy-recipe-id',\n    } as unknown as Recipe,\n    localRepository: undefined,\n  });\n\n  const button = screen.getByRole('button');\n  await fireEvent.click(button);\n\n  await vi.waitFor(() => {\n    expect(studioClient.cloneApplication).toHaveBeenCalledWith('dummy-recipe-id');\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipeStatus.svelte",
    "content": "<script lang=\"ts\">\nimport type { Recipe } from '@shared/models/IRecipe';\nimport Fa from 'svelte-fa';\nimport type { LocalRepository } from '@shared/models/ILocalRepository';\nimport { faCircleCheck, faDownload } from '@fortawesome/free-solid-svg-icons';\nimport { Spinner, Tooltip } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\n\nexport let recipe: Recipe;\nexport let localRepository: LocalRepository | undefined;\n\nlet loading: boolean = false;\n\nfunction onClick(): void {\n  if (loading || localRepository) return;\n  loading = true;\n\n  studioClient\n    .cloneApplication(recipe.id)\n    .catch((err: unknown) => {\n      console.error(err);\n    })\n    .finally(() => {\n      loading = false;\n    });\n}\n</script>\n\n{#key loading}\n  <Tooltip>\n    {#if localRepository}\n      <div aria-label=\"chevron down icon\" class=\"text-dustypurple-700 p-2 w-10 text-center\">\n        <Fa size=\"lg\" icon={faCircleCheck} />\n      </div>\n    {:else}\n      <button\n        on:click={onClick}\n        disabled={loading}\n        class=\"border-2 justify-center relative rounded-xs border-[var(--pd-button-secondary)] text-[var(--pd-button-secondary)] hover:bg-[var(--pd-button-secondary-hover)] hover:border-[var(--pd-button-secondary-hover)] hover:text-[var(--pd-button-text)] w-10 p-2 text-center cursor-pointer flex flex-row\">\n        {#if loading}\n          <Spinner class=\"text-[var(--pd-table-body-text-highlight)]\" size=\"1em\" />\n        {:else}\n          <div aria-label=\"download icon\">\n            <Fa size=\"sm\" icon={faDownload} />\n          </div>\n        {/if}\n      </button>\n    {/if}\n    <svelte:fragment slot=\"tip\">\n      <span class=\"inline-block py-2 px-4 rounded-md\"\n        >{loading ? 'Cloning...' : localRepository ? 'Recipe cloned' : 'Clone recipe'}</span>\n    </svelte:fragment>\n  </Tooltip>\n{/key}\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipesCard.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport { beforeAll, expect, test, vi } from 'vitest';\nimport RecipesCard from '/@/lib/RecipesCard.svelte';\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {},\n}));\n\nvi.mock('../stores/localRepositories', () => ({\n  localRepositories: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f([]);\n      return (): void => {};\n    },\n  },\n}));\n\nclass ResizeObserver {\n  observe = vi.fn();\n  disconnect = vi.fn();\n  unobserve = vi.fn();\n}\n\nbeforeAll(() => {\n  Object.defineProperty(window, 'ResizeObserver', { value: ResizeObserver });\n});\n\nvi.mock('/@/lib/RecipeCardTags', () => ({\n  isDarkMode: vi.fn().mockReturnValue(false),\n}));\n\ntest('recipes card without recipes should display empty message', async () => {\n  render(RecipesCard, {\n    recipes: [],\n    category: {\n      id: 'dummy-category',\n      name: 'Dummy category',\n    },\n  });\n\n  const message = screen.getByText('There is no recipe in this category for now ! 
Come back later');\n  expect(message).toBeDefined();\n});\n\ntest('recipes card with recipes should display them', async () => {\n  render(RecipesCard, {\n    recipes: [\n      {\n        id: 'recipe1',\n        name: 'Recipe 1',\n        models: ['model1'],\n        categories: [],\n        description: 'Recipe 1',\n        readme: '',\n        repository: 'https://recipe-1',\n      },\n    ],\n    category: {\n      id: 'dummy-category',\n      name: 'Dummy category',\n    },\n  });\n\n  const text = screen.getAllByText('Recipe 1');\n  expect(text.length).toBeGreaterThan(0);\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/RecipesCard.svelte",
    "content": "<script lang=\"ts\">\nimport Card from '/@/lib/Card.svelte';\nimport type { Category } from '@shared/models/ICategory';\nimport RecipeCard from '/@/lib/RecipeCard.svelte';\nimport type { Recipe } from '@shared/models/IRecipe';\n\nexport let category: Category;\nexport let recipes: Recipe[];\n</script>\n\n<Card title={category.name} classes=\"{$$props.class} font-medium mt-4\">\n  <div slot=\"content\" class=\"w-full\">\n    {#if recipes.length === 0}\n      <div class=\"text-gray-400 mt-2\">There is no recipe in this category for now ! Come back later</div>\n    {/if}\n    <div class=\"grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 gap-4 mt-4\">\n      {#each recipes as recipe (recipe.id)}\n        <RecipeCard recipe={recipe} />\n      {/each}\n    </div>\n  </div>\n</Card>\n"
  },
  {
    "path": "packages/frontend/src/lib/button/CopyButton.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { expect, test, vi, beforeEach, describe } from 'vitest';\n\nimport { render, within, fireEvent, waitFor } from '@testing-library/svelte';\nimport CopyButton from '/@/lib/button/CopyButton.svelte';\nimport { studioClient } from '/@/utils/client';\n\nvi.mock('../../utils/client', async () => ({\n  studioClient: {\n    copyToClipboard: vi.fn(),\n  },\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(studioClient.copyToClipboard).mockResolvedValue(undefined);\n});\n\ntest('clicking on the content should call copyToClipboard', async () => {\n  const { container } = render(CopyButton, {\n    content: 'dummy-content',\n  });\n\n  const cpyButton = within(container).getByRole('button');\n  expect(cpyButton).toBeDefined();\n\n  await fireEvent.click(cpyButton);\n\n  await waitFor(() => {\n    expect(studioClient.copyToClipboard).toHaveBeenCalledWith('dummy-content');\n  });\n});\n\ndescribe('tooltips properties should be propagated', () => {\n  test('top property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      top: true,\n    });\n\n    const toolTip 
= container.querySelector('.tooltip.top');\n    expect(toolTip).toBeDefined();\n  });\n\n  test('topLeft property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      topLeft: true,\n    });\n\n    const toolTip = container.querySelector('.tooltip.top-left');\n    expect(toolTip).toBeDefined();\n  });\n\n  test('topRight property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      topRight: true,\n    });\n\n    const toolTip = container.querySelector('.tooltip.top-right');\n    expect(toolTip).toBeDefined();\n  });\n\n  test('right property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      right: true,\n    });\n\n    const toolTip = container.querySelector('.tooltip.right');\n    expect(toolTip).toBeDefined();\n  });\n\n  test('bottom property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      bottom: true,\n    });\n\n    const toolTip = container.querySelector('.tooltip.bottom');\n    expect(toolTip).toBeDefined();\n  });\n\n  test('bottomLeft property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      bottomLeft: true,\n    });\n\n    const toolTip = container.querySelector('.tooltip.bottom-left');\n    expect(toolTip).toBeDefined();\n  });\n\n  test('bottomRight property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      bottomRight: true,\n    });\n\n    const toolTip = container.querySelector('.tooltip.bottom-right');\n    expect(toolTip).toBeDefined();\n  });\n\n  test('left property', async () => {\n    const { container } = render(CopyButton, {\n      content: 'dummy-content',\n      left: true,\n    });\n\n    const toolTip = container.querySelector('.tooltip.left');\n    expect(toolTip).toBeDefined();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/button/CopyButton.svelte",
    "content": "<script lang=\"ts\">\nimport { Tooltip } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\nimport type { ComponentProps } from 'svelte';\n\ninterface Props extends Omit<ComponentProps<Tooltip>, 'tip'> {\n  class?: string;\n  content: string;\n}\n\nlet { content, class: classes, children, ...restProps }: Props = $props();\nlet status: 'idle' | 'copied' = $state('idle');\n\nfunction copy(content: string): void {\n  if (status !== 'idle') return;\n  studioClient\n    .copyToClipboard(content)\n    .then(() => {\n      status = 'copied';\n    })\n    .catch((err: unknown) => {\n      console.error(err);\n    });\n}\n\nfunction reset(): void {\n  status = 'idle';\n}\n\nfunction handleClick(): void {\n  copy(content);\n}\n</script>\n\n<Tooltip {...restProps} tip={status === 'idle' ? 'Copy' : 'Copied'}>\n  <button onmouseleave={reset} onclick={handleClick} class={`${classes} cursor-copy`}>\n    {#if children}\n      {@render children()}\n    {/if}\n  </button>\n</Tooltip>\n"
  },
  {
    "path": "packages/frontend/src/lib/button/ListItemButtonIcon.svelte",
    "content": "<script lang=\"ts\">\nimport type { IconDefinition } from '@fortawesome/free-solid-svg-icons';\nimport Fa from 'svelte-fa';\nimport { DropdownMenu } from '@podman-desktop/ui-svelte';\n\nexport let title: string;\nexport let icon: IconDefinition;\nexport let hidden = false;\nexport let enabled: boolean = true;\nexport let onClick: () => void = () => {};\nexport let menu = false;\nexport let detailed = false;\nexport let inProgress = false;\nexport let iconOffset = '';\nexport let tooltip: string = '';\n\nlet positionLeftClass = 'left-1';\nif (detailed) positionLeftClass = 'left-2';\nlet positionTopClass = 'top-1';\nif (detailed) positionTopClass = 'top-[0.2rem]';\n\nconst buttonDetailedClass =\n  'text-[var(--pd-action-button-details-text)] bg-[var(--pd-action-button-details-bg)] hover:text-[var(--pd-action-button-details-hover-text)] font-medium rounded-lg text-sm inline-flex items-center px-3 py-2 text-center';\nconst buttonDetailedDisabledClass =\n  'text-[var(--pd-action-button-details-disabled-text)] bg-[var(--pd-action-button-details-disabled-bg)] font-medium rounded-lg text-sm inline-flex items-center px-3 py-2 text-center';\nconst buttonClass =\n  'm-0.5 text-[var(--pd-action-button-text)] hover:bg-[var(--pd-action-button-hover-bg)] hover:text-[var(--pd-action-button-hover-text)] font-medium rounded-full inline-flex items-center px-2 py-2 text-center';\nconst buttonDisabledClass =\n  'm-0.5 text-[var(--pd-action-button-disabled-text)] font-medium rounded-full inline-flex items-center px-2 py-2 text-center';\n\n$: handleClick =\n  enabled && !inProgress\n    ? onClick\n    : (): void => {};\n$: styleClass = detailed\n  ? enabled && !inProgress\n    ? buttonDetailedClass\n    : buttonDetailedDisabledClass\n  : enabled && !inProgress\n    ? 
buttonClass\n    : buttonDisabledClass;\n</script>\n\n{#if menu}\n  <!-- enabled menu -->\n  <DropdownMenu.Item\n    title={title}\n    tooltip={tooltip}\n    icon={icon}\n    enabled={enabled}\n    hidden={hidden}\n    onClick={handleClick} />\n{:else}\n  <button\n    title={title}\n    aria-label={title}\n    on:click={handleClick}\n    class=\"{styleClass} relative\"\n    class:disabled={inProgress}\n    class:hidden={hidden}>\n    <Fa class=\"h-4 w-4 {iconOffset}\" icon={icon} />\n    <div\n      aria-label=\"spinner\"\n      class=\"w-6 h-6 rounded-full animate-spin border border-solid border-[var(--pd-action-button-spinner)] border-t-transparent absolute {positionTopClass} {positionLeftClass}\"\n      class:hidden={!inProgress}>\n    </div>\n  </button>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/conversation/ChatMessage.svelte",
    "content": "<script lang=\"ts\">\nimport {\n  type ChatMessage,\n  isAssistantChat,\n  isAssistantToolCall,\n  isSystemPrompt,\n  isUserChat,\n} from '@shared/models/IPlaygroundMessage';\nimport ElapsedTime from '/@/lib/conversation/ElapsedTime.svelte';\n\nexport let message: ChatMessage;\n\nconst roles = {\n  system: 'System prompt',\n  user: 'User',\n  assistant: 'Assistant',\n};\n\nfunction getMessageParagraphs(message: ChatMessage): string[] {\n  if (!isAssistantToolCall(message)) {\n    return (message.content as string)?.split('\\n') ?? [];\n  }\n  return [];\n}\n</script>\n\n<div>\n  <div class=\"text-[var(--pd-content-header)]\" class:text-right={isAssistantChat(message)}>\n    {roles[message.role]}\n  </div>\n  <div\n    aria-label={`${roles[message.role]} message`}\n    class=\"p-4 rounded-md text-[var(--pd-content-card-text)]\"\n    class:bg-[var(--pd-content-card-bg)]={isUserChat(message)}\n    class:bg-[var(--pd-content-bg)]={isSystemPrompt(message)}\n    class:bg-[var(--pd-content-card-inset-bg)]={isAssistantChat(message)}\n    class:ml-8={isAssistantChat(message)}\n    class:mr-8={isUserChat(message)}>\n    {#each getMessageParagraphs(message) as paragraph, index (index)}\n      <p>{paragraph}</p>\n    {/each}\n  </div>\n  <ElapsedTime message={message} />\n  <div></div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/conversation/ConversationActions.svelte",
    "content": "<script lang=\"ts\">\nimport { studioClient } from '/@/utils/client';\nimport { faTrash } from '@fortawesome/free-solid-svg-icons';\nimport ListItemButtonIcon from '/@/lib/button/ListItemButtonIcon.svelte';\nimport type { Conversation } from '@shared/models/IPlaygroundMessage';\n\nexport let conversation: Conversation;\nexport let detailed: boolean = false;\n\nfunction deleteConversation(): void {\n  studioClient.requestDeleteConversation(conversation.id).catch((err: unknown) => {\n    console.error('Something went wrong while trying to delete conversation', err);\n  });\n}\n</script>\n\n<ListItemButtonIcon icon={faTrash} detailed={detailed} onClick={deleteConversation} title=\"Delete conversation\" />\n"
  },
  {
    "path": "packages/frontend/src/lib/conversation/ElapsedTime.svelte",
    "content": "<script lang=\"ts\">\nimport {\n  type AssistantChat,\n  type ChatMessage,\n  isAssistantChat,\n  isPendingChat,\n} from '@shared/models/IPlaygroundMessage';\n\nexport let message: ChatMessage;\n\nfunction elapsedTime(msg: AssistantChat): string {\n  if (isPendingChat(msg)) {\n    return ((Date.now() - msg.timestamp) / 1000).toFixed(1);\n  } else if (msg.completed) {\n    return ((msg.completed - msg.timestamp) / 1000).toFixed(1);\n  } else {\n    // should not happen\n    return '';\n  }\n}\n</script>\n\n{#if isAssistantChat(message)}\n  <div class=\"text-[var(--pd-content-header)] text-right select-none\" aria-label=\"elapsed\">\n    {elapsedTime(message)} s\n  </div>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/conversation/SystemPromptBanner.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { expect, test, vi, beforeEach } from 'vitest';\n\nimport { render, screen, fireEvent } from '@testing-library/svelte';\nimport SystemPromptBanner from '/@/lib/conversation/SystemPromptBanner.svelte';\nimport { studioClient } from '/@/utils/client';\nimport type { SystemPrompt } from '@shared/models/IPlaygroundMessage';\n\nvi.mock('../../utils/client', async () => {\n  return {\n    studioClient: {\n      setPlaygroundSystemPrompt: vi.fn(),\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(studioClient.setPlaygroundSystemPrompt).mockResolvedValue(undefined);\n});\n\ntest('render empty conversation, hidden textarea', () => {\n  render(SystemPromptBanner, {\n    conversation: {\n      id: 'dummyId',\n      modelId: 'dummyModelId',\n      name: 'dummyName',\n      messages: [],\n    },\n  });\n\n  const textbox = screen.getByRole('textbox');\n  expect(textbox).toHaveClass('hidden');\n\n  const alert = screen.getByRole('alert');\n  expect(alert).toHaveClass('hidden');\n});\n\ntest('empty conversation click on edit should reveal textarea', async () => {\n  render(SystemPromptBanner, {\n    
conversation: {\n      id: 'dummyId',\n      modelId: 'dummyModelId',\n      name: 'dummyName',\n      messages: [],\n    },\n  });\n  const editBtn = screen.getByTitle('Edit system prompt');\n  await fireEvent.click(editBtn);\n\n  const textbox = screen.getByRole('textbox');\n  await vi.waitFor(() => {\n    expect(textbox).not.toHaveClass('hidden');\n  });\n});\n\ntest('input in textarea should be sent to backend when valid click', async () => {\n  render(SystemPromptBanner, {\n    conversation: {\n      id: 'dummyId',\n      modelId: 'dummyModelId',\n      name: 'dummyName',\n      messages: [],\n    },\n  });\n  const editBtn = screen.getByTitle('Edit system prompt');\n  await fireEvent.click(editBtn);\n\n  const textbox = screen.getByRole('textbox');\n  await vi.waitFor(() => {\n    expect(textbox).not.toHaveClass('hidden');\n  });\n\n  await fireEvent.input(textbox, { target: { value: 'dummyInputValue' } });\n  await fireEvent.click(editBtn);\n\n  await vi.waitFor(() => {\n    expect(studioClient.setPlaygroundSystemPrompt).toHaveBeenCalledWith('dummyId', 'dummyInputValue');\n  });\n});\n\ntest('error message should be visible if submit empty', async () => {\n  render(SystemPromptBanner, {\n    conversation: {\n      id: 'dummyId',\n      modelId: 'dummyModelId',\n      name: 'dummyName',\n      messages: [],\n    },\n  });\n  const editBtn = screen.getByTitle('Edit system prompt');\n  await fireEvent.click(editBtn);\n\n  const textbox = screen.getByRole('textbox');\n  await vi.waitFor(() => {\n    expect(textbox).not.toHaveClass('hidden');\n  });\n\n  await fireEvent.click(editBtn);\n\n  await vi.waitFor(() => {\n    const alert = screen.getByRole('alert');\n    expect(alert).not.toHaveClass('hidden');\n    expect(alert.textContent).toBe('System prompt is too short.');\n  });\n});\n\ntest('clear button should reset editing state', async () => {\n  render(SystemPromptBanner, {\n    conversation: {\n      id: 'dummyId',\n      modelId: 'dummyModelId',\n      
name: 'dummyName',\n      messages: [],\n    },\n  });\n  const editBtn = screen.getByTitle('Edit system prompt');\n  await fireEvent.click(editBtn);\n\n  const textbox = screen.getByRole('textbox');\n  await vi.waitFor(() => {\n    expect(textbox).not.toHaveClass('hidden');\n  });\n\n  const clearBtn = screen.getByTitle('Clear');\n  await fireEvent.click(clearBtn);\n\n  await vi.waitFor(() => {\n    expect(textbox).toHaveClass('hidden');\n  });\n  expect(studioClient.setPlaygroundSystemPrompt).not.toHaveBeenCalled();\n});\n\ntest('clear button should set system prompt undefined if already exist', async () => {\n  render(SystemPromptBanner, {\n    conversation: {\n      id: 'dummyId',\n      modelId: 'dummyModelId',\n      name: 'dummyName',\n      messages: [\n        {\n          id: 'random',\n          content: 'existing',\n          role: 'system',\n          timestamp: 0,\n        } as SystemPrompt,\n      ],\n    },\n  });\n  const editBtn = screen.getByTitle('Edit system prompt');\n  await fireEvent.click(editBtn);\n\n  const textbox = screen.getByRole('textbox');\n  await vi.waitFor(() => {\n    expect(textbox).not.toHaveClass('hidden');\n  });\n\n  const clearBtn = screen.getByTitle('Clear');\n  await fireEvent.click(clearBtn);\n\n  await vi.waitFor(() => {\n    expect(textbox).toHaveClass('hidden');\n  });\n  expect(studioClient.setPlaygroundSystemPrompt).toHaveBeenCalledWith('dummyId', undefined);\n});\n\ntest('error message should be cleared if input change', async () => {\n  render(SystemPromptBanner, {\n    conversation: {\n      id: 'dummyId',\n      modelId: 'dummyModelId',\n      name: 'dummyName',\n      messages: [],\n    },\n  });\n  const editBtn = screen.getByTitle('Edit system prompt');\n  await fireEvent.click(editBtn);\n\n  const textbox = screen.getByRole('textbox');\n  await vi.waitFor(() => {\n    expect(textbox).not.toHaveClass('hidden');\n  });\n\n  await fireEvent.click(editBtn);\n\n  const alert = screen.getByRole('alert');\n  await 
vi.waitFor(() => {\n    expect(alert).not.toHaveClass('hidden');\n  });\n\n  await fireEvent.input(textbox, { target: { value: 'dummyInputValue' } });\n  await vi.waitFor(() => {\n    expect(alert).toHaveClass('hidden');\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/conversation/SystemPromptBanner.svelte",
    "content": "<script lang=\"ts\">\nimport { faCheck, faClose, faEdit, faTerminal } from '@fortawesome/free-solid-svg-icons';\nimport Fa from 'svelte-fa';\nimport { studioClient } from '/@/utils/client';\nimport { type Conversation, isSystemPrompt, isUserChat } from '@shared/models/IPlaygroundMessage';\nimport { onMount } from 'svelte';\n\nexport let conversation: Conversation;\n\nlet systemPrompt: string | undefined = undefined;\nlet editing: boolean = false;\nlet error: string | undefined;\nlet disabled: boolean = conversation.messages.some(isUserChat);\n\nconst onButtonClick = (): void => {\n  if (editing) {\n    if (systemPrompt !== undefined && systemPrompt.length > 1) {\n      error = undefined;\n      studioClient.setPlaygroundSystemPrompt(conversation.id, systemPrompt).catch((err: unknown) => {\n        error = `Something went wrong while setting the system prompt: ${String(err)}`;\n      });\n    } else {\n      error = 'System prompt is too short.';\n      return; // keep the editing state\n    }\n  }\n  editing = !editing;\n};\n\nconst onClear = (): void => {\n  systemPrompt = undefined;\n  editing = false;\n  error = undefined;\n\n  // If pressed on clear - deleting the system prompt\n  if (conversation.messages.some(isSystemPrompt)) {\n    studioClient.setPlaygroundSystemPrompt(conversation.id, undefined).catch((err: unknown) => {\n      error = `Something went wrong while setting the system prompt: ${String(err)}`;\n    });\n  }\n};\n\nconst onChange = (): void => {\n  error = undefined;\n};\n\nonMount(() => {\n  systemPrompt = conversation.messages.find(isSystemPrompt)?.content;\n});\n</script>\n\n<div class=\"bg-[var(--pd-content-card-bg)] text-[var(--pd-content-card-text)] rounded-md w-full px-4 py-2\">\n  <div class=\"flex items-center gap-x-2\">\n    <Fa icon={faTerminal} />\n    <span class=\"grow\">Define a system prompt</span>\n    <button class:hidden={!editing} on:click={onClear} title=\"Clear\" aria-label=\"Clear system prompt\">\n      
<Fa icon={faClose} />\n    </button>\n    <button\n      class:text-gray-800={disabled}\n      disabled={disabled}\n      on:click={onButtonClick}\n      title=\"Edit system prompt\"\n      aria-label=\"Edit system prompt\">\n      <Fa icon={editing ? faCheck : faEdit} />\n    </button>\n  </div>\n  <div>\n    <textarea\n      class:hidden={!editing}\n      on:input={onChange}\n      aria-label=\"system-prompt-textarea\"\n      bind:value={systemPrompt}\n      class=\"w-full p-2 mt-2 outline-hidden bg-[var(--pd-content-card-inset-bg)] rounded-xs text-[var(--pd-content-card-text)] placeholder-[var(--pd-content-card-text)] resize-none\"\n      rows=\"3\"\n      placeholder=\"Provide system prompt to define general context, instructions or guidelines to be used with each query\"\n    ></textarea>\n    <span role=\"alert\" class=\"text-[var(--pd-input-field-error-text)] pt-1\" class:hidden={!error}>{error}</span>\n  </div>\n  <span class=\"mt-2 text-[var(--pd-content-card-text)]\" class:hidden={editing || !systemPrompt}>{systemPrompt}</span>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/conversation/ToolCallMessage.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { expect, test, beforeEach, describe, vi } from 'vitest';\nimport '@testing-library/jest-dom/vitest';\nimport { fireEvent, render, within } from '@testing-library/svelte';\nimport ToolCallMessage from '/@/lib/conversation/ToolCallMessage.svelte';\n\nlet container: HTMLElement;\ndescribe('with result', () => {\n  beforeEach(() => {\n    vi.resetAllMocks();\n    container = render(ToolCallMessage, {\n      message: {\n        id: '1337',\n        timestamp: Date.now() - 500,\n        completed: Date.now(),\n        role: 'assistant',\n        content: {\n          type: 'tool-call',\n          toolCallId: '1-1',\n          toolName: 'weather',\n          args: { location: 'Don Benito' },\n          result: {\n            content: [{ type: 'text', text: 'The weather in Don Benito is sunny with a temperature of 25°C.' 
}],\n          },\n        },\n      },\n    }).container;\n  });\n  test('Has role', () => {\n    const role = within(container).getByTestId('tool-call-role');\n    expect(role).toHaveTextContent('Assistant');\n  });\n  test('Has summary', () => {\n    const summary = within(container).getByTestId('tool-call-summary');\n    expect(summary).toHaveTextContent(/Used tool: weather/);\n  });\n  test('Details are hidden by default', () => {\n    const result = within(container).getByTestId('tool-call-details');\n    expect(result).toHaveClass('h-0');\n  });\n  test('Has arguments', () => {\n    const args = within(container).getByTestId('tool-call-arguments');\n    expect(args?.textContent).toBe('{\\n' + '  \"location\": \"Don Benito\"\\n' + '}');\n  });\n  test('Has result', () => {\n    const result = within(container).getByTestId('tool-call-result');\n    expect(result?.textContent).toBe(\n      '{\\n' +\n        '  \"content\": [\\n' +\n        '    {\\n' +\n        '      \"type\": \"text\",\\n' +\n        '      \"text\": \"The weather in Don Benito is sunny with a temperature of 25°C.\"\\n' +\n        '    }\\n' +\n        '  ]\\n' +\n        '}',\n    );\n  });\n  test('Has toggle button', () => {\n    const toggleButton = within(container).getByTestId('tool-call-show-details');\n    expect(toggleButton).toBeDefined();\n  });\n  test('Clicking toggle button shows details', async () => {\n    const toggleButton = within(container).getByTestId('tool-call-show-details');\n    await fireEvent.click(toggleButton);\n    const result = within(container).getByTestId('tool-call-details');\n    expect(result).not.toHaveClass('h-0');\n    expect(result).toHaveClass('h-fit');\n  });\n  test('Has elapsed time', () => {\n    const elapsedTime = within(container).getByText('0.5 s');\n    expect(elapsedTime.ariaLabel).toBe('elapsed');\n    expect(elapsedTime).toBeDefined();\n  });\n});\ndescribe('without result', () => {\n  beforeEach(() => {\n    container = 
render(ToolCallMessage, {\n      message: {\n        id: '1337',\n        timestamp: Date.now() - 500,\n        role: 'assistant',\n        content: {\n          type: 'tool-call',\n          toolCallId: '1-1',\n          toolName: 'weather',\n          args: { location: 'Don Benito' },\n        },\n      },\n    }).container;\n  });\n  test('Has role', () => {\n    const role = within(container).getByTestId('tool-call-role');\n    expect(role).toHaveTextContent('Assistant');\n  });\n  test('Has summary', () => {\n    const summary = within(container).getByTestId('tool-call-summary');\n    expect(summary).toHaveTextContent(/Used tool: weather/);\n  });\n  test('Details are hidden by default', () => {\n    const result = within(container).getByTestId('tool-call-details');\n    expect(result).toHaveClass('h-0');\n  });\n  test('Has arguments', () => {\n    const args = within(container).getByTestId('tool-call-arguments');\n    expect(args?.textContent).toBe('{\\n' + '  \"location\": \"Don Benito\"\\n' + '}');\n  });\n  test('Does not have result', () => {\n    const result = within(container).queryByTestId('tool-call-result');\n    expect(result).toBeNull();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/conversation/ToolCallMessage.svelte",
    "content": "<script lang=\"ts\">\nimport { type AssistantChat, type ToolCall } from '@shared/models/IPlaygroundMessage';\nimport ElapsedTime from '/@/lib/conversation/ElapsedTime.svelte';\n\ninterface Props {\n  message: AssistantChat;\n}\nlet { message }: Props = $props();\nlet toolCall = $derived(message.content as ToolCall);\nlet result: string | object | undefined = $derived(toolCall?.result);\n\nlet open: boolean = $state(false);\nconst toggle = (): void => {\n  open = !open;\n};\n</script>\n\n<div>\n  <div data-testid=\"tool-call-role\" class=\"text-[var(--pd-content-header)] text-right\">Assistant</div>\n  <div\n    data-testid=\"tool-call-container\"\n    class=\"p-4 rounded-md text-[var(--pd-content-card-text)] bg-[var(--pd-content-card-inset-bg)] ml-8 flex flex-col\">\n    <div data-testid=\"tool-call-summary\" class=\"flex gap-1.5 justify-items-center\">\n      <i class=\"fa-solid fa-wrench before:h-full before:flex before:items-center\"></i>\n      <span>Used tool:</span>\n      <strong>{toolCall.toolName}</strong>\n      <div class=\"flex-grow\"></div>\n      <button\n        data-testid=\"tool-call-show-details\"\n        onclick={toggle}\n        aria-label=\"Show tool details\"\n        title=\"Show tool details\">\n        <i\n          class=\"fas text-[var(--pd-content-card-icon)] fa-angle-up transition-all before:h-full before:flex before:items-center\"\n          class:rotate-180={open}></i>\n      </button>\n    </div>\n    <div\n      data-testid=\"tool-call-details\"\n      class=\"overflow-hidden transition-[height]\"\n      class:mt-2={open}\n      class:h-fit={open}\n      class:h-0={!open}\n      style=\"interpolate-size: allow-keywords\">\n      <div>Arguments</div>\n      <pre data-testid=\"tool-call-arguments\" class=\"text-xs p-1\">{JSON.stringify(\n          toolCall.args,\n          undefined,\n          2,\n        ).trim()}</pre>\n      {#if toolCall?.result}\n        <div>Result</div>\n        <pre 
data-testid=\"tool-call-result\" class=\"text-xs p-1\">{typeof result === 'string'\n            ? result.trim()\n            : JSON.stringify(toolCall.result, undefined, 2).trim()}</pre>\n      {/if}\n    </div>\n  </div>\n  <ElapsedTime message={message} />\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/icons/InstructLabIcon.svelte",
    "content": "<script lang=\"ts\">\ninterface Props {\n  size?: string;\n}\n\nlet { size = '40' }: Props = $props();\n</script>\n\n<div role=\"img\">\n  <svg\n    width={size}\n    height={size}\n    viewBox=\"0 0 110.32513 76.001793\"\n    version=\"1.1\"\n    id=\"svg1\"\n    xml:space=\"preserve\"\n    xmlns=\"http://www.w3.org/2000/svg\"\n    ><defs id=\"defs1\"></defs><g id=\"layer1\" transform=\"translate(-36.723946,-99.124995)\"\n      ><g id=\"g1\"\n        ><path\n          class=\"cls-2\"\n          d=\"m 93.726338,173.93094 c -8.913455,0 -17.555707,-4.00471 -22.536754,-10.44122 -12.194982,-15.75675 -10.314657,-45.29951 -6.427452,-48.85223 v 0 c 4.303048,-3.93241 15.512665,-13.08089 26.659004,-13.08995 h 0.0089 c 11.851464,0 23.259954,9.56433 26.469164,12.50235 3.98665,3.64312 8.16313,33.56558 -2.17865,49.37655 -4.30304,6.58114 -12.52946,10.5045 -21.994349,10.5045 z m -2.305202,-70.48498 v 0 c -10.477377,0 -21.234995,8.80497 -25.375323,12.59272 v 0 c -1.084802,0.9944 -2.748163,7.93713 -2.196722,18.23372 0.3616,6.67152 1.988802,19.19194 8.841134,28.05117 4.637526,5.98448 12.701221,9.70895 21.036113,9.70895 8.823052,0 16.452832,-3.60694 20.403312,-9.64568 10.5045,-16.05506 4.99009,-44.64862 2.486,-46.93576 C 104.41164,104.3138 94.250658,103.44596 91.421136,103.44596 Z\"\n          id=\"path24\"\n          style=\"fill:currentColor;stroke:currentColor;stroke-width:2.39169;stroke-dasharray:none;stroke-opacity:1\"\n        ></path\n        ><path\n          id=\"ellipse28\"\n          style=\"fill:currentColor;stroke-width:0px\"\n          class=\"cls-1\"\n          d=\"m 79.940316,146.08772 a 3.5436857,5.0443277 0 0 1 -3.543686,5.04433 3.5436857,5.0443277 0 0 1 -3.543686,-5.04433 3.5436857,5.0443277 0 0 1 3.543686,-5.04432 3.5436857,5.0443277 0 0 1 3.543686,5.04432 z\"\n        ></path\n        ><path\n          class=\"cls-1\"\n          d=\"m 76.396632,151.57497 c -2.205765,0 -3.995688,-2.45886 -3.995688,-5.48728 0,-3.02839 1.789923,-5.48729 
3.995688,-5.48729 2.205763,0 3.995685,2.4589 3.995685,5.48729 0,3.02842 -1.789922,5.48728 -3.995685,5.48728 z m 0,-10.0796 c -1.708564,0 -3.100725,2.06113 -3.100725,4.59232 0,2.53123 1.392161,4.59235 3.100725,4.59235 1.708561,0 3.100722,-2.06112 3.100722,-4.59235 0,-2.53119 -1.392161,-4.59232 -3.100722,-4.59232 z\"\n          id=\"path29\"\n          style=\"fill:currentColor;stroke:currentColor;stroke-width:0px\"></path\n        ><path\n          id=\"ellipse29\"\n          style=\"fill:currentColor;stroke-width:0px\"\n          class=\"cls-1\"\n          d=\"m 113.09005,146.08772 a 3.5436857,5.0443277 0 0 1 -3.54368,5.04433 3.5436857,5.0443277 0 0 1 -3.54369,-5.04433 3.5436857,5.0443277 0 0 1 3.54369,-5.04432 3.5436857,5.0443277 0 0 1 3.54368,5.04432 z\"\n        ></path\n        ><path\n          class=\"cls-1\"\n          d=\"m 109.54636,151.57497 c -2.20576,0 -3.99568,-2.45886 -3.99568,-5.48728 0,-3.02839 1.78992,-5.48729 3.99568,-5.48729 2.20577,0 3.99569,2.4589 3.99569,5.48729 0,3.02842 -1.78992,5.48728 -3.99569,5.48728 z m 0,-10.0796 c -1.70856,0 -3.10072,2.06113 -3.10072,4.59232 0,2.53123 1.39216,4.59235 3.10072,4.59235 1.70857,0 3.10073,-2.06112 3.10073,-4.59235 0,-2.53119 -1.39216,-4.59232 -3.10073,-4.59232 z\"\n          id=\"path30\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ><path\n          class=\"cls-2\"\n          d=\"m 119.30054,155.63394 h -14.80755 c -3.03744,0 -5.405924,-2.2871 -5.903126,-5.69519 l -1.65432,-11.47177 c -0.189832,-1.34696 0.21696,-2.68491 1.16616,-3.77873 1.148082,-1.32889 2.919926,-2.1244 4.736966,-2.1244 h 19.78859 c 1.97073,0 4.04089,1.00342 5.16185,2.48599 0.80456,1.07575 1.05768,2.32329 0.71417,3.51657 l -3.31769,11.47177 c -1.33792,4.6285 -3.93241,5.59576 -5.87601,5.59576 z M 102.82963,134.0193 c -1.37408,0 -2.76624,0.62375 -3.634084,1.61814 -0.47008,0.5424 -0.994403,1.43737 -0.822642,2.61258 l 1.654326,11.47177 c 0.31639,2.21481 1.82608,4.44769 4.45673,4.44769 h 14.80754 c 2.09728,0 
3.60696,-1.52778 4.48385,-4.54715 l 3.31768,-11.47177 c 0.27118,-0.94919 -0.10848,-1.7447 -0.47912,-2.23288 -0.84072,-1.12095 -2.48601,-1.90744 -3.99569,-1.90744 h -19.78859 z\"\n          id=\"path31\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ><path\n          class=\"cls-2\"\n          d=\"M 80.690636,155.63394 H 65.883093 c -1.943603,0 -4.547127,-0.96726 -5.876007,-5.59576 L 56.6894,138.56641 c -0.343522,-1.19328 -0.09041,-2.44082 0.714162,-3.51657 1.12096,-1.49159 3.191124,-2.48599 5.161848,-2.48599 h 19.788591 c 1.817041,0 3.579846,0.79551 4.736965,2.1244 0.949204,1.09382 1.356002,2.43177 1.166164,3.77873 l -1.654324,11.47177 c -0.488161,3.40809 -2.865684,5.69519 -5.903129,5.69519 z M 62.56541,134.0193 c -1.509685,0 -3.154965,0.78645 -3.995688,1.90744 -0.37064,0.49719 -0.759361,1.28365 -0.47912,2.23288 l 3.317685,11.47177 c 0.867842,3.01937 2.377524,4.54711 4.483846,4.54711 h 14.807544 c 2.639686,0 4.140327,-2.23288 4.456727,-4.44769 l 1.654325,-11.47177 c 0.171757,-1.17521 -0.352563,-2.07014 -0.822642,-2.61254 -0.867842,-1.00345 -2.260006,-1.61818 -3.634086,-1.61818 H 62.56541 Z\"\n          id=\"path32\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ><path\n          class=\"cls-2\"\n          d=\"m 86.711286,144.6865 -0.37064,-1.03058 c 0.126554,-0.0451 3.145925,-1.11189 6.427452,-1.11189 3.281523,0 6.300887,1.06669 6.427448,1.11189 l -0.37064,1.03058 c 0,0 -2.974164,-1.04865 -6.056808,-1.04865 -3.082645,0 -6.029691,1.0396 -6.056812,1.04865 z\"\n          id=\"path33\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ><path\n          class=\"cls-2\"\n          d=\"m 109.79948,128.36022 c -2.72104,0 -3.9776,-0.93112 -4.5652,-1.79896 -0.35257,-0.52433 -0.57857,-1.28369 -0.32544,-1.70856 v 0 c 0.55144,-0.94017 1.23848,-1.55487 2.05208,-1.84416 0.84976,-0.3075 1.86224,-0.0994 2.712,0.56048 0.82265,0.5876 2.52217,2.51311 2.88377,3.38096 0.10848,0.26206 0.0904,0.56051 -0.0542,0.81361 
-0.14463,0.25318 -0.38872,0.41585 -0.66896,0.452 -0.75936,0.0994 -1.43736,0.14453 -2.03401,0.14453 z m 1.41025,-0.84973 z m -4.91777,-2.10633 c 0.0452,0.15375 0.17175,0.44294 0.50624,0.72318 0.55144,0.45199 1.68144,0.9221 3.99568,0.73226 -0.5424,-0.72321 -1.46448,-1.74473 -1.97976,-2.11538 -0.37064,-0.28017 -0.904,-0.51527 -1.356,-0.35257 -0.42488,0.15376 -0.8136,0.48818 -1.15712,1.01251 z m -0.11754,0.18997 z\"\n          id=\"path34\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ><path\n          class=\"cls-2\"\n          d=\"m 76.161591,128.36022 c -0.605682,0 -1.283681,-0.0451 -2.034005,-0.14453 v 0 c -0.280238,-0.0362 -0.52432,-0.19885 -0.668959,-0.45199 -0.144629,-0.24396 -0.162737,-0.55146 -0.05426,-0.81362 0.361599,-0.86784 2.061123,-2.79336 2.901843,-3.39904 0.831682,-0.64182 1.844163,-0.84071 2.693926,-0.5424 0.804561,0.28017 1.500642,0.90399 2.043042,1.84417 0.253109,0.42486 0.02699,1.18422 -0.32544,1.70855 -0.57856,0.86784 -1.835122,1.79896 -4.565208,1.79896 z m -0.985362,-1.50062 c 2.314242,0.18997 3.444247,-0.28016 3.995685,-0.73226 0.334495,-0.28017 0.461042,-0.56048 0.506242,-0.72318 -0.343522,-0.52433 -0.73224,-0.86784 -1.157123,-1.01251 -0.451999,-0.16263 -0.985359,0.0724 -1.337921,0.3345 -0.53336,0.37966 -1.455442,1.40119 -1.997843,2.13345 z\"\n          id=\"path35\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ><path\n          class=\"cls-2\"\n          d=\"m 93.039298,168.2719 c 0,0 -0.0452,0 -0.07233,0 -5.749449,-0.0451 -8.759772,-5.73137 -8.886333,-5.97546 l -1.699524,-3.28154 2.748167,2.46795 c 0,0 1.916481,1.6995 4.131283,1.74471 v 0 c 2.006884,0.0273 3.064567,-2.94704 3.073608,-2.97417 l 0.632799,-1.78991 0.6328,1.78991 c 0,0 1.075765,2.97417 3.037445,2.97417 0.0089,0 0.02699,0 0.03615,0 2.23288,-0.0451 4.113207,-1.72663 4.131287,-1.74471 l 2.712,-2.45889 -1.66336,3.26346 c -0.12655,0.24395 -3.109767,5.97543 -8.823054,5.97543 z m -5.857929,-4.07704 c 1.301762,1.33791 3.263445,2.712 
5.794648,2.73008 2.558325,0.0171 4.529046,-1.38312 5.839848,-2.74816 -0.64184,0.22585 -1.346961,0.38872 -2.106322,0.39774 -1.826082,0.0543 -3.064564,-1.41024 -3.733526,-2.54927 -0.668962,1.13903 -1.898403,2.58545 -3.733525,2.54927 -0.732243,-0.0171 -1.428323,-0.17186 -2.052083,-0.38872 z\"\n          id=\"path36\"\n          style=\"fill:currentColor;stroke:currentColor;stroke-width:0.1;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n        ></path\n        ><path\n          class=\"cls-2\"\n          d=\"m 116.44396,163.57456 c 0.83243,-0.99467 1.17647,-1.48142 2.00683,-2.57083 0.21696,0.0451 0.38872,0.0813 0.47912,0.10865 8.74857,2.67681 28.46915,-22.69354 28.11444,-29.9134 -0.18081,-3.6793 -3.40808,-7.39476 -6.81617,-11.32714 -1.22944,-1.4193 -2.49504,-2.87474 -3.59792,-4.32113 -1.37409,-1.79896 -2.54929,-3.67931 -3.57985,-5.34266 -1.9888,-3.20015 -3.56176,-5.73137 -5.87601,-6.68057 -0.89496,-0.37064 -1.83512,-0.56953 -2.78432,-0.59665 -4.84545,-0.14453 -8.5609,4.12224 -9.79938,5.74043 -5.12569,-3.95049 -14.01202,-9.546265 -23.169554,-9.546265 h -0.0089 c -8.217375,0 -16.362428,4.691785 -22.401156,9.329295 -1.383123,-1.74473 -5.008169,-5.65904 -9.627616,-5.53248 -0.9492,0.0273 -1.889363,0.22584 -2.784323,0.59662 -1.907443,0.77747 -3.173046,2.81146 -4.646569,5.15283 -0.904,1.44639 -1.934562,3.08264 -3.182084,4.55616 -0.913041,1.07575 -1.907444,2.18768 -2.865684,3.26343 -4.601367,5.16188 -8.940574,10.02538 -9.175615,14.69908 -0.280241,5.69519 10.712419,20.26772 18.82131,26.57764 3.832965,2.98319 6.870409,4.06799 9.293135,3.32674 0.433921,-0.13565 1.600081,-0.38872 2.901843,-0.67801 0.759362,1.30176 1.572963,2.52217 2.431764,3.65217 0.831682,1.09383 1.771845,2.13342 2.802406,3.10975 3.244384,0.36534 4.043683,1.1708 4.782165,1.09372 l 1.383247,1.78242 c -2.287124,-1.5097 -5.641065,-5.11719 -7.223068,-7.16024 -1.645283,-2.13345 -3.028404,-4.50194 -4.176486,-7.01506 l -0.42488,0.0905 c -1.808004,0.39777 -3.118807,0.67801 
-3.787766,0.88592 -0.515282,0.16263 -2.214803,-0.20774 -5.270329,-2.58545 -8.081772,-6.29185 -17.266428,-19.75241 -17.112745,-22.86219 0.153683,-3.10978 4.348244,-7.81057 8.054651,-11.96897 0.976322,-1.09385 1.988802,-2.22386 2.928965,-3.33579 1.464482,-1.71757 2.585443,-3.49846 3.570805,-5.08046 0.840723,-1.33794 1.979765,-3.16403 2.567365,-3.40809 0.406799,-0.16263 0.804561,-0.25317 1.2204,-0.26206 2.431763,-0.0632 4.96297,2.48599 6.07489,3.91431 l 0.271183,3.64312 c 4.70081,-4.21262 15.268586,-12.42096 25.565161,-12.43002 10.640095,0 21.325395,8.127 25.845405,12.24924 0.19888,0.18075 0.4068,0.47007 0.61472,0.84072 l 0.31638,-4.31208 c 1.08481,-1.38315 3.65217,-3.97762 6.07489,-3.91434 0.41584,0.0102 0.8136,0.0994 1.22041,0.26206 0.98536,0.40679 2.41368,2.70295 3.78776,4.91775 1.0848,1.74474 2.31424,3.72451 3.82392,5.69523 1.18425,1.54585 2.49505,3.06457 3.76969,4.52904 2.76625,3.19113 5.63193,6.49074 5.74041,8.64226 0.15368,3.10975 -9.03097,16.57034 -17.12178,22.86219 -3.05553,2.37751 -4.74601,2.74816 -5.27033,2.58545 -0.3345,-0.0994 -0.87688,-0.21696 -2.08825,-0.45199 -0.8588,2.31423 -1.91648,4.50191 -3.21824,6.48168 -1.10288,1.68142 -2.47697,3.18207 -4.06801,4.47481 l -0.85526,1.74898 c 3.48592,-2.34287 5.44995,-4.45158 6.46901,-5.51636 z\"\n          id=\"path40\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ><path\n          class=\"cls-1\"\n          d=\"m 91.140895,152.40667 h 3.661204 c 3.299605,0 2.585446,1.76281 2.585446,3.95049 0,2.18767 -1.509685,3.95049 -3.380966,3.95049 h -2.079205 c -1.871281,0 -3.380963,-1.76282 -3.380963,-3.95049 0,-2.18768 -0.714161,-3.95049 2.585443,-3.95049 z\"\n          id=\"path49\"\n          style=\"fill:currentColor;stroke-width:0px\"></path\n        ></g\n      ></g\n    ></svg>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/icons/ModelStatusIcon.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { expect, test, vi, beforeEach } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport ModelColumnIcon from './ModelStatusIcon.svelte';\nimport { type InferenceServer, InferenceType } from '@shared/models/IInference';\nimport { readable } from 'svelte/store';\nimport * as inferenceStore from '/@/stores/inferenceServers';\n\nvi.mock('/@/stores/inferenceServers', () => ({\n  inferenceServers: vi.fn(),\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  (inferenceStore.inferenceServers as unknown) = readable<InferenceServer[]>([]);\n});\n\ntest('Expect remote model to have NONE title', async () => {\n  const object: ModelInfo = {\n    id: 'model-downloaded-id',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    memory: 1000,\n  };\n\n  render(ModelColumnIcon, { object });\n\n  const role = screen.getByRole('status');\n  expect(role).toBeDefined();\n  expect(role.title).toBe('NONE');\n});\n\ntest('Expect downloaded model to have DOWNLOADED title', async () => {\n  const object: ModelInfo 
= {\n    id: 'model-downloaded-id',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      creation: undefined,\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n\n  render(ModelColumnIcon, { object });\n\n  const role = screen.getByRole('status');\n  expect(role).toBeDefined();\n  expect(role.title).toBe('DOWNLOADED');\n});\n\ntest('Expect in used model to have USED title', async () => {\n  const object: ModelInfo = {\n    id: 'model-in-used-id',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      creation: undefined,\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n\n  (inferenceStore.inferenceServers as unknown) = readable<InferenceServer[]>([\n    {\n      models: [object],\n      type: InferenceType.LLAMA_CPP,\n      status: 'running',\n      container: {\n        containerId: '',\n        engineId: '',\n      },\n      connection: {\n        port: 0,\n      },\n      health: undefined,\n      labels: {},\n    },\n  ]);\n\n  render(ModelColumnIcon, { object });\n\n  const role = screen.getByRole('status');\n  expect(role).toBeDefined();\n  expect(role.title).toBe('USED');\n});\n\ntest('Expect non-downloaded model to have NONE title', async () => {\n  const object: ModelInfo = {\n    id: 'model-downloaded-id',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    memory: 1000,\n  };\n\n  render(ModelColumnIcon, { object });\n\n  const role = screen.getByRole('status');\n  expect(role).toBeDefined();\n  expect(role.title).toBe('NONE');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/icons/ModelStatusIcon.svelte",
    "content": "<script lang=\"ts\">\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport ModelWhite from './ModelWhite.svelte';\nimport { inferenceServers } from '/@/stores/inferenceServers';\n\nimport { StatusIcon } from '@podman-desktop/ui-svelte';\nimport RemoteModel from './RemoteModel.svelte';\n\ninterface Props {\n  object: ModelInfo;\n}\n\nlet { object }: Props = $props();\n\nlet status: string | undefined = $derived.by(() => {\n  if ($inferenceServers.some(server => server.models.some(model => model.id === object.id))) {\n    return 'USED';\n  } else {\n    return object.file ? 'DOWNLOADED' : 'NONE';\n  }\n});\n</script>\n\n{#if status === 'NONE'}\n  <div role=\"status\" title=\"NONE\">\n    <RemoteModel class=\"text-[var(--pd-status-not-running)]\" size=\"28\" />\n  </div>\n{:else}\n  <StatusIcon status={status} icon={ModelWhite} />\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/icons/ModelWhite.svelte",
    "content": "<script lang=\"ts\">\nexport let size = '40';\nexport let solid: boolean = false;\n\nconst fg = solid ? 'white' : 'currentColor';\n</script>\n\n<div role=\"img\">\n  <svg\n    width={size}\n    height={size}\n    style={$$props.style}\n    class={$$props.class}\n    viewBox=\"0 0 16 16\"\n    xmlns=\"http://www.w3.org/2000/svg\">\n    <g clip-path=\"url(#clip0_47_118)\">\n      <g clip-path=\"url(#clip1_47_118)\">\n        <path\n          d=\"M14.9984 7.94844C14.9703 7.13751 14.2922 6.50001 13.4813 6.50001H13.2891C13.2641 6.50001 13.2406 6.49063 13.2234 6.47188L9.23126 2.48594C9.20001 2.45469 9.19689 2.40626 9.22188 2.37032C9.39688 2.12501 9.50001 1.82501 9.50001 1.50001C9.50001 0.676569 8.82501 6.10354e-06 8.00157 -0.0015564C7.37188 -0.0015564 6.83126 0.385944 6.60939 0.935944C6.59532 0.968756 6.56407 0.992194 6.52657 0.992194H2.97345C2.93751 0.992194 2.9047 0.970319 2.89063 0.935944C2.66876 0.387506 2.12813 6.10354e-06 1.49688 6.10354e-06C0.682822 6.10354e-06 0.0109465 0.664069 8.99853e-06 1.47813C-0.0109285 2.31563 0.664072 3.00001 1.50001 3.00001C2.00001 3.00001 1.94063 2.95001 2.13282 2.86094C2.1672 2.84532 2.20626 2.85157 2.23282 2.87813L3.75938 4.40469C3.79376 4.43907 3.79376 4.49532 3.75938 4.52969L1.79688 6.49219C1.77657 6.51251 1.75001 6.52188 1.72188 6.51719C1.6422 6.50469 1.56095 6.50001 1.47813 6.50157C0.670321 6.51094 0.0109465 7.17188 8.99853e-06 7.97969C-0.0109285 8.81719 0.664072 9.50001 1.50001 9.50001C2.33595 9.50001 1.6547 9.49376 1.7297 9.48282C1.75938 9.47813 1.78907 9.48907 1.81095 9.50938L3.68907 11.4375C3.72345 11.4719 3.72345 11.5266 3.68907 11.5609L2.18595 13.1C2.16095 13.1266 2.12188 13.1344 2.08751 13.1188C1.89688 13.0375 1.68438 12.9938 1.46251 13C0.660946 13.0188 0.0109465 13.6766 8.99853e-06 14.4781C-0.0109285 15.3156 0.664072 16 1.50001 16C2.33595 16 2.67657 15.6063 2.89532 15.05C2.90938 15.0156 2.94063 14.9938 2.97813 14.9938H6.52032C6.55626 14.9938 6.58907 15.0156 6.60314 15.05C6.82189 15.6063 7.36251 16 7.99688 
16C8.81407 16 9.48751 15.3391 9.50001 14.5219C9.5047 14.2031 9.40938 13.9078 9.24376 13.6625C9.22032 13.6266 9.22345 13.5797 9.2547 13.55L13.2828 9.52188C13.3031 9.50157 13.3297 9.49219 13.3578 9.49532C13.4047 9.50001 13.4516 9.50157 13.5 9.50157C14.3453 9.50157 15.0281 8.80157 14.9984 7.95001V7.94844ZM1.50001 2.00001C1.22345 2.00001 1.00001 1.77657 1.00001 1.50001C1.00001 1.22344 1.22345 1.00001 1.50001 1.00001C1.77657 1.00001 2.00001 1.22344 2.00001 1.50001C2.00001 1.77657 1.77657 2.00001 1.50001 2.00001ZM1.50001 15C1.22345 15 1.00001 14.7766 1.00001 14.5C1.00001 14.2234 1.22345 14 1.50001 14C1.77657 14 2.00001 14.2234 2.00001 14.5C2.00001 14.7766 1.77657 15 1.50001 15ZM8.00001 1.00001C8.27657 1.00001 8.50001 1.22344 8.50001 1.50001C8.50001 1.77657 8.27657 2.00001 8.00001 2.00001C7.72345 2.00001 7.50001 1.77657 7.50001 1.50001C7.50001 1.22344 7.72345 1.00001 8.00001 1.00001ZM2.91563 2.14532C2.89063 2.12032 2.88282 2.08282 2.89532 2.05001C2.90938 2.01563 2.94063 1.99219 2.97813 1.99219H6.0797C6.15938 1.99219 6.19845 2.08751 6.1422 2.14376L4.59063 3.69532C4.55626 3.72969 4.50001 3.72969 4.46563 3.69532L2.91563 2.14532ZM1.50001 8.50001C1.22345 8.50001 1.00001 8.27657 1.00001 8.00001C1.00001 7.72344 1.22345 7.50001 1.50001 7.50001C1.77657 7.50001 2.00001 7.72344 2.00001 8.00001C2.00001 8.27657 1.77657 8.50001 1.50001 8.50001ZM2.71251 6.98907L4.46563 5.23594C4.50001 5.20157 4.55626 5.20157 4.59063 5.23594L6.61876 7.26407C6.64688 7.29219 6.65313 7.33438 6.63751 7.37032C6.62657 7.39376 6.6172 7.41719 6.60626 7.44063C6.5922 7.47657 6.55782 7.50001 6.51876 7.50001H2.97657C2.94063 7.50001 2.90782 7.47813 2.89376 7.44376C2.84532 7.32188 2.78126 7.20782 2.7047 7.10469C2.67813 7.07032 2.68282 7.02032 2.71407 6.98907H2.71251ZM2.71876 9.00157C2.68751 8.97032 2.68282 8.92188 2.70938 8.88594C2.78438 8.78438 2.84532 8.67344 2.89376 8.55469C2.90782 8.52032 2.93907 8.49844 2.97657 8.49844H6.52188C6.55938 8.49844 6.59376 8.52188 6.60782 8.55626C6.61095 8.56563 6.61563 8.57501 6.61876 
8.58438C6.63439 8.62032 6.62657 8.66094 6.59845 8.68751L4.56095 10.6875C4.52657 10.7219 4.47032 10.7219 4.43595 10.6875L2.7172 9.00001L2.71876 9.00157ZM6.0797 13.9922H2.97345C2.93751 13.9922 2.9047 13.9703 2.89063 13.9359C2.88595 13.925 2.88126 13.9141 2.87657 13.9031C2.86251 13.8703 2.86876 13.8313 2.89532 13.8047L4.43907 12.2609C4.47345 12.2266 4.5297 12.2266 4.56407 12.2609L6.1422 13.8391C6.19845 13.8953 6.15782 13.9906 6.0797 13.9906V13.9922ZM7.99845 14.9984C7.72188 14.9984 7.49845 14.775 7.49845 14.4984C7.49845 14.2219 7.72188 13.9984 7.99845 13.9984C8.27501 13.9984 8.49845 14.2219 8.49845 14.4984C8.49845 14.775 8.27501 14.9984 7.99845 14.9984ZM12.3234 9.06251L8.38126 13.0047C8.36095 13.025 8.3297 13.0344 8.30157 13.0297C8.20313 13.0094 8.10314 12.9984 7.99845 12.9984C7.6672 12.9984 7.36095 13.1063 7.11251 13.2875C7.07657 13.3141 7.02657 13.3109 6.99532 13.2781L5.30782 11.5594C5.27345 11.525 5.27345 11.4703 5.30782 11.4344L7.30157 9.39532C7.32813 9.36876 7.37032 9.36094 7.4047 9.37501C7.58595 9.45313 7.78595 9.49688 7.99688 9.49688C8.62813 9.49688 9.1672 9.10782 9.38907 8.55626C9.40314 8.52032 9.43751 8.49688 9.47657 8.49688H11.9969C12.0469 8.49688 12.0922 8.52657 12.1125 8.57344C12.1672 8.70469 12.2406 8.82813 12.3297 8.93751C12.3594 8.97501 12.3563 9.02813 12.3234 9.06251ZM7.49845 7.99844C7.49845 7.72188 7.72188 7.49844 7.99845 7.49844C8.27501 7.49844 8.49845 7.72188 8.49845 7.99844C8.49845 8.27501 8.27501 8.49844 7.99845 8.49844C7.72188 8.49844 7.49845 8.27501 7.49845 7.99844ZM12.4063 7.06094C12.2531 7.17657 12.1688 7.29688 12.1141 7.42188C12.0938 7.46719 12.0484 7.49844 11.9984 7.49844H9.47657C9.43751 7.49844 9.40314 7.47501 9.38907 7.43907C9.1672 6.88751 8.62813 6.49844 7.99688 6.49844C8 6.5 7.61876 6.53438 7.45001 6.60157C7.41563 6.61563 7.37501 6.60782 7.35001 6.58126L5.29688 4.52813C5.26251 4.49376 5.26251 4.43751 5.29688 4.40313L6.98751 2.71251C7.01876 2.68126 7.0672 2.67813 7.10314 2.70313C7.35314 2.88907 7.66251 3.00001 7.99845 3.00001C8.33438 
3.00001 8.17188 2.99219 8.2547 2.97813C8.28282 2.97344 8.31095 2.98282 8.33126 3.00157L12.4156 6.92032C12.4578 6.96094 12.4531 7.02813 12.4063 7.06251V7.06094ZM13.4984 8.49844C13.2219 8.49844 12.9984 8.27501 12.9984 7.99844C12.9984 7.72188 13.2219 7.49844 13.4984 7.49844C13.775 7.49844 13.9984 7.72188 13.9984 7.99844C13.9984 8.27501 13.775 8.49844 13.4984 8.49844Z\"\n          fill={fg}></path>\n      </g>\n    </g>\n    <defs>\n      <clipPath id=\"clip0_47_118\">\n        <rect width=\"16\" height=\"16\" fill={fg}></rect>\n      </clipPath>\n      <clipPath id=\"clip1_47_118\">\n        <rect width=\"16\" height=\"16\" fill={fg}></rect>\n      </clipPath>\n    </defs>\n  </svg>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/icons/PlaygroundWhite.svelte",
    "content": "<div role=\"img\" class=\"rounded-xs py-[6px] pl-[7px] pr-[5px]\">\n  <svg\n    width=\"24\"\n    height=\"24\"\n    viewBox=\"0 0 24 24\"\n    version=\"1.1\"\n    id=\"svg3\"\n    xml:space=\"preserve\"\n    xmlns=\"http://www.w3.org/2000/svg\"\n    ><path\n      style=\"color:#000000;fill:#3c8d47;fill-opacity:1;stroke-width:0.752434;stroke-linecap:round;stroke-linejoin:round;-inkscape-stroke:none\"\n      d=\"M 3.0097361,0 C 1.3553589,0 0,1.355359 0,3.0097361 V 20.990264 C 0,22.644641 1.3553589,24 3.0097361,24 H 20.990264 C 22.644641,24 24,22.644641 24,20.990264 V 3.0097361 C 24,1.355359 22.644641,0 20.990264,0 Z\"\n      id=\"rect5\"></path\n    ><g id=\"g5\" transform=\"matrix(0.9420863,0,0,0.9420863,-5.6137888,-12.058117)\" style=\"fill:#ffffff\"\n      ><circle\n        cx=\"16.541031\"\n        cy=\"23.271023\"\n        r=\"0.80865288\"\n        fill=\"currentColor\"\n        id=\"circle1\"\n        style=\"fill:#ffffff;stroke-width:0.808654\"></circle\n      ><path\n        fill=\"currentColor\"\n        d=\"m 15.32805,24.888338 a 0.80865225,0.80865225 0 0 0 -0.808244,0.808244 0.7957158,0.7957158 0 0 0 0.389406,0.674669 0.78601008,0.78601008 0 0 0 0.418838,0.134233 0.80865225,0.80865225 0 0 0 0.808245,-0.808245 0.82159096,0.82159096 0 0 0 -0.808245,-0.808246\"\n        id=\"path1\"\n        style=\"fill:#ffffff;stroke-width:0.808654\"></path\n      ><path\n        fill=\"currentColor\"\n        d=\"m 11.254858,31.36806 c 4.51e-4,0.892576 0.723914,1.616038 1.61649,1.616489 h 11.321087 c 0.891471,-0.0031 1.613379,-0.725018 1.616489,-1.616489 v -9.50652 l -1.616489,1.61649 v 4.654538 h -3.234564 c 0,3.234638 -4.851959,3.234638 -4.851959,0 h -3.234564 l -0.0081,-8.086522 h 7.899743 l 1.616489,-1.61649 h -9.508988 c -0.893875,-0.0027 -1.619181,0.722615 -1.616489,1.61649 z\"\n        id=\"path2\"\n        style=\"fill:#ffffff;stroke-width:0.808654\"></path\n      ><path\n        fill=\"currentColor\"\n        d=\"m 20.103961,21.721648 c 
-0.303374,-0.303375 -0.710893,-0.13747 -0.935028,0.251303 l -0.941821,1.54857 a 1.681998,1.681998 0 0 1 1.20218,0.9305 1.3084011,1.3084011 0 0 1 0.477702,-0.103508 l 0.0081,-8.15e-4 a 1.3771358,1.3771358 0 0 1 1.360658,1.351602 l 0.01439,0.01126 0.307903,-0.181957 0.704101,-0.416574 c 0.384879,-0.235455 0.554678,-0.631653 0.251303,-0.935029 l -0.679197,-0.679202 4.264906,-4.264906 -1.143316,-1.143316 -4.264,4.264906 z\"\n        id=\"path3\"\n        style=\"fill:#ffffff;stroke-width:0.808654\"></path\n      ><circle\n        cx=\"17.754019\"\n        cy=\"25.697001\"\n        r=\"0.80865288\"\n        fill=\"currentColor\"\n        id=\"circle3\"\n        style=\"fill:#ffffff;stroke-width:0.808654\"></circle\n      ></g\n    ></svg>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/icons/RemoteModel.svelte",
    "content": "<script lang=\"ts\">\nexport let size = '40';\n</script>\n\n<div role=\"img\">\n  <svg\n    width={size}\n    height={size}\n    style={$$props.style}\n    class={$$props.class}\n    viewBox=\"0 0 10.538813 10.413819\"\n    xmlns=\"http://www.w3.org/2000/svg\">\n    <defs id=\"defs1\"></defs>\n    <g id=\"layer1\" transform=\"translate(-99.747914,-142.61041)\">\n      <path\n        id=\"circle1-3\"\n        fill=\"currentColor\"\n        color=\"currentColor\"\n        style=\"opacity:0.988764;stroke-width:1.47503;stroke-linejoin:round;-inkscape-stroke:none\"\n        d=\"m 102.35251,143.92394 c -0.44583,0 -0.81255,0.36672 -0.81255,0.81255 0,0.44583 0.36672,0.81179 0.81255,0.81179 0.14364,0 0.27878,-0.0379 0.39637,-0.10443 l 0.75919,0.74471 -0.90021,0.85982 c -0.0804,-0.0269 -0.16632,-0.0419 -0.25535,-0.0419 -0.44583,0 -0.81255,0.36672 -0.81255,0.81255 0,0.44583 0.36672,0.81179 0.81255,0.81179 0.0867,0 0.17063,-0.014 0.24925,-0.0396 l 0.8964,0.88649 -0.80569,0.82551 c -0.10375,-0.0485 -0.21869,-0.0762 -0.33996,-0.0762 -0.44583,0 -0.81255,0.36672 -0.81255,0.81255 0,0.44583 0.36672,0.81256 0.81255,0.81256 0.39678,0 0.73013,-0.29032 0.79883,-0.66849 h 1.54812 c 0.0541,0.39486 0.39617,0.70279 0.80493,0.70279 0.44583,0 0.81256,-0.36596 0.81256,-0.81179 0,-0.11764 -0.0259,-0.23025 -0.0717,-0.33158 l 1.86445,-2.08779 c 0.0419,0.007 0.0851,0.0106 0.12881,0.0106 0.44583,0 0.81179,-0.36673 0.81179,-0.81255 0,-0.44583 -0.36596,-0.81256 -0.81179,-0.81256 -0.0546,0 -0.10834,0.005 -0.16007,0.016 l -1.83396,-1.95363 c 0.0462,-0.10173 0.0724,-0.21409 0.0724,-0.33234 0,-0.44583 -0.36673,-0.81254 -0.81256,-0.81255 -0.32162,0 -0.60201,0.19052 -0.73328,0.46421 h -1.6693 c -0.12363,-0.29201 -0.41432,-0.49851 -0.74928,-0.49851 z m 0,0.49241 c 0.18071,0 0.32243,0.13945 0.32243,0.32014 0,0.18071 -0.14166,0.32243 -0.32243,0.32243 -0.18071,0 -0.32243,-0.14166 -0.32243,-0.32243 0,-0.1807 0.14166,-0.32014 0.32243,-0.32014 z m 3.15188,0.0343 c 0.18071,0 0.31938,0.13945 
0.31938,0.32014 0,0.18071 -0.13868,0.32243 -0.31938,0.32243 -0.1807,0 -0.32319,-0.14166 -0.32319,-0.32243 0,-0.1807 0.14243,-0.32014 0.32319,-0.32014 z m -2.35915,0.46116 h 1.55956 c 0.006,0.0368 0.0153,0.0725 0.0267,0.10747 l -0.86896,0.83008 -0.77749,-0.76377 c 0.0264,-0.0548 0.0466,-0.11315 0.0602,-0.17379 z m 1.87436,0.50918 c 0.13571,0.10173 0.30367,0.16235 0.48479,0.16235 0.1495,0 0.29001,-0.0412 0.41085,-0.11281 l 1.72572,1.83473 c -0.0711,0.0774 -0.12747,0.16821 -0.16464,0.26831 h -1.20968 c -0.115,-0.30965 -0.41454,-0.53281 -0.76225,-0.53281 -0.12397,0 -0.24204,0.0288 -0.34758,0.0793 l -0.94442,-0.92765 z m -1.16166,1.11135 0.93985,0.92232 c -0.0216,0.038 -0.0403,0.0775 -0.0556,0.1189 h -1.6152 c -0.0267,-0.0833 -0.0668,-0.16075 -0.11738,-0.2302 z m -1.50543,0.96653 c 0.18071,0 0.32243,0.13946 0.32243,0.32014 0,0.1807 -0.14166,0.32243 -0.32243,0.32243 -0.18071,0 -0.32243,-0.14166 -0.32243,-0.32243 0,-0.18071 0.14166,-0.32014 0.32243,-0.32014 z m 3.15188,0.0343 c 0.18071,0 0.31938,0.13945 0.31938,0.32014 0,0.18071 -0.13868,0.32243 -0.31938,0.32243 -0.1807,0 -0.32319,-0.14166 -0.32319,-0.32243 0,-0.18071 0.14243,-0.32014 0.32319,-0.32014 z m 2.73418,0 c 0.1807,0 0.32242,0.13945 0.32242,0.32014 0,0.18071 -0.14166,0.32243 -0.32242,0.32243 -0.18071,0 -0.32015,-0.14166 -0.32015,-0.32243 0,-0.18071 0.13945,-0.32014 0.32015,-0.32014 z m -5.11162,0.53052 h 1.59309 c 0.0112,0.0411 0.0255,0.0806 0.0427,0.11892 l -0.92155,0.94365 -0.83618,-0.82627 c 0.0526,-0.0709 0.0945,-0.15064 0.12195,-0.23629 z m 3.16179,0 h 1.16547 c 0.0372,0.13718 0.10916,0.26032 0.20657,0.35902 l -1.74401,1.95287 c -0.12119,-0.0722 -0.26225,-0.11358 -0.41238,-0.11358 -0.1469,0 -0.28534,0.0397 -0.40475,0.109 l -0.90936,-0.89944 0.89945,-0.9208 c 0.12168,0.073 0.26363,0.1151 0.41466,0.1151 0.37318,0 0.69073,-0.257 0.78435,-0.60217 z m -2.44223,1.75926 0.88115,0.87124 h -1.64111 c -0.01,-0.0207 -0.0205,-0.0412 -0.032,-0.061 z m -1.494,0.89716 c 0.18071,0 0.32243,0.13868 0.32243,0.31938 0,0.18071 
-0.14166,0.32243 -0.32243,0.32243 -0.18071,0 -0.32243,-0.14166 -0.32243,-0.32243 0,-0.1807 0.14166,-0.31938 0.32243,-0.31938 z m 3.15188,0.0343 c 0.18071,0 0.31938,0.13946 0.31938,0.32014 0,0.18071 -0.13868,0.32243 -0.31938,0.32243 -0.1807,0 -0.32319,-0.14166 -0.32319,-0.32243 0,-0.1807 0.14243,-0.32014 0.32319,-0.32014 z m 3.67631,-8.14306 -0.27441,0.001 a 0.33557023,0.33557023 0 0 0 -0.33691,0.33691 0.33557023,0.33557023 0 0 0 0.33691,0.33463 h 0.26526 l 0.0549,0.003 0.0434,0.005 0.0396,0.0122 0.0404,0.0144 0.0373,0.0175 0.0351,0.0229 0.0343,0.0229 0.032,0.029 0.0259,0.0313 0.0259,0.032 0.0198,0.0373 0.0206,0.0374 0.0145,0.0381 0.009,0.0427 a 0.33557023,0.33557023 0 0 0 0.38265,0.27974 0.33557023,0.33557023 0 0 0 0.28279,-0.38341 l -0.003,-0.0145 a 0.33560379,0.33560379 0 0 0 -0.009,-0.032 l -0.0168,-0.0747 a 0.33560379,0.33560379 0 0 0 -0.009,-0.032 l -0.0259,-0.0717 a 0.33560379,0.33560379 0 0 0 -0.0145,-0.029 l -0.032,-0.0663 a 0.33560379,0.33560379 0 0 0 -0.0145,-0.0282 l -0.0404,-0.064 a 0.33560379,0.33560379 0 0 0 -0.0175,-0.0282 l -0.0427,-0.0579 a 0.33560379,0.33560379 0 0 0 -0.0236,-0.0259 l -0.0488,-0.0549 a 0.33560379,0.33560379 0 0 0 -0.0229,-0.0229 l -0.0542,-0.0496 a 0.33560379,0.33560379 0 0 0 -0.0229,-0.0206 l -0.061,-0.0427 a 0.33560379,0.33560379 0 0 0 -0.0259,-0.0206 l -0.0633,-0.0374 a 0.33560379,0.33560379 0 0 0 -0.029,-0.0145 l -0.0663,-0.0351 a 0.33560379,0.33560379 0 0 0 -0.0313,-0.0115 l -0.0694,-0.0259 a 0.33560379,0.33560379 0 0 0 -0.032,-0.009 l -0.0747,-0.0198 a 0.33560379,0.33560379 0 0 0 -0.0313,-0.006 l -0.0777,-0.0115 a 0.33560379,0.33560379 0 0 0 -0.0351,-0.003 l -0.0778,-0.006 a 0.33560379,0.33560379 0 0 0 -0.0168,0 z m -8.34658,0.001 -0.0777,0.006 a 0.33560379,0.33560379 0 0 0 -0.0343,0.003 l -0.0747,0.0115 a 0.33560379,0.33560379 0 0 0 -0.0351,0.006 l -0.0717,0.0198 a 0.33560379,0.33560379 0 0 0 -0.032,0.009 l -0.0686,0.0259 a 0.33557023,0.33557023 0 0 0 -0.19895,0.42914 0.33557023,0.33557023 0 0 0 0.42915,0.202 l 
0.0518,-0.0206 0.0434,-0.0115 0.0427,-0.006 0.0434,-0.003 h 0.0175 1.32478 a 0.33557023,0.33557023 0 0 0 0.33767,-0.33386 0.33557023,0.33557023 0 0 0 -0.33767,-0.33692 h -1.34232 a 0.33557023,0.33557023 0 0 0 -0.008,0.002 0.33557023,0.33557023 0 0 0 -0.009,-0.002 z m 2.70216,0 a 0.33557023,0.33557023 0 0 0 -0.33387,0.33691 0.33557023,0.33557023 0 0 0 0.33387,0.33463 h 1.34307 a 0.33557023,0.33557023 0 0 0 0.33691,-0.33463 0.33557023,0.33557023 0 0 0 -0.33691,-0.33691 z m 2.68538,0 a 0.33557023,0.33557023 0 0 0 -0.33691,0.33691 0.33557023,0.33557023 0 0 0 0.33691,0.33463 h 1.34232 a 0.33557023,0.33557023 0 0 0 0.33462,-0.33463 0.33557023,0.33557023 0 0 0 -0.33462,-0.33691 z m -6.13912,1.20968 a 0.33557023,0.33557023 0 0 0 -0.334623,0.33463 v 1.34231 a 0.33557023,0.33557023 0 0 0 0.334623,0.33463 0.33557023,0.33557023 0 0 0 0.33692,-0.33463 v -1.34234 a 0.33557023,0.33557023 0 0 0 -0.33692,-0.33463 z m 9.86726,0.76072 a 0.33557023,0.33557023 0 0 0 -0.33462,0.33691 v 1.34308 a 0.33557023,0.33557023 0 0 0 0.33462,0.33386 0.33557023,0.33557023 0 0 0 0.33692,-0.33386 v -1.34308 a 0.33557023,0.33557023 0 0 0 -0.33692,-0.33691 z m -9.86726,1.92162 a 0.33557023,0.33557023 0 0 0 -0.334623,0.33691 v 1.34308 a 0.33557023,0.33557023 0 0 0 0.334623,0.33386 0.33557023,0.33557023 0 0 0 0.33692,-0.33386 v -1.34308 a 0.33557023,0.33557023 0 0 0 -0.33692,-0.33691 z m 9.86726,0.76377 a 0.33557023,0.33557023 0 0 0 -0.33462,0.33386 v 1.34232 a 0.33557023,0.33557023 0 0 0 0.33462,0.33767 0.33557023,0.33557023 0 0 0 0.33692,-0.33767 V 147.602 a 0.33557023,0.33557023 0 0 0 -0.33692,-0.33386 z m -9.86726,1.92162 a 0.33557023,0.33557023 0 0 0 -0.334623,0.33691 v 1.34232 a 0.33557023,0.33557023 0 0 0 0.334623,0.33462 0.33557023,0.33557023 0 0 0 0.33692,-0.33462 v -1.34232 a 0.33557023,0.33557023 0 0 0 -0.33692,-0.33691 z m 9.86726,0.76301 a 0.33557023,0.33557023 0 0 0 -0.33462,0.33462 v 1.34232 a 0.33557023,0.33557023 0 0 0 0.33462,0.33691 0.33557023,0.33557023 0 0 0 0.33692,-0.33691 v 
-1.34232 a 0.33557023,0.33557023 0 0 0 -0.33692,-0.33462 z m -9.75216,1.92167 a 0.33557023,0.33557023 0 0 0 -0.17226,0.0152 l -7.7e-4,-7.6e-4 a 0.33557023,0.33557023 0 0 0 -0.201233,0.42914 l 0.005,0.0152 a 0.33560379,0.33560379 0 0 0 0.0122,0.0282 l 0.0343,0.0693 a 0.33560379,0.33560379 0 0 0 0.0145,0.0289 l 0.0381,0.0633 a 0.33560379,0.33560379 0 0 0 0.0198,0.026 l 0.0435,0.058 a 0.33560379,0.33560379 0 0 0 0.0198,0.026 l 0.0495,0.0549 a 0.33560379,0.33560379 0 0 0 0.0229,0.0229 l 0.0549,0.0488 a 0.33560379,0.33560379 0 0 0 0.0259,0.0207 l 0.0572,0.0457 a 0.33560379,0.33560379 0 0 0 0.0259,0.0175 l 0.064,0.0373 a 0.33560379,0.33560379 0 0 0 0.0282,0.0168 l 0.0694,0.032 a 0.33560379,0.33560379 0 0 0 0.029,0.0118 l 0.0717,0.0267 a 0.33560379,0.33560379 0 0 0 0.032,0.0118 l 0.0717,0.0175 a 0.33560379,0.33560379 0 0 0 0.0351,0.006 l 0.0747,0.0118 a 0.33560379,0.33560379 0 0 0 0.0343,0.006 l 0.0777,0.003 a 0.33560379,0.33560379 0 0 0 0.0175,0 h 0.42915 a 0.33557023,0.33557023 0 0 0 0.33386,-0.33387 0.33557023,0.33557023 0 0 0 -0.33386,-0.33691 h -0.42076 l -0.0518,-0.003 -0.0435,-0.006 -0.0427,-0.0118 -0.0404,-0.0152 -0.0381,-0.0168 -0.0343,-0.0205 -0.032,-0.026 -0.0313,-0.0289 -0.029,-0.0282 -0.0229,-0.0351 -0.0229,-0.0344 -0.0175,-0.0373 a 0.33557023,0.33557023 0 0 0 -0.25689,-0.21565 z m 9.08368,0.46421 -0.0549,0.009 -0.0549,0.003 h -1.18072 a 0.33557023,0.33557023 0 0 0 -0.33463,0.33691 0.33557023,0.33557023 0 0 0 0.33463,0.33463 h 1.18986 a 0.33560379,0.33560379 0 0 0 0.0168,0 l 0.0778,-0.003 a 0.33560379,0.33560379 0 0 0 0.0343,-0.006 l 0.0724,-0.009 a 0.33557023,0.33557023 0 0 0 0.28203,-0.38342 0.33557023,0.33557023 0 0 0 -0.38264,-0.28203 z m -6.65821,0.0118 a 0.33557023,0.33557023 0 0 0 -0.33692,0.33691 0.33557023,0.33557023 0 0 0 0.33692,0.33463 h 1.34002 a 0.33557023,0.33557023 0 0 0 0.33691,-0.33463 0.33557023,0.33557023 0 0 0 -0.33691,-0.33691 z m 2.68234,0 a 0.33557023,0.33557023 0 0 0 -0.33387,0.33691 0.33557023,0.33557023 0 0 0 0.33387,0.33463 h 
1.34231 a 0.33557023,0.33557023 0 0 0 0.33691,-0.33463 0.33557023,0.33557023 0 0 0 -0.33691,-0.33691 z\"\n      ></path>\n    </g>\n  </svg>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/images/DashboardBanner.svelte",
    "content": "<svg\n  class={$$props.class}\n  style={$$props.style}\n  viewBox=\"0 0 157.406 31.095\"\n  version=\"1.1\"\n  xml:space=\"preserve\"\n  xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n  xmlns=\"http://www.w3.org/2000/svg\">\n  <defs id=\"defs1\">\n    <linearGradient id=\"linearGradient163\">\n      <stop style=\"stop-color:#234f70;stop-opacity:1\" offset=\"0\" id=\"stop164\"></stop>\n      <stop style=\"stop-color:#27004d;stop-opacity:1\" offset=\".721\" id=\"stop163\"></stop>\n    </linearGradient>\n    <linearGradient\n      xlink:href=\"#linearGradient163\"\n      id=\"linearGradient1554\"\n      gradientUnits=\"userSpaceOnUse\"\n      gradientTransform=\"matrix(.30353 0 0 .10312 2064.274 -127.527)\"\n      x1=\"-29.16\"\n      y1=\"2931.953\"\n      x2=\"419.885\"\n      y2=\"3628.453\"></linearGradient>\n    <linearGradient\n      xlink:href=\"#linearGradient163\"\n      id=\"linearGradient189\"\n      gradientUnits=\"userSpaceOnUse\"\n      gradientTransform=\"matrix(.14885 0 0 .10645 260.403 -575.337)\"\n      x1=\"-29.16\"\n      y1=\"2931.953\"\n      x2=\"419.885\"\n      y2=\"3628.453\"></linearGradient>\n    <linearGradient\n      xlink:href=\"#linearGradient163\"\n      id=\"linearGradient191\"\n      gradientUnits=\"userSpaceOnUse\"\n      gradientTransform=\"matrix(.23226 0 0 .1032 192.694 -188.673)\"\n      x1=\"-29.16\"\n      y1=\"2931.953\"\n      x2=\"419.885\"\n      y2=\"3628.453\"></linearGradient>\n    <clipPath clipPathUnits=\"userSpaceOnUse\" id=\"clipPath87-3-1-8\">\n      <path\n        d=\"M1307.43 87.858c-17.935 2.345-34.17 2.518-51.888.133-1.448-.195-2.54.812-2.403 2.267.96 10.254 3.313 20.274 7.821 29.267.655 1.306 1.806 3.378 2.55 4.636 3.712 6.278 8.899 13.221 15.351 14.755 1.419.337 3.825.337 5.243 0 6.453-1.534 11.639-8.477 15.351-14.755.744-1.258 1.895-3.33 2.55-4.636 4.525-9.028 6.879-19.09 7.832-29.387.135-1.455-.959-2.47-2.408-2.28z\"\n        
style=\"opacity:1;fill:#818181;stroke:none;stroke-width:.8;stroke-dasharray:none\"\n        id=\"path88-1-5-8\"\n        transform=\"translate(-104.775 -4.768)\"></path>\n    </clipPath>\n    <clipPath clipPathUnits=\"userSpaceOnUse\" id=\"clipPath1165\">\n      <path\n        id=\"path1165\"\n        class=\"st0\"\n        d=\"M2205.713 129.613c.014-.013.093.034.174.098a.865.865 0 0 0 .198.13.54.54 0 0 1 .127.056c.13.072.466.105.644.058.175-.044.202-.03.257.135.07.23.178.5.34.835.125.26.487.633.7.715a.795.795 0 0 0 .207.054c.02-.006.069.023.11.065.073.079.46.287.516.281.017-.003.1.013.183.036.304.084.851-.01 1.36-.236.134-.06.312-.127.509-.199a.643.643 0 0 0 .161-.079.919.919 0 0 1 .33-.11c.149 0 1.612-.468 1.921-.62a9.52 9.52 0 0 0 1.786-1.17c1.036-.848 1.404-1.137 1.754-1.36.088-.057.243-.164.356-.238a14.88 14.88 0 0 1 .805-.514c.173-.11.32-.19.94-.507.112-.06.312-.14.442-.183a1.61 1.61 0 0 0 .252-.097.407.407 0 0 1 .136-.035c.112-.018.227-.046.863-.218.14-.038.378-.094.517-.125.14-.031.283-.066.315-.075.032-.01.153-.038.269-.066.118-.025.258-.063.314-.083.056-.019.112-.032.126-.017.014.014.063-.02.112-.067.068-.064.082-.098.052-.13-.047-.05-1.344-.09-1.755-.06-.49.04-1.317.196-1.747.328-.098.032-.206.06-.238.07-.199.047-1.229.402-1.264.436a.578.578 0 0 1-.134.06c-.206.06-.392.135-.438.179a.159.159 0 0 1-.095.043.661.661 0 0 0-.186.068l-.31.158c-.179.09-.474.255-.697.392a2.203 2.203 0 0 1-.221.116c-.05.02-.18.089-.289.145-.535.299-.64.359-.799.441-.19.1-.51.275-.686.375-.23.13-.521.285-.687.375-.095.05-.215.116-.268.153-.053.036-.13.062-.164.054-.052-.005-.048-.029.02-.12.308-.395.524-.695.579-.795.047-.085.425-.457 1.378-1.356 1.316-1.237 1.637-1.569 1.919-1.992.08-.123.184-.277.23-.348.225-.335.312-.506.407-.799.218-.654.393-1.132.425-1.162a.741.741 0 0 1 .23-.064.699.699 0 0 0 .232-.07.143.143 0 0 1 .088-.036.57.57 0 0 0 .196-.085c.084-.047.24-.133.352-.192.26-.14.773-.626 1.043-.99.108-.15.262-.359.345-.464.082-.106.148-.215.148-.243 
0-.028.03-.075.062-.106.032-.03.109-.164.178-.297.062-.134.135-.27.157-.312.113-.195.148-.6.055-.648-.048-.022-.098.019-.34.276a3.33 3.33 0 0 0-.347.411c-.04.072-.086.116-.14.132a.386.386 0 0 0-.133.073 1.283 1.283 0 0 1-.236.143c-.1.053-.292.149-.422.219-.531.274-.945.423-1.002.363-.03-.032-.031-.56-.01-1.854.01-.222.055-.54.108-.692.01-.02.038-.145.07-.276.07-.296.194-.685.23-.72a.655.655 0 0 0 .077-.147.777.777 0 0 1 .095-.178c.054-.071-.043-.223-.124-.194-.088.036-.522.598-.631.817a8.622 8.622 0 0 0-.607 1.45c-.165.488-.173.523-.205.675a.549.549 0 0 1-.053.165c-.014.013-.029.055-.03.096 0 .056-.06.287-.155.656-.012.045-.046.165-.066.265-.051.21-.112.275-.34.384-.158.076-.239.105-.505.202-.196.072-.646.41-1.182.893-.508.456-.579.503-1.344.966-.116.07-.25.156-.3.19-.562.392-.753.519-.766.504-.01-.007 0-.09.018-.18.048-.255.06-.363.09-.646.01-.146.03-.274.046-.287.06-.058.122-1.26.114-2.146 0-.26-.01-.629-.01-.82 0-.19-.012-.36-.022-.372a.722.722 0 0 1-.026-.205c0-.1-.037-.452-.07-.78a32.33 32.33 0 0 1-.07-.671 29.58 29.58 0 0 1-.068-.72c-.062-.564-.133-1.309-.125-1.35 0-.032-.01-.15-.018-.261a10.81 10.81 0 0 1-.054-.883c0-.056 0-.216-.01-.351a9.78 9.78 0 0 1 .028-1.381c.02-.25.043-.589.05-.759.01-.17.037-.46.07-.647a4.99 4.99 0 0 0 .05-.374c0-.014.018-.104.044-.197.027-.093.05-.21.052-.266 0-.055.034-.228.072-.386.035-.156.067-.315.068-.356 0-.077.113-.5.135-.52.01-.008.057-.183.117-.39.057-.21.141-.5.187-.645.049-.148.102-.32.124-.389.023-.068.08-.244.135-.399.109-.299.119-.41.036-.447-.099-.047-.503.418-.636.727-.015.041-.084.16-.146.274-.065.11-.131.24-.153.294-.02.052-.07.169-.114.258-.07.154-.159.353-.314.738-.041.093-.085.21-.111.261a.963.963 0 0 0-.06.151.539.539 0 0 1-.044.117c-.037.076-.12.31-.25.698a22.27 22.27 0 0 0-.382 1.216c-.03.096-.064.203-.082.234-.015.034-.02.072-.01.083.013.014.01.042-.013.059a.54.54 0 0 0-.067.206 1.283 1.283 0 0 1-.059.231 4.25 4.25 0 0 0-.053.173l-.1.379c-.037.145-.097.424-.137.618-.04.193-.104.525-.15.732a9.185 9.185 0 0 
0-.104.532c-.013.086-.066.421-.124.746-.057.325-.107.664-.116.76-.01.098-.028.264-.048.378-.017.11-.044.36-.063.547l-.05.481c-.053.45-.101 1.226-.114 1.875-.011.41-.012.75-.01.753.01.007-.053 2.06-.066 2.31-.01.087-.024.246-.037.36l-.09.626c-.021.156-.04.336-.048.405-.01.066-.021.135-.04.152a1.308 1.308 0 0 0-.083.31c-.028.156-.07.297-.088.314a.08.08 0 0 0-.023.062c.01.014-.032.18-.086.359-.053.179-.1.358-.108.393-.01.034-.023.09-.04.12-.02.045-.194.651-.248.858-.05.197-.12.372-.145.368-.042-.001-.15-.386-.2-.73-.092-.625-.117-.837-.123-1.035-.03-.723-.013-1.653.05-2.314.047-.533.075-2.022.056-2.904a8.318 8.318 0 0 0-.08-1.089 19.13 19.13 0 0 1-.062-.55c0-.038-.03-.295-.06-.577-.029-.279-.057-.543-.055-.592 0-.045-.01-.128-.016-.18a.95.95 0 0 1-.017-.154c0-.031-.02-.257-.043-.5a10.088 10.088 0 0 1 0-1.146c.02-.385.05-.988.066-1.342.016-.354.03-.732.04-.843.037-.523.096-1.757.126-2.631.032-.985 0-1.388-.172-2.118-.028-.116-.039-.22-.029-.23.01-.01.01-.035-.01-.053-.017-.017-.03-.066-.035-.108 0-.045-.037-.202-.08-.356a3.707 3.707 0 0 1-.083-.329c0-.024-.06-.234-.127-.461a18.594 18.594 0 0 1-.14-.503 1.127 1.127 0 0 0-.058-.176c-.02-.049-.042-.122-.052-.154-.01-.034-.05-.181-.096-.328-.07-.224-.094-.263-.146-.254-.045.009-.07.047-.09.12-.012.058-.043.724-.062 1.473a48.295 48.295 0 0 1-.06 1.56 4.172 4.172 0 0 0-.029.277c0 .045-.039.287-.083.533a7.14 7.14 0 0 0-.077.574c0 .1-.114.677-.143.705-.014.013-.704 2.97-.718 3.084a6.16 6.16 0 0 1-.13.618.791.791 0 0 0-.043.19c-.016.097-.036.19-.054.207-.018.017-.018.038 0 .056.017.017.016.038 0 .052a1.312 1.312 0 0 0-.07.29c-.025.145-.073.393-.108.556a4.286 4.286 0 0 0-.068.355c0 .035-.033.2-.065.374a3.405 3.405 0 0 0-.054.342c0 .017-.01.11-.03.21-.02.101-.036.312-.04.479 0 .163-.015.305-.022.312-.028.027-.1.778-.079.8.01.01.014.135 0 .274-.01.138 0 .326.014.413.015.088.031.282.034.428 0 
.295.038.688.065.717.01.01.025.09.034.185.02.184.044.31.073.39.01.032.038.154.06.276.02.122.045.227.055.238.01.01.032.105.05.21.018.104.052.26.074.348.025.091.051.189.057.224.11.478.288 1.208.299 1.219.01.01.022.094.033.185.01.094.033.205.053.248.02.042.029.087.015.1-.014.014-.011.039 0 .053.024.025.073.262.068.342a12.947 12.947 0 0 1 .124.864c.028.243.058.452.07.467.01.01.026.09.028.177 0 .087.01.195.015.244.032.375.044.567.043.636 0 .188.019.462.043.622.015.095.016.181.01.192-.011.01-.194-.162-.407-.386a6.133 6.133 0 0 0-.539-.517c-.279-.208-.46-.32-.509-.322a.143.143 0 0 1-.086-.04c-.034-.036-.319-.15-.532-.212-.217-.064-.517.105-.571.326-.039.172-.052.28-.025.308.014.014.023.06.025.105 0 .125.333.664.683 1.132.117.152.204.293.196.314-.018.016.94 1.023.971.993zm4.722-1.57a4.45 4.45 0 0 1 .166-.36c.047-.092.099-.195.114-.236.01-.038.602-.627 1.36-1.354 1.43-1.373 1.296-1.258 2.011-1.674.1-.06.265-.156.37-.223.106-.066.251-.156.322-.196.07-.04.197-.113.275-.167.292-.183.482-.269.453-.207-.04.086-.918.872-1.148 1.03-.48.32-.714.502-1.133.866-.465.408-1.007.97-1.159 1.203-.097.146-1.578 1.573-1.651 1.589-.063.012-.057-.068.02-.27zm-7.622-1.896c.151.053.381.017.48-.078.093-.088.101-.122.082-.3-.025-.24-.319-.442-.624-.442-.163-.001-.314.196-.302.388.01.166.175.362.364.432zm6.482-11.181c.17 0 .198-.012.355-.161.157-.149.171-.176.179-.34.01-.148-.01-.197-.093-.29-.088-.092-.136-.107-.32-.109-.247-.003-.331.037-.464.204-.136.17-.126.316.035.528.12.17.12.17.308.168zm4.23 3.961c.14.018.167.004.295-.117.121-.115.143-.156.154-.315.011-.16 0-.195-.103-.302-.176-.178-.241-.204-.416-.153-.192.054-.239.098-.327.311-.081.193-.068.235.107.42.112.11.166.14.29.156z\"\n        style=\"fill:none;stroke:#251e5b;stroke-width:.226122;stroke-dasharray:none;stroke-opacity:1\"></path>\n    </clipPath>\n    <clipPath clipPathUnits=\"userSpaceOnUse\" id=\"clipPath189\">\n      <path\n        
style=\"fill:url(#linearGradient189);fill-opacity:1;stroke-width:.517138;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4.4\"\n        id=\"rect189\"\n        transform=\"matrix(-.98033 -.19739 -.0909 -.99586 0 0)\"\n        d=\"M260.117-258.27h77.189v32.101h-77.189z\"></path>\n    </clipPath>\n    <clipPath clipPathUnits=\"userSpaceOnUse\" id=\"clipPath190\">\n      <path\n        style=\"fill:url(#linearGradient191);fill-opacity:1;stroke-width:.636045;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4.4\"\n        id=\"rect191\"\n        transform=\"scale(-1 1)\"\n        d=\"M192.248 118.712h120.444v31.121H192.248z\"></path>\n    </clipPath>\n    <filter\n      style=\"color-interpolation-filters:sRGB\"\n      id=\"filter66-4-6-0\"\n      x=\"-.068\"\n      y=\"-.116\"\n      width=\"1.137\"\n      height=\"1.231\">\n      <feGaussianBlur stdDeviation=\"1.171\" result=\"blur-xs\" id=\"feGaussianBlur66-4-1-7\"></feGaussianBlur>\n    </filter>\n    <filter\n      style=\"color-interpolation-filters:sRGB\"\n      id=\"filter55-7-0-8\"\n      x=\"-.046\"\n      y=\"-.195\"\n      width=\"1.093\"\n      height=\"1.39\">\n      <feGaussianBlur stdDeviation=\".63\" result=\"blur-xs\" id=\"feGaussianBlur55-6-6-6\"></feGaussianBlur>\n    </filter>\n  </defs>\n  <g id=\"layer1\" transform=\"translate(-2063.771 -134.042)\">\n    <g id=\"g1748\" transform=\"translate(.078 -45.558)\">\n      <path\n        style=\"fill:url(#linearGradient1554);fill-opacity:1;stroke-width:.726814;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4.4\"\n        id=\"rect1308-0\"\n        d=\"M2063.693 179.599h157.406v31.095h-157.406z\"></path>\n      <g id=\"g1525\" transform=\"translate(.132 81.47)\" clip-path=\"none\">\n        <g id=\"g1408\" transform=\"matrix(.40675 0 0 .4079 1490.433 -4.433)\" style=\"stroke-width:1.02535\">\n          <g id=\"g1407\" transform=\"matrix(.48705 0 0 .48705 893.123 241.34)\" style=\"stroke-width:1.02535\">\n            
<path\n              id=\"path1360\"\n              style=\"opacity:1;stroke-width:1.02535;stroke-linecap:round;stroke-linejoin:round\"\n              d=\"M1313.26 93.146c0 3.08-1.23 5.579-2.747 5.579s-2.746-2.498-2.746-5.58c0-3.08 1.23-5.579 2.746-5.578 1.517 0 2.747 2.497 2.747 5.579zm-60.587 7.489c.75 2.988 2.551 5.112 4.022 4.742 1.471-.369 2.056-3.091 1.306-6.08-.75-2.988-2.552-5.111-4.023-4.742-1.471.37-2.055 3.091-1.305 6.08z\"\n              transform=\"rotate(7.167 1257.962 -753.498)\"></path>\n            <ellipse\n              style=\"opacity:1;fill:#000;fill-opacity:1;stroke:none;stroke-width:1.36351;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none\"\n              id=\"ellipse1360\"\n              cx=\"1176.713\"\n              cy=\"132.727\"\n              rx=\"5.982\"\n              ry=\"3.752\"></ellipse>\n            <rect\n              style=\"opacity:1;fill:#464649;fill-opacity:1;stroke:#000;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none\"\n              id=\"rect1361\"\n              width=\"33.156\"\n              height=\"7.764\"\n              x=\"1160.738\"\n              y=\"79.958\"\n              ry=\".098\"\n              rx=\".099\"></rect>\n            <path\n              d=\"M1307.43 87.858c-17.935 2.345-34.17 2.518-51.888.133-1.448-.195-2.54.812-2.403 2.267.96 10.254 3.313 20.274 7.821 29.267.655 1.306 1.806 3.378 2.55 4.636 3.712 6.278 8.899 13.221 15.351 14.755 1.419.337 3.825.337 5.243 0 6.453-1.534 11.639-8.477 15.351-14.755.744-1.258 1.895-3.33 2.55-4.636 4.525-9.028 6.879-19.09 7.832-29.387.135-1.455-.959-2.47-2.408-2.28z\"\n              style=\"opacity:1;fill:#818181;stroke:none;stroke-width:.820282;stroke-dasharray:none;paint-order:normal\"\n              id=\"path1361\"\n              transform=\"translate(-104.775 -4.768)\"></path>\n            <path\n              d=\"M1278.84 42.895c-10.766.995-23.561 8.915-25.718 23.89-.208 1.446-.333 3.815-.392 5.275-.088 
2.176-.13 4.362-.126 6.551 0 1.462 1.201 2.829 2.648 3.033 17.986 2.54 34.153 2.362 52.46-.128 1.447-.197 2.645-1.554 2.647-3.016 0-2.152-.042-4.301-.128-6.44-.059-1.46-.182-3.83-.39-5.275-2.157-14.975-14.95-22.895-25.716-23.89-1.454-.134-3.83-.134-5.285 0z\"\n              style=\"opacity:1;fill:#818181;stroke:#000;stroke-width:.820282;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none\"\n              id=\"path1362\"\n              transform=\"translate(-104.775 -1.593)\"></path>\n            <path\n              d=\"M1278.84 42.895c-10.766.995-23.561 8.915-25.718 23.89-.208 1.446-.333 3.815-.392 5.275-.088 2.176-.13 4.362-.126 6.551 0 1.462 1.201 2.829 2.648 3.033 17.986 2.54 34.153 2.362 52.46-.128 1.447-.197 2.645-1.554 2.647-3.016 0-2.152-.042-4.301-.128-6.44-.059-1.46-.182-3.83-.39-5.275-2.157-14.975-14.95-22.895-25.716-23.89-1.454-.134-3.83-.134-5.285 0z\"\n              style=\"opacity:1;fill:#000;stroke:none;stroke-width:.512676;stroke-linecap:round;stroke-linejoin:round\"\n              id=\"path1363\"\n              transform=\"matrix(.77418 0 0 .77418 184.61 12.664)\"></path>\n            <path\n              style=\"fill:none;fill-opacity:1;stroke:#000;stroke-width:.93102;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n              d=\"M166.687-90.618a2.357 2.357 0 0 1 1.46-1.346 2.353 2.353 0 0 1 1.868.213\"\n              id=\"path1364\"\n              transform=\"matrix(-1.3041 .097 .097 1.3041 1419.543 144.375)\"></path>\n            <path\n              style=\"fill:none;fill-opacity:1;stroke:#000;stroke-width:.93102;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n              d=\"M190.906-88.08a2.357 2.357 0 0 0-1.148-1.62 2.353 2.353 0 0 0-1.871-.18\"\n              id=\"path1365\"\n              transform=\"scale(-1.3077 1.3077) rotate(-10.341 313.312 5934.297)\"></path>\n   
         <path\n              style=\"fill:#000;fill-opacity:1;stroke:none;stroke-width:.83244;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n              d=\"M1315.199 89.01c1.452-1.584 3.407-3.799 4.933-5.453.99-1.073 2.747-2.735 4.12-3.202 1.26-.429 3.633-.2 4.881.54l.087.054c1.217.752 2.158 3.174 1.838 4.582-.028.121-.06.238-.096.35-.44 1.378-2.288 3.054-3.57 3.746l-.05.027c-1.288.688-3.513 1.52-4.874 2.053-1.15.45-2.503.977-3.642 1.421-1.361.531-3.278.1-4.281-.962-.659-.697-.333-2.08.653-3.156zm-66.79 0c-1.452-1.584-3.407-3.799-4.933-5.453-.99-1.073-2.746-2.735-4.12-3.202-1.26-.429-3.633-.2-4.881.54a8.599 8.599 0 0 0-.087.054c-1.217.752-2.158 3.174-1.837 4.582.027.121.06.238.096.35.44 1.378 2.287 3.054 3.568 3.746l.051.027c1.289.688 3.514 1.52 4.875 2.053l3.64 1.421c1.362.531 3.28.1 4.283-.962.658-.697.333-2.08-.654-3.156z\"\n              id=\"path1366\"\n              transform=\"translate(-105.092 -.005)\"></path>\n            <path\n              style=\"fill:#000;fill-opacity:1;stroke:none;stroke-width:.919646;stroke-linecap:butt;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n              d=\"M1271.07 146.141c-3.675 2.376-9.03 3.945-10.155 4.426-1.562.67-2.813 3.185-1.762 4.633 1.344 1.851 3.156 1.596 4.315 1.292 1.158-.304 5.561-3.075 5.561-3.075s-2.855 2.678-3.433 3.886c-.577 1.208-1.068 2.548 1.133 3.948 5.11 2.194 9.556-8.733 12.049-11.255l.133-.157c1.36-1.604 1.086-3.632-.755-4.655-2.157-1.198-5.018-.38-7.086.957z\"\n              id=\"path1367\"\n              transform=\"translate(-102.353 -6.489)\"></path>\n            <g\n              id=\"g1377\"\n              transform=\"matrix(.62255 0 0 .62255 403.618 .988)\"\n              style=\"display:inline;opacity:.5;mix-blend-mode:normal;stroke:#8ed0c3;stroke-width:.809678;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter66-4-6-0)\">\n              <g\n                id=\"g1367\"\n                transform=\"scale(-3.20805 3.20805) 
rotate(-5.092 -707.12 4046.622)\"\n                style=\"fill:none;stroke:#8ed0c3;stroke-width:.25239;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                <ellipse\n                  ry=\"3.16\"\n                  rx=\"3.576\"\n                  cy=\"75.12\"\n                  cx=\"-34.974\"\n                  id=\"ellipse1367\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8ed0c3;stroke-width:.252391;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                  transform=\"matrix(.99946 -.03271 .03774 .99929 0 0)\"></ellipse>\n              </g>\n              <g\n                id=\"g1372\"\n                transform=\"matrix(-2.846 .09218 .09218 2.846 1304.402 -68.829)\"\n                style=\"stroke:#8ed0c3;stroke-width:.284349;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                <g\n                  transform=\"matrix(-.80864 -.0407 -.04101 .80568 801.15 104.048)\"\n                  style=\"stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                  id=\"g1371\">\n                  <g\n                    id=\"g1370\"\n                    style=\"stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M956.328-6.287c-2.174.008-3.902.727-5.254 1.625\"\n                      id=\"path1368\"></path>\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      
d=\"M957.005-5.389c-2.45.31-3.99 1.667-5.216 3\"\n                      id=\"path1369\"></path>\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.146-4.45c-2.038.806-3.038 2.468-3.8 4.057\"\n                      id=\"path1370\"></path>\n                  </g>\n                </g>\n              </g>\n              <g\n                id=\"g1376\"\n                transform=\"rotate(7.875 1141.849 8667.541) scale(2.84749)\"\n                style=\"stroke:#8ed0c3;stroke-width:.284349;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                <g\n                  transform=\"matrix(-.80864 -.0407 -.04101 .80568 801.15 104.048)\"\n                  style=\"stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                  id=\"g1375\">\n                  <g\n                    id=\"g1374\"\n                    style=\"stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M956.328-6.287c-2.174.008-3.902.727-5.254 1.625\"\n                      id=\"path1372\"></path>\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.005-5.389c-2.45.31-3.99 1.667-5.216 3\"\n                      id=\"path1373\"></path>\n                    <path\n                      
style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.351832;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.146-4.45c-2.038.806-3.038 2.468-3.8 4.057\"\n                      id=\"path1374\"></path>\n                  </g>\n                </g>\n              </g>\n              <path\n                id=\"path1376\"\n                style=\"fill:none;fill-opacity:1;stroke:#8ed0c3;stroke-width:.809678;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                d=\"M1245.914 100.242c-.303-.775-.33-2.132-1.21-2.324l-2.62-.571-2.617.594c-.878.2-.895 1.556-1.19 2.334-.304.778 3.831 3.283 3.831 3.283s4.11-2.54 3.806-3.316z\"\n              ></path>\n              <path\n                id=\"path1377\"\n                d=\"M1241.642 104.456a2.154 2.154 0 0 1-.95.817 2.428 2.428 0 0 1-1.333.16 2.769 2.769 0 0 1-.905-.315l.001.048a3.626 3.626 0 0 0 7.252 0v-.042c-.28.152-.58.261-.894.309a2.427 2.427 0 0 1-1.334-.16 2.157 2.157 0 0 1-.95-.816c-.592-1.46-.483-1.076-.886 0z\"\n                style=\"fill:none;fill-opacity:1;stroke:#8ed0c3;stroke-width:.809678;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n              ></path>\n            </g>\n            <g\n              id=\"g1383\"\n              style=\"display:inline;opacity:.5;fill:#8ed0c3;fill-opacity:1;stroke-width:1.02535;filter:url(#filter55-7-0-8)\"\n              transform=\"translate(-.75)\">\n              <g\n                style=\"fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-dasharray:none\"\n                id=\"g1380\"\n                transform=\"matrix(-1.54622 -.71 -.70963 1.54543 2966.23 1095.6)\">\n                <g\n                  id=\"g1379\"\n                  transform=\"matrix(-.99938 .03515 .03513 .99938 0 0)\"\n                  
style=\"fill:#8ed0c3;fill-opacity:1;stroke-width:.715752\">\n                  <path\n                    style=\"color:#000;fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-linecap:square;-inkscape-stroke:none\"\n                    d=\"M-1204.476-75.636a1.87 1.943 0 0 1-1.87 1.943 1.87 1.943 0 0 1-1.87-1.943 1.87 1.943 0 0 1 1.87-1.943 1.87 1.943 0 0 1 1.87 1.943z\"\n                    id=\"path1378\"></path>\n                  <path\n                    style=\"color:#000;fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-linecap:square;-inkscape-stroke:none\"\n                    d=\"M-1206.346-77.928c-1.228 0-2.219 1.037-2.219 2.291 0 1.255.99 2.293 2.22 2.293 1.228 0 2.218-1.038 2.218-2.293 0-1.254-.99-2.29-2.219-2.29zm0 .698c.837 0 1.52.702 1.52 1.593 0 .892-.683 1.596-1.52 1.596-.836 0-1.521-.704-1.521-1.596 0-.891.685-1.593 1.521-1.593z\"\n                    id=\"path1379\"></path>\n                </g>\n              </g>\n              <g\n                style=\"fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-dasharray:none\"\n                id=\"g1382\"\n                transform=\"matrix(-1.54622 -.71 -.70963 1.54543 2922.446 1086.631)\">\n                <g id=\"g1381\" transform=\"rotate(-1.455)\" style=\"fill:#8ed0c3;fill-opacity:1;stroke-width:.715752\">\n                  <path\n                    style=\"color:#000;fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-linecap:square;-inkscape-stroke:none\"\n                    d=\"M1195.169-87.545a1.87 1.943 0 0 1-1.87 1.943 1.87 1.943 0 0 1-1.87-1.943 1.87 1.943 0 0 1 1.87-1.944 1.87 1.943 0 0 1 1.87 1.944z\"\n                    id=\"path1380\"></path>\n                  <path\n                    style=\"color:#000;fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-linecap:square;-inkscape-stroke:none\"\n                    d=\"M1193.299-89.838c-1.229 0-2.219 1.038-2.219 2.293 0 1.255.99 2.291 2.219 2.291 1.228 0 2.219-1.036 2.219-2.29 
0-1.256-.99-2.294-2.22-2.294zm0 .7c.836 0 1.521.701 1.521 1.593s-.685 1.594-1.521 1.594c-.837 0-1.52-.702-1.52-1.594 0-.892.683-1.594 1.52-1.594z\"\n                    id=\"path1381\"></path>\n                </g>\n              </g>\n            </g>\n            <g\n              id=\"g1393\"\n              transform=\"matrix(.62255 0 0 .62255 403.618 .988)\"\n              style=\"display:inline;stroke:#8ed0c3;stroke-width:.485807;stroke-dasharray:none;stroke-opacity:1\">\n              <g\n                id=\"g1384\"\n                transform=\"scale(-3.20805 3.20805) rotate(-5.092 -707.12 4046.622)\"\n                style=\"stroke:#8ed0c3;stroke-width:.151434;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                <ellipse\n                  ry=\"3.16\"\n                  rx=\"3.576\"\n                  cy=\"75.12\"\n                  cx=\"-34.974\"\n                  id=\"ellipse1383\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8ed0c3;stroke-width:.151435;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                  transform=\"matrix(.99946 -.03271 .03774 .99929 0 0)\"></ellipse>\n              </g>\n              <g\n                id=\"g1388\"\n                transform=\"matrix(-2.846 .09218 .09218 2.846 1304.402 -68.829)\"\n                style=\"stroke:#8ed0c3;stroke-width:.17061;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                <g\n                  transform=\"matrix(-.80864 -.0407 -.04101 .80568 801.15 104.048)\"\n                  style=\"stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                  id=\"g1387\">\n                  <g\n                    id=\"g1386\"\n                    style=\"stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                    
<path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M956.483-6.275c-2.174.009-4.057.715-5.409 1.613\"\n                      id=\"path1384\"></path>\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M956.87-5.4c-2.45.31-3.856 1.678-5.08 3.01\"\n                      id=\"path1385\"></path>\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.28-4.527c-2.037.807-3.172 2.545-3.934 4.134\"\n                      id=\"path1386\"></path>\n                  </g>\n                </g>\n              </g>\n              <g\n                id=\"g1392\"\n                transform=\"rotate(7.875 1141.849 8667.541) scale(2.84749)\"\n                style=\"stroke:#8ed0c3;stroke-width:.17061;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                <g\n                  transform=\"matrix(-.80864 -.0407 -.04101 .80568 801.15 104.048)\"\n                  style=\"stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                  id=\"g1391\">\n                  <g\n                    id=\"g1390\"\n                    style=\"stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\">\n                    <path\n                      
style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M956.601-6.365c-2.175.008-4.175.805-5.527 1.703\"\n                      id=\"path1388\"></path>\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.005-5.389c-2.45.31-3.99 1.667-5.216 3\"\n                      id=\"path1389\"></path>\n                    <path\n                      style=\"fill:none;fill-rule:evenodd;stroke:#8ed0c3;stroke-width:.211099;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.146-4.45c-2.038.806-3.038 2.468-3.8 4.057\"\n                      id=\"path1390\"></path>\n                  </g>\n                </g>\n              </g>\n              <path\n                id=\"path1392\"\n                style=\"fill:none;fill-opacity:1;stroke:#8ed0c3;stroke-width:.485807;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                d=\"M1245.914 100.242c-.303-.775-.33-2.132-1.21-2.324l-2.62-.571-2.617.594c-.878.2-.895 1.556-1.19 2.334-.304.778 3.831 3.283 3.831 3.283s4.11-2.54 3.806-3.316z\"\n              ></path>\n              <path\n                id=\"path1393\"\n                d=\"M1241.642 104.456a2.154 2.154 0 0 1-.95.817 2.428 2.428 0 0 1-1.333.16 2.769 2.769 0 0 1-.905-.315l.001.048a3.626 3.626 0 0 0 7.252 0v-.042c-.28.152-.58.261-.894.309a2.427 2.427 0 0 1-1.334-.16 2.157 2.157 0 0 1-.95-.816c-.592-1.46-.483-1.076-.886 0z\"\n                
style=\"fill:none;fill-opacity:1;stroke:#8ed0c3;stroke-width:.485807;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n              ></path>\n            </g>\n            <g id=\"g1398\" transform=\"translate(-.85)\" style=\"display:inline;stroke-width:1.02535\">\n              <g\n                style=\"fill:#3c8d47;fill-opacity:1;stroke-width:.715752;stroke-dasharray:none\"\n                id=\"g1395\"\n                transform=\"matrix(-1.54622 -.71 -.70963 1.54543 2966.429 1095.6)\">\n                <g\n                  id=\"g1394\"\n                  transform=\"matrix(-.99938 .03515 .03513 .99938 0 0)\"\n                  style=\"fill:#3c8d47;fill-opacity:1;stroke-width:.715752\">\n                  <path\n                    style=\"color:#000;fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-linecap:square;-inkscape-stroke:none\"\n                    d=\"M-1204.476-75.636a1.87 1.943 0 0 1-1.87 1.943 1.87 1.943 0 0 1-1.87-1.943 1.87 1.943 0 0 1 1.87-1.943 1.87 1.943 0 0 1 1.87 1.943z\"\n                    id=\"path1394\"></path>\n                </g>\n              </g>\n              <g\n                style=\"stroke-width:.715752;stroke-dasharray:none\"\n                id=\"g1397\"\n                transform=\"matrix(-1.54622 -.71 -.70963 1.54543 2922.446 1086.631)\">\n                <g id=\"g1396\" transform=\"rotate(-1.455)\" style=\"fill:#3d6f1d;stroke-width:.715752\">\n                  <path\n                    style=\"color:#000;fill:#8ed0c3;fill-opacity:1;stroke-width:.715752;stroke-linecap:square;-inkscape-stroke:none\"\n                    d=\"M1195.169-87.545a1.87 1.943 0 0 1-1.87 1.943 1.87 1.943 0 0 1-1.87-1.943 1.87 1.943 0 0 1 1.87-1.944 1.87 1.943 0 0 1 1.87 1.944z\"\n                    id=\"path1395\"></path>\n                </g>\n              </g>\n            </g>\n            <g\n              id=\"g1399\"\n              clip-path=\"url(#clipPath87-3-1-8)\"\n     
         style=\"stroke:#464649;stroke-width:1.02535;stroke-opacity:1\">\n              <path\n                style=\"opacity:.5;fill:none;fill-opacity:1;stroke:#464649;stroke-width:.820282;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                d=\"M1169.605 85.022V99.49h13.788v32.794\"\n                id=\"path1398\"></path>\n              <path\n                style=\"opacity:.5;fill:none;fill-opacity:1;stroke:#464649;stroke-width:.820279;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"path1399\"\n                transform=\"rotate(21.744)\"\n                d=\"M1161.978-341.29c0 10.246-3.341 18.551-7.463 18.551s-7.464-8.305-7.464-18.551c0-10.246 3.342-18.551 7.464-18.551s7.463 8.305 7.463 18.551zm-54.107 21.402c7.008 7.473 15.127 11.246 18.134 8.426 3.007-2.82-.238-11.164-7.246-18.637-7.009-7.473-15.128-11.246-18.134-8.426-3.007 2.82.237 11.163 7.246 18.637z\"\n              ></path>\n            </g>\n            <g id=\"g1406\" style=\"fill:#464649;fill-opacity:1;stroke-width:1.02535\">\n              <circle\n                style=\"opacity:1;fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"circle1399\"\n                cx=\"1180.559\"\n                cy=\"101.561\"\n                r=\".856\"></circle>\n              <circle\n                style=\"opacity:1;fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"circle1400\"\n                cx=\"1200.679\"\n                cy=\"97.079\"\n                r=\".856\"></circle>\n              <circle\n                style=\"opacity:1;fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                
id=\"circle1401\"\n                cx=\"1198.823\"\n                cy=\"99.353\"\n                r=\".856\"></circle>\n              <circle\n                style=\"fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"circle1402\"\n                cx=\"1046.988\"\n                cy=\"519.068\"\n                r=\".856\"\n                transform=\"scale(1 -1) rotate(-32.287)\"></circle>\n              <circle\n                style=\"fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"circle1403\"\n                cx=\"1045.132\"\n                cy=\"521.341\"\n                r=\".856\"\n                transform=\"scale(1 -1) rotate(-32.287)\"></circle>\n              <circle\n                style=\"opacity:1;fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"circle1404\"\n                cx=\"1175.9\"\n                cy=\"235.939\"\n                r=\".856\"\n                transform=\"rotate(-7.654)\"></circle>\n              <circle\n                style=\"opacity:1;fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"circle1405\"\n                cx=\"1172.407\"\n                cy=\"235.939\"\n                r=\".856\"\n                transform=\"rotate(-7.654)\"></circle>\n              <circle\n                style=\"opacity:1;fill:#464649;fill-opacity:1;stroke:none;stroke-width:.820279;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"circle1406\"\n                cx=\"1177.957\"\n                cy=\"101.561\"\n                
r=\".856\"></circle>\n            </g>\n            <path\n              d=\"M1307.43 87.858c-17.935 2.345-34.17 2.518-51.888.133-1.448-.195-2.54.812-2.403 2.267.96 10.254 3.313 20.274 7.821 29.267.655 1.306 1.806 3.378 2.55 4.636 3.712 6.278 8.899 13.221 15.351 14.755 1.419.337 3.825.337 5.243 0 6.453-1.534 11.639-8.477 15.351-14.755.744-1.258 1.895-3.33 2.55-4.636 4.525-9.028 6.879-19.09 7.832-29.387.135-1.455-.959-2.47-2.408-2.28z\"\n              style=\"opacity:1;fill:none;stroke:#000;stroke-width:.820282;stroke-dasharray:none;paint-order:normal\"\n              id=\"path1406\"\n              transform=\"translate(-104.775 -4.768)\"></path>\n          </g>\n        </g>\n        <path\n          style=\"color:#000;opacity:1;mix-blend-mode:screen;fill:#40378d;fill-opacity:1;stroke-width:1.00001;stroke-linejoin:bevel;-inkscape-stroke:none\"\n          d=\"M2138.33 102.752a.622.622 0 0 0-.624.618c0 .34.28.616.624.616a.622.622 0 0 0 .625-.616.623.623 0 0 0-.625-.618zm0 .159c.26 0 .467.205.467.459a.462.462 0 0 1-.467.459.461.461 0 0 1-.466-.46c0-.253.206-.458.466-.458z\"\n          id=\"path1408\"></path>\n        <g\n          id=\"g1427\"\n          transform=\"matrix(-1.35229 0 0 .99916 1798.116 -20.483)\"\n          style=\"opacity:.23;mix-blend-mode:normal;fill:#35569c;fill-opacity:1;stroke:none\"\n          clip-path=\"url(#clipPath190)\">\n          <path\n            id=\"path1409\"\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.794;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-311.086 111.971-10.816 12.2-1.218 7.265 6.546 6.77.943.785-7.721 6.11.486 19.768 3.001 10.12.167 4.205c3.608-.496 6.424-2.188 7.699-6.213 1.917 8.247 9.755 2.497 11.635-3.023 4.973 4.558 9.313 4.556 15.137.21 6.322 3.634 12.436 8.504 20.85-.297 0 0 4.58 4.749 9.01 3.524 2.813-.778 3.407-6.12 3.425-6.285-.007.104.07.588 1.44 2.45 1.609 2.189 3.92 2.783 3.92 2.783s1.442-15.852 
1.85-15.84c17.784.533-2.017-8.924-4.212-12.04-3.517-4.994-8.46-7.683-13.216-9.055-3.166-.913-6.833 3.025-9.66.704-1.59-1.304-1.504-5.031-2.702-7.066-3.396-5.764-12.303-13.858-12.303-13.858l-6.37-3.151z\"\n          ></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-246.802 172.377-2.565-12.413 4.283-3.347z\"\n            id=\"path1410\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-279.352 161.673 29.99-1.705 2.632 12.42-5.069-4.707-2.507 5.711-4.526.16-5.568-3.786-5.497 4.383c-2.614.83-5.217.682-7.816.192l-7.567-4.109-3.634 1.905z\"\n            id=\"path1411\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-291.653 165.266 12.482-3.614-2.078 10.847-3.786-2.417-3.87 2.52z\"\n            id=\"path1412\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M-316.62 142.818c.257-.635 5.291-4.128 5.291-4.128l-3.952.285-3.083 2.582z\"\n            id=\"path1413\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.55;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"M-318.429 141.447c.827.276 9.322 6.046 9.322 6.046l.783 5.567 10.396 9.932 6.655 2.4 1.21 2.892\"\n            id=\"path1414\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-319.468 178.959-.04-3.976-2.994-10.204-.44-19.515 
4.622-3.724 8.994 6.142.734 5.547 10.525 9.934 6.656 2.47 2.586 6.555c-1.768.941-3.78 1.093-5.875.972l-5.568-3.038c-1.72 2.421-3.272 4.995-5.864 6.622l-3.89-.007-1.914-3.693-3.352 4.915z\"\n            id=\"path1415\"></path>\n          <path\n            id=\"path1416\"\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M-277.427 138.199c-.187.007-.37.03-.546.074-8.224 2.064-16.392 3.047-20.683 2.954a10.807 10.807 0 0 0-1.032-.953c-.943-.753-2.519-1.353-3.073-.477-.364.574 1.697 1.468 1.145 1.688-2.145.853-3.787.37-3.066 1.648 1.518 2.692 8.276-1.766 20.452 3.946 7.716 2.63 22.625 7.614 25.732-1.279-.637-3.025-2.646-5.599-4.497-6.982l-1.72.975c-2.825-.684-5.512-.907-8.697-.89-1.189.006-2.708-.752-4.015-.704z\"\n          ></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.465;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-279.205 162.062-1.832 10.69\"\n            id=\"path1417\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-322.694 131.356 1.015-6.962 10.577-12.054.153 6.218c-5.457 12.314-6.287 10.61-11.745 12.798z\"\n            id=\"path1418\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-310.873 118.38 23.76-2.869-6.28-3.152-17.674.03z\"\n            id=\"path1419\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-310.61 118.583 1.97 8.438-2.627 11.588-4.042.378-7.383-7.675s1.947-.822 4.32-2.132c.84-.464 1.866-.693 
2.307-1.324.888-1.272 1.764-4.22 2.952-6.304 1.07-1.876 2.503-2.97 2.503-2.97z\"\n            id=\"path1420\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.55;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"M-322.105 131.182s1.71-.883 3.723-2.138c2.33-1.047 2.55-1.353 3.335-2.432.817-1.126 1.728-4.973 1.728-4.973l2.446-3.356-.227-6.195\"\n            id=\"path1421\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-308.55 147.14 59.716-2.502 4.07 12.08-4.789 3.758-30.382 3.39-11.657 1.394-6.194-2.205-10.325-10.04z\"\n            id=\"path1422\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-256.192 160.345 6.633-.575 3.585-2.366\"\n            id=\"path1423\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.4;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-255.01 165.328 2.137-1.193.752-2.056 3.046-.382-.49-1.798\"\n            id=\"path1424\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.465;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-309.225 147.492 10.544.624\"\n            id=\"path1425\"></path>\n          <path\n            style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.465;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-284.8 163.3 5.48-1.375 15.05-1.008\"\n            id=\"path1426\"></path>\n          <path\n            
style=\"fill:#35569c;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M-270.516 146.783c.02-.002 6.705 6.235 2.674 7.327-1.705.463-6.85-5.713-6.85-5.713z\"\n            id=\"path1427\"></path>\n        </g>\n        <g\n          id=\"g1429\"\n          transform=\"matrix(.19471 0 0 .19526 2128.6 79.937)\"\n          style=\"opacity:1;mix-blend-mode:screen;fill:#40378d;fill-opacity:1;stroke:none;stroke-width:1.02535\">\n          <path\n            style=\"color:#000;opacity:.885304;fill:#40378d;fill-opacity:1;stroke-width:1.02535;stroke-linejoin:bevel;-inkscape-stroke:none\"\n            d=\"M457.455 520.738c-2.74 0-4.976 2.196-4.976 4.903 0 2.706 2.235 4.902 4.976 4.902 2.74 0 4.979-2.196 4.979-4.902 0-2.707-2.238-4.903-4.979-4.903zm0 .809c2.311 0 4.17 1.832 4.17 4.094 0 2.262-1.859 4.093-4.17 4.093s-4.17-1.831-4.17-4.093 1.859-4.094 4.17-4.094z\"\n            id=\"path1428\"\n            transform=\"translate(-402.75 -417.176)\"></path>\n          <path\n            style=\"fill:#40378d;fill-opacity:1;stroke:none;stroke-width:.271289px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M56.342 106.57s.709.97.85 1.532c.16.64.18.77.6.36.388-.375.653-1.16-.242-1.828-.474-.354-1.208-.064-1.208-.064z\"\n            id=\"path1429\"></path>\n        </g>\n        <path\n          style=\"color:#000;opacity:1;mix-blend-mode:screen;fill:#40378d;fill-opacity:1;stroke-width:1.00001;stroke-linejoin:bevel;-inkscape-stroke:none\"\n          d=\"M2119.724 123.523c.333 0 .607.267.607.597 0 .33-.274.598-.607.598a.605.605 0 0 1-.608-.598c0-.33.276-.597.608-.597zm0 .25a.352.352 0 0 0-.36.347c0 .192.16.349.36.349a.352.352 0 0 0 .359-.349.351.351 0 0 0-.359-.348z\"\n          id=\"path1430\"></path>\n        <g\n          id=\"g1432\"\n          transform=\"matrix(-.30872 0 0 .30959 2126.005 91.526)\"\n          
style=\"opacity:1;mix-blend-mode:screen;fill:#40378d;fill-opacity:1;stroke:none;stroke-width:1.02535\">\n          <path\n            style=\"color:#000;opacity:.885304;fill:#40378d;fill-opacity:1;stroke-width:1.02535;stroke-linejoin:bevel;-inkscape-stroke:none\"\n            d=\"M26.047 105.768c-1.86 0-3.385 1.485-3.385 3.318s1.525 3.318 3.385 3.318c1.86 0 3.387-1.485 3.387-3.318s-1.527-3.318-3.387-3.318zm0 .808c1.433 0 2.578 1.123 2.578 2.51 0 1.386-1.145 2.512-2.578 2.512-1.434 0-2.578-1.126-2.578-2.512 0-1.387 1.144-2.51 2.578-2.51z\"\n            id=\"path1431\"></path>\n          <path\n            style=\"fill:#40378d;fill-opacity:1;stroke:none;stroke-width:.271289px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M27.114 107.859s.463.628.555.993c.104.414.116.499.391.234.253-.244.426-.753-.158-1.186-.309-.229-.788-.041-.788-.041z\"\n            id=\"path1432\"></path>\n        </g>\n        <g\n          id=\"g1434\"\n          transform=\"matrix(-.26035 0 0 .26108 2166.303 79.589)\"\n          style=\"opacity:1;mix-blend-mode:screen;fill:#40378d;fill-opacity:1;stroke:none;stroke-width:1.02535\">\n          <path\n            style=\"color:#000;opacity:.885304;fill:#40378d;fill-opacity:1;stroke-width:1.02535;stroke-linejoin:bevel;-inkscape-stroke:none\"\n            d=\"M457.455 520.738c-2.74 0-4.976 2.197-4.976 4.903 0 2.705 2.236 4.9 4.976 4.9 2.74 0 4.979-2.195 4.979-4.9 0-2.706-2.238-4.903-4.979-4.903zm0 .809c2.312 0 4.172 1.831 4.172 4.094 0 2.262-1.86 4.093-4.172 4.093-2.312 0-4.17-1.831-4.17-4.093 0-2.263 1.858-4.094 4.17-4.094z\"\n            id=\"path1433\"\n            transform=\"translate(-402.75 -417.176)\"></path>\n          <path\n            style=\"fill:#40378d;fill-opacity:1;stroke:none;stroke-width:.272328;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"M56.342 106.57s.709.97.85 
1.532c.16.64.18.77.6.36.388-.375.653-1.16-.242-1.828-.474-.354-1.208-.064-1.208-.064z\"\n            id=\"path1434\"></path>\n        </g>\n        <g\n          id=\"g1451\"\n          transform=\"matrix(-2.1191 .19951 .19343 -.99089 1533.121 348.217)\"\n          style=\"display:inline;opacity:1;mix-blend-mode:normal;fill:#251f5b;fill-opacity:1;stroke:none\"\n          clip-path=\"url(#clipPath189)\">\n          <path\n            id=\"path1435\"\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.794;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-311.235 110.34-10.816 12.2-1.218 7.264 6.546 6.77.943.785-7.721 6.11.486 19.768 3.001 10.12.167 4.205c3.608-.496 6.424-2.189 7.699-6.213 1.917 8.247 9.755 2.497 11.635-3.023 4.973 4.558 9.313 4.556 15.137.21 6.322 3.634 12.436 8.504 20.85-.297 0 0 4.58 4.749 9.01 3.523 2.813-.778 3.407-6.12 3.425-6.284-.007.104.07.588 1.44 2.45 1.609 2.189 3.92 2.783 3.92 2.783s1.966-15.252 1.85-15.84c-.116-.587-4.212-12.04-4.212-12.04l-13.216-9.055-9.66.704c-.625-2.126-1.78-6.403-2.702-7.066-2.305-1.995-12.303-13.859-12.303-13.859l-6.37-3.15z\"\n          ></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-291.685 164.915 12.482-3.615s-.346 9.444-2.078 10.847c-1.036.84-3.786-2.416-3.786-2.416l-3.87 2.52z\"\n            id=\"path1436\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M-316.62 142.818c.257-.635 5.291-4.128 5.291-4.128l-3.952.285-3.083 2.582z\"\n            id=\"path1437\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.55;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            
d=\"M-318.429 141.447c.827.276 9.322 6.046 9.322 6.046l.783 5.567 10.396 9.932 6.655 2.4 1.21 2.892\"\n            id=\"path1438\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-319.468 178.959-.04-3.976-2.994-10.204-.44-19.515 4.622-3.724 8.994 6.142.734 5.547 10.525 9.934 6.656 2.47 2.586 6.555c-1.768.941-3.821 1.386-5.875.972-1.822-.368-5.568-3.038-5.568-3.038-1.72 2.421-4.568 5.809-5.864 6.622-1.296.814-3.89-.007-3.89-.007l-1.914-3.693-3.352 4.915z\"\n            id=\"path1439\"></path>\n          <path\n            id=\"path1440\"\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M-277.427 138.199c-.187.007-.37.03-.546.074-8.224 2.064-16.392 3.047-20.683 2.954a10.807 10.807 0 0 0-1.032-.953c-.943-.753-2.519-1.353-3.073-.477-.364.574 1.697 1.468 1.145 1.688-2.145.853-3.787.37-3.066 1.648 1.518 2.692 8.276-1.766 20.452 3.946 7.716 2.63 22.625 7.614 25.732-1.279-.637-3.025-2.646-5.599-4.497-6.982l-1.72.975c-2.825-.684-5.512-.907-8.697-.89-1.189.006-2.708-.752-4.015-.704z\"\n          ></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.465;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-279.205 162.062-1.832 10.69\"\n            id=\"path1441\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-322.694 131.356 1.015-6.962 10.577-12.054.153 6.218c-5.457 12.314-6.287 10.61-11.745 12.798z\"\n            id=\"path1442\"></path>\n          <path\n            
style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-310.873 118.38 23.76-2.869-6.28-3.152-17.674.03z\"\n            id=\"path1443\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-310.61 118.583 1.97 8.438-2.627 11.588-4.042.378-7.383-7.675s1.947-.822 4.32-2.132c.84-.464 1.866-.693 2.307-1.324.888-1.272 1.764-4.22 2.952-6.304 1.07-1.876 2.503-2.97 2.503-2.97z\"\n            id=\"path1444\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.55;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"M-322.105 131.182s1.71-.883 3.723-2.138c2.33-1.047 2.55-1.353 3.335-2.432.817-1.126 1.728-4.973 1.728-4.973l2.446-3.356-.227-6.195\"\n            id=\"path1445\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"m-308.55 147.14 59.716-2.502 4.07 12.08-4.789 3.758-30.382 3.39-11.657 1.394-6.194-2.205-10.325-10.04z\"\n            id=\"path1446\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-256.192 160.345 6.633-.575 3.585-2.366\"\n            id=\"path1447\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.4;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-255.01 165.328 2.137-1.193.752-2.056 3.046-.382-.49-1.798\"\n            id=\"path1448\"></path>\n          <path\n            
style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.465;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-309.225 147.492 10.544.624\"\n            id=\"path1449\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.465;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n            d=\"m-284.8 163.3 5.48-1.375 15.05-1.008\"\n            id=\"path1450\"></path>\n          <path\n            style=\"fill:#251f5b;fill-opacity:1;stroke:none;stroke-width:.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\"\n            d=\"M-270.516 146.783c.02-.002 6.705 6.235 2.674 7.327-1.705.463-6.85-5.713-6.85-5.713z\"\n            id=\"path1451\"></path>\n        </g>\n        <g\n          id=\"g1469\"\n          transform=\"matrix(.26016 -.08442 .0844 .26022 1960.339 113.338)\"\n          style=\"opacity:.8;mix-blend-mode:normal;fill:none;stroke:#8772c7;stroke-width:.639647;stroke-dasharray:none;stroke-opacity:1\">\n          <g id=\"g1468\" style=\"fill:none;stroke:#8772c7;stroke-width:.639647;stroke-dasharray:none;stroke-opacity:1\">\n            <path\n              d=\"M587.008 150.21c-1.297-.372-2.786-.567-4.363-.559-6.308.032-14.03 3.319-16.499 11.604-.822 2.76-1.2 5.792-1.277 8.851-1.935-1.198-3.686-2.286-4.147-2.566-2.3-1.395-3.392-1.82-4.858-2.044-1.039-.159-3.213-.39-4.914 1.969-.968 1.342-.644 3.515.259 5.08s2.83 2.53 4.053 2.754c1.74.316 6.656.68 9.83.99.617 6.659 1.987 10.174 3.59 15.089 2.328 7.142 6.497 13.606 8.709 20.785.803 2.607 1.983 5.313 1.634 8.019-.451 3.49-4.038 8.483-4.667 9.47-.63.989-.367 2.91.795 3.327 1.486.534 2.42-.319 2.963-.942.543-.624 1.975-3.914 1.975-3.914s-.586 2.637-.446 3.55c.14.914.38 1.867 2.18 1.81 4.08-.798 1.182-9.327 2.947-13.626.215-.525.426-.657.913-1.436.919-1.47 5.306-8.804 7.165-13.474 1.294-3.249 5.595-13.425 7.383-19.372 1.406.804 2.638 1.51 2.968 1.695 1.952 1.094 
2.873 1.417 4.097 1.56.868.103 2.68.233 4.02-1.778.764-1.145.431-2.944-.366-4.22-.798-1.275-2.426-2.022-3.45-2.172-1.134-.166-3.845-.3-6.262-.43.452-5.527.347-15.333-2.747-20.858-2.52-4.499-5.037-6.538-10.258-8.732a10.98 10.98 0 0 0-1.227-.43z\"\n              style=\"fill:none;stroke:#8772c7;stroke-width:.639647;stroke-dasharray:none;stroke-opacity:1\"\n              id=\"path1452\"></path>\n            <g\n              id=\"g1467\"\n              transform=\"translate(207.386 -48.536) scale(.40802)\"\n              style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:1.56768;stroke-dasharray:none;stroke-opacity:1\">\n              <g\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.666197;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"g1453\"\n                transform=\"matrix(-2.13906 -.9822 -.9817 2.13796 3394.704 1939.962)\">\n                <ellipse\n                  transform=\"matrix(-.99938 .03515 .03513 .99938 0 0)\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.666196;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                  id=\"ellipse1452\"\n                  cx=\"-1206.346\"\n                  cy=\"-75.636\"\n                  rx=\"1.87\"\n                  ry=\"1.943\"></ellipse>\n                <ellipse\n                  transform=\"rotate(-2.014) skewX(-.001)\"\n                  ry=\".968\"\n                  rx=\".919\"\n                  cy=\"-76.183\"\n                  cx=\"1206.915\"\n                  id=\"ellipse1453\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.666196;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                ></ellipse>\n              </g>\n              <g\n                
style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.666197;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"g1455\"\n                transform=\"matrix(-2.13906 -.9822 -.9817 2.13796 3334.132 1926.304)\">\n                <ellipse\n                  ry=\"1.943\"\n                  rx=\"1.87\"\n                  cy=\"-87.545\"\n                  cx=\"1193.299\"\n                  id=\"ellipse1454\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.666196;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                  transform=\"rotate(-1.455)\"></ellipse>\n                <ellipse\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.666196;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                  id=\"ellipse1455\"\n                  cx=\"-1193.962\"\n                  cy=\"-88.102\"\n                  rx=\".919\"\n                  ry=\".968\"\n                  transform=\"matrix(-.99968 .02539 .02538 .99968 0 0)\"></ellipse>\n              </g>\n              <g\n                id=\"g1456\"\n                transform=\"scale(-2.84528 2.84528) rotate(-5.092 991.483 3402.73)\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.550975;stroke-dasharray:none;stroke-opacity:1\">\n                <ellipse\n                  ry=\"3.16\"\n                  rx=\"3.576\"\n                  cy=\"75.12\"\n                  cx=\"-34.974\"\n                  id=\"ellipse1456\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.550979;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                  transform=\"matrix(.99946 -.03271 .03774 .99929 0 0)\"></ellipse>\n              </g>\n              
<path\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.866563;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                d=\"M166.687-90.618a2.357 2.357 0 0 1 1.46-1.346 2.353 2.353 0 0 1 1.868.213\"\n                id=\"path1456\"\n                transform=\"matrix(-1.8041 .1342 .1342 1.8041 1255.004 624.032)\"></path>\n              <path\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.866563;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                d=\"M190.906-88.08a2.357 2.357 0 0 0-1.148-1.62 2.353 2.353 0 0 0-1.871-.18\"\n                id=\"path1457\"\n                transform=\"scale(-1.80908 1.80908) rotate(-10.341 1805.23 3886.55)\"></path>\n              <ellipse\n                ry=\"3.394\"\n                rx=\"3.237\"\n                cy=\"33.848\"\n                cx=\"-1076.172\"\n                id=\"ellipse1457\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:1.56768;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                transform=\"matrix(-.89355 -.44896 -.44896 .89355 0 0)\"></ellipse>\n              <ellipse\n                ry=\"3.008\"\n                rx=\"2.868\"\n                cy=\"58.403\"\n                cx=\"-1026.862\"\n                id=\"ellipse1458\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:1.56768;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                transform=\"matrix(-.89355 -.44896 -.44896 .89355 0 0)\"></ellipse>\n              <g\n                id=\"g1462\"\n                transform=\"matrix(-2.52417 .08175 .08175 2.52417 975.228 360.663)\"\n           
     style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.620742;stroke-dasharray:none;stroke-opacity:1\">\n                <g\n                  transform=\"matrix(-.80864 -.0407 -.04101 .80568 801.15 104.048)\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.768063;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                  id=\"g1461\">\n                  <g\n                    id=\"g1460\"\n                    style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.768063;stroke-dasharray:none;stroke-opacity:1\">\n                    <path\n                      style=\"fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#8772c7;stroke-width:.768063;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M956.381-6.31c-2.175.01-3.903.729-5.254 1.626\"\n                      id=\"path1458\"></path>\n                    <path\n                      style=\"fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#8772c7;stroke-width:.768063;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.005-5.389c-2.45.31-3.99 1.667-5.216 3\"\n                      id=\"path1459\"></path>\n                    <path\n                      style=\"fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#8772c7;stroke-width:.768063;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.146-4.45c-2.038.806-3.038 2.468-3.8 4.057\"\n                      id=\"path1460\"></path>\n                  </g>\n                </g>\n              </g>\n              <g\n                id=\"g1466\"\n                transform=\"rotate(7.875 -2141.352 6578.628) scale(2.52549)\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.620742;stroke-dasharray:none;stroke-opacity:1\">\n  
              <g\n                  transform=\"matrix(-.80864 -.0407 -.04101 .80568 801.15 104.048)\"\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.768063;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                  id=\"g1465\">\n                  <g\n                    id=\"g1464\"\n                    style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.768063;stroke-dasharray:none;stroke-opacity:1\">\n                    <path\n                      style=\"fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#8772c7;stroke-width:.768063;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M956.443-6.343c-2.175.009-3.903.728-5.254 1.625\"\n                      id=\"path1462\"></path>\n                    <path\n                      style=\"fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#8772c7;stroke-width:.768063;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.005-5.389c-2.45.31-3.99 1.667-5.216 3\"\n                      id=\"path1463\"></path>\n                    <path\n                      style=\"fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#8772c7;stroke-width:.768063;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                      d=\"M957.146-4.45c-2.038.806-3.038 2.468-3.8 4.057\"\n                      id=\"path1464\"></path>\n                  </g>\n                </g>\n              </g>\n              <path\n                id=\"path1466\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:1.56768;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n                d=\"M928.83 552.636a32.28 32.28 0 0 1 2.678 1.771c12.688 9.35 11.882 28.054 8.405 44.828-2.965 14.306-9.713 23.56-19.325 
21.94-23.203-3.911-38.93-45.217-26.724-65.327 6.072-10.004 24.23-9.048 34.966-3.212z\"\n              ></path>\n              <path\n                id=\"path1467\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:1.56768;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                d=\"M923.354 510.615c-.269-.688-.292-1.891-1.073-2.061l-2.324-.507-2.321.527c-.779.176-.793 1.38-1.056 2.07-.269.69 3.399 2.912 3.399 2.912s3.644-2.254 3.375-2.941z\"\n              ></path>\n              <ellipse\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:1.56768;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"ellipse1467\"\n                cx=\"920.081\"\n                cy=\"517.049\"\n                rx=\"2.578\"\n                ry=\"2.162\"></ellipse>\n            </g>\n          </g>\n        </g>\n        <g\n          id=\"g1489\"\n          transform=\"rotate(34.046 574.497 3402.962) scale(.28648)\"\n          style=\"opacity:.8;mix-blend-mode:normal;fill:none;stroke:#8772c7;stroke-width:.610755;stroke-dasharray:none;stroke-opacity:1\">\n          <g\n            id=\"g1488\"\n            transform=\"scale(-1 1) rotate(18.751 463.95 -226.833)\"\n            style=\"fill:none;stroke:#8772c7;stroke-width:.610755;stroke-dasharray:none;stroke-opacity:1\">\n            <g\n              id=\"g1487\"\n              transform=\"translate(8.79 -613.284)\"\n              style=\"fill:none;stroke:#8772c7;stroke-width:.610755;stroke-dasharray:none;stroke-opacity:1\">\n              <path\n                d=\"M-242.621 100.867c3.39.988 5.487 2.748 6.5 4.051 1.307 1.68 2.318 3.3 2.705 6.446.386 3.147-.256 9.583-1.819 13.11-.234.53-.597 1.204-1.03 1.941 2.158.381 5.409.742 6.48.928 1.814.317 3.516 2.693 3.204 4.51-.298 1.737-2.38 3.243-4.323 3.042-2.5-.258-5.018-3.011-7.188-5.71-1.74 2.404-4.083 5.087-6.873 7.259-4.736 3.685-16.706 
8.496-25.639 7.963-4.457-.266-9.402-2.89-13.715-5.19-4.897-2.613-14.325-6.209-13.57-9.652.755-3.443 4.659-.683 6.609.697 0 0-2.863-3.118-3.275-4.002-.412-.883-.74-2.445.406-3.645 1.146-1.2 2.852-.278 3.965.736 1.114 1.013 5.608 7.554 7.997 9.57 3.23 2.726 11.545-7.006 13.253-9.773 1.708-2.767 8.852-16.302 11.536-18.93 2.96-2.898 7.045-4.63 12.221-3.897.923.13 1.774.318 2.556.546z\"\n                style=\"fill:none;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:bevel;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"path1470\"></path>\n              <path\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.643104;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1\"\n                d=\"M-52.463 274.054c2.118-2.156 9.255-5.987 13.423-4.376 4.167 1.61 5.03 5.367 5.052 7.55.038 3.838-2.02 7.176-4.658 9.53-3.179 2.835-12.667 6.653-18.09 3.737-1.354-.727-2.047-1.462-1.712-4.279\"\n                id=\"path1471\"\n                transform=\"rotate(-3.57 -2321.819 3558.763) scale(.9497)\"></path>\n              <ellipse\n                transform=\"matrix(-.99526 .09726 .09728 .99526 0 0)\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                id=\"ellipse1471\"\n                cx=\"257.448\"\n                cy=\"82.037\"\n                rx=\"1.53\"\n                ry=\"1.589\"></ellipse>\n              <ellipse\n                transform=\"rotate(-5.582) skewX(.001)\"\n                ry=\".792\"\n                rx=\".752\"\n                cy=\"81.59\"\n                cx=\"-256.982\"\n                id=\"ellipse1472\"\n                
style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n              ></ellipse>\n              <ellipse\n                ry=\"1.589\"\n                rx=\"1.53\"\n                cy=\"88.817\"\n                cx=\"-245.747\"\n                id=\"ellipse1473\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                transform=\"rotate(-5.023) skewX(.001)\"></ellipse>\n              <ellipse\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                id=\"ellipse1474\"\n                cx=\"246.212\"\n                cy=\"88.369\"\n                rx=\".752\"\n                ry=\".792\"\n                transform=\"matrix(-.99616 .08755 .08757 .99616 0 0)\"></ellipse>\n              <ellipse\n                ry=\"2.955\"\n                rx=\"3.345\"\n                cy=\"166.272\"\n                cx=\"-209.318\"\n                id=\"ellipse1475\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610759;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                transform=\"matrix(.96975 .2441 -.23922 .97097 0 0)\"></ellipse>\n              <g\n                transform=\"matrix(.58197 .16426 -.16364 .57973 -803.757 -44.318)\"\n                style=\"fill:none;stroke:#8772c7;stroke-width:1.01195;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"g1476\">\n                <ellipse\n                  
style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:1.01195;stroke-dasharray:none;stroke-opacity:1\"\n                  id=\"ellipse1476\"\n                  cx=\"952.327\"\n                  cy=\"140.653\"\n                  rx=\"1.812\"\n                  ry=\"1.526\"\n                  transform=\"rotate(-8.638) skewX(-.064)\"></ellipse>\n              </g>\n              <path\n                d=\"M-241.464 109.814c-.023-.24.1-.673-.151-.744-.252-.07-.438-.3-.685-.37-.248-.068-.526.028-.778-.043-.25-.07-.373.363-.518.556-.147.192.806 1.222.806 1.222s1.349-.38 1.326-.62z\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                id=\"path1476\"></path>\n              <path\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                d=\"M-247.634 97.242c.081-.423.36-.804.737-1.01a1.48 1.48 0 0 1 1.179-.095\"\n                id=\"path1477\"></path>\n              <path\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1\"\n                d=\"M-234.225 100.887a1.482 1.482 0 0 0-.405-1.182 1.48 1.48 0 0 0-1.097-.44\"\n                id=\"path1478\"></path>\n              <ellipse\n                ry=\"1.18\"\n                rx=\"1.125\"\n                cy=\"84.39\"\n                cx=\"-261.8\"\n                id=\"ellipse1478\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                
transform=\"rotate(-5.582)\"></ellipse>\n              <ellipse\n                ry=\"1.046\"\n                rx=\".997\"\n                cy=\"89.811\"\n                cx=\"-244.862\"\n                id=\"ellipse1479\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.610755;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:6;stroke-opacity:1\"\n                transform=\"rotate(-5.582)\"></ellipse>\n              <g\n                transform=\"matrix(-.68514 -.18957 -.18927 .68254 414.695 297.304)\"\n                style=\"fill:none;stroke:#8772c7;stroke-width:.860713;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"g1482\">\n                <g\n                  id=\"g1481\"\n                  style=\"fill:none;stroke:#8772c7;stroke-width:.860713;stroke-dasharray:none;stroke-opacity:1\">\n                  <path\n                    style=\"fill:none;fill-rule:evenodd;stroke:#8772c7;stroke-width:.860713;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                    d=\"M956.51-6.468c-2.175.009-3.903.728-5.255 1.625\"\n                    id=\"path1479\"></path>\n                  <path\n                    style=\"fill:none;fill-rule:evenodd;stroke:#8772c7;stroke-width:.860713;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                    d=\"M957.005-5.389c-2.45.31-3.99 1.667-5.216 3\"\n                    id=\"path1480\"></path>\n                  <path\n                    style=\"fill:none;fill-rule:evenodd;stroke:#8772c7;stroke-width:.860713;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                    d=\"M957.242-4.514c-2.038.806-3.038 2.468-3.8 4.058\"\n                    id=\"path1481\"></path>\n                </g>\n              </g>\n              
<g\n                transform=\"matrix(.69934 .12758 -.12672 .69687 -916.006 -7.74)\"\n                style=\"fill:none;stroke:#8772c7;stroke-width:.860713;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                id=\"g1485\">\n                <g\n                  id=\"g1484\"\n                  style=\"fill:none;stroke:#8772c7;stroke-width:.860713;stroke-dasharray:none;stroke-opacity:1\">\n                  <path\n                    style=\"fill:none;fill-rule:evenodd;stroke:#8772c7;stroke-width:.860713;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                    d=\"M956.426-6.37c-2.175.009-3.903.728-5.254 1.625\"\n                    id=\"path1482\"></path>\n                  <path\n                    style=\"fill:none;fill-rule:evenodd;stroke:#8772c7;stroke-width:.860713;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                    d=\"M957.005-5.389c-2.45.31-3.99 1.667-5.216 3\"\n                    id=\"path1483\"></path>\n                  <path\n                    style=\"fill:none;fill-rule:evenodd;stroke:#8772c7;stroke-width:.860713;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1\"\n                    d=\"M957.146-4.45c-2.038.806-3.038 2.468-3.8 4.057\"\n                    id=\"path1484\"></path>\n                </g>\n              </g>\n              <g\n                id=\"g1486\"\n                transform=\"scale(.98586) rotate(-40.772 -18.564 593.03)\"\n                style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.619517;stroke-dasharray:none;stroke-opacity:1\">\n                <path\n                  style=\"fill:none;fill-opacity:1;stroke:#8772c7;stroke-width:.619517;stroke-linecap:butt;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1\"\n                  d=\"M101.615 72.074S91.415 79.13 89.37 82.66c-1.011 
1.745-.842 3.781.238 4.839a4.334 4.334 0 0 0 3.053 1.247c1.033-.008 3.027-.76 4.038-1.759 1.44-1.424 3.467-6.732 4.414-7.92\"\n                  id=\"path1485\"></path>\n              </g>\n            </g>\n          </g>\n        </g>\n        <g\n          id=\"g1490\"\n          transform=\"matrix(.01483 -.00196 .00196 .01483 2156.538 120.51)\"\n          style=\"mix-blend-mode:screen;fill:none;stroke:#251e5b;stroke-width:13.3736;stroke-dasharray:none;stroke-opacity:1\">\n          <path\n            id=\"path1489\"\n            class=\"st0\"\n            d=\"M218.4 514.1c0-.4-1.8-.9-3.9-1.2-2.2-.3-4.3-.8-4.7-1.1-.4-.3-1.6-.8-2.6-1.1-2.9-.9-8.1-5.4-9.9-8.7-1.8-3.2-2.4-3.4-5.6-1.9-4.4 2.2-9.9 4.4-17.1 6.7-5.6 1.8-16.2 1.7-20.4-.3-1.8-.9-3.4-1.9-3.7-2.3-.2-.4-1.3-.7-2.5-.7-2.2 0-10.7-2.8-11.4-3.7-.2-.3-1.6-1.3-3.1-2.2-5.5-3.3-11.8-12.7-15.6-23.4-1-2.8-2.5-6.4-4.2-10.3-.6-1.4-1.1-2.9-1.1-3.5 0-1-2.1-5.6-3-6.4-2.1-2.2-15.7-30.4-17.8-37.1-4-12.9-6.6-27.1-7.8-42.8-2-27.2-2.9-36.7-4.5-45-.4-2.1-1-5.9-1.5-8.6-.4-2.7-1.3-7.4-1.9-10.5-.6-3.1-1.4-7-1.8-8.6-.8-4.1-1.7-7.4-5.7-21-.7-2.5-2.3-6.6-3.5-9.1-1.2-2.5-2.1-4.8-2.1-5.1 0-.3-.6-1.4-1.4-2.5-1.3-1.9-2.5-4-8.9-15.8-1.4-2.6-3.9-6.9-5.4-9.4-1.5-2.5-3-5.1-3.3-5.7-.3-.6-1.6-2.8-2.8-4.9-1.3-2.1-2.7-4.7-3.2-5.8-.5-1.1-1.1-2.1-1.5-2.1-.4 0-.6-1.2-.6-2.6 0-1.9.3-2.6 1.2-2.6 1.4 0 20.2 18.6 25.5 25.1 6.3 7.8 15.6 22.2 19.7 30.4.9 1.9 2 3.9 2.3 4.5 2.1 3.6 11.3 23.8 11.3 24.8 0 .4.5 1.7 1 2.8 2 3.9 3.5 7.7 3.5 9 0 .7.3 1.6.7 2 .4.4 1.1 2.1 1.6 3.7.5 1.7 1.4 4.7 2 6.8 1.2 3.9 2.9 10.6 4 15.8.4 1.7 1 3.9 1.4 4.9.4 1 1.2 3.9 1.9 6.3 3.1 12.1 3.7 14.5 4.7 18 1.2 4.2 3.1 11.4 4.1 15.4 1.3 5.2 3.1 11.7 4.1 15.4.6 2.1 1.3 4.8 1.5 6.1.2 1.3.9 2.8 1.5 3.2.8.7 1.1.3 1.5-2 1.5-10.1 2.9-17.5 3.6-19.7.6-1.9.8-12.7.7-39.4-.2-36.8.2-46.2 2.5-56.3.7-2.9 1.5-6.6 1.9-8.3 1.8-8 3.1-11.7 6.1-17.2 6.6-12.4 11.2-21.7 11.2-22.6 0-.5-1-2.4-2.3-4.3-1.2-1.8-2.2-3.8-2.2-4.4 0-.6-.3-1.4-.7-1.8-.4-.4-1.1-2.2-1.5-4.1-.5-1.9-1.4-5.4-2.1-7.9-1.6-5.8-1.6-20.2 
0-29.3.7-3.7 1.6-8.9 2-11.6.4-2.7 1.1-5.2 1.5-5.6.4-.4.7-1.5.7-2.4 0-.9.9-3.9 1.9-6.8 1.1-2.8 2.1-5.8 2.4-6.7 1.3-4.4 6.8-10.6 8.8-9.9 1 .4 1.1 1.7.7 8.9-.2 4.7-.8 9.5-1.2 10.9-.5 1.6-.5 2.9 0 3.9.4.8.8 2.1.8 3 0 .8.5 3.3 1.2 5.5.6 2.2 1.9 6.4 2.7 9.3 3.4 11.7 7 19.9 8.7 19.9.9 0 8.7-7.4 27.5-25.9 3.2-3.2 7.2-8.4 8.7-11.3.2-.4 1.6-2.6 3.1-4.9 3.4-5.2 7.4-12.5 7.4-13.5 0-.4.5-1.9 1.1-3.2.6-1.3 1.2-3 1.3-3.9.3-1.8 3.9-2.5 4.6-.9.7 1.8-1.5 16.1-3.2 20.8-5 13.8-6.3 16.7-12.9 29.3-4.9 9.3-5.3 9.9-7.1 12.5-.9 1.3-1.7 2.7-1.7 3.1 0 .4-.4 1.2-1 1.8-.8.8-3.4 4.9-7.5 11.5-.5.8-1.8 3-3 4.7-2.4 3.7-2.5 5.5-.9 10.4 1.1 3.4 1.8 5 4.1 10.3 1.7 3.9 3 15.3 3.4 30 .4 13.9.7 15.6 4.6 33.4.6 2.7 1.2 5.9 1.4 7.1 2.1 13.8 2.9 18.4 3.3 18.4.2 0 1.3-1.3 2.4-2.8 3.1-4.3 4.5-6 8.3-10.4 2-2.2 3.6-4.3 3.6-4.7 0-1.7 16.9-19.5 30.1-31.8 3.9-3.6 9.4-8.7 12.2-11.4 2.8-2.7 5.5-4.9 5.8-4.9.3 0 1.8-1.1 3.4-2.5 1.5-1.4 7.2-5.8 12.5-9.9 5.4-4.1 10.2-7.8 10.9-8.4 1-.8 9.1-7.1 11.6-9.1 9.2-7 21.2-16.4 21.7-17.1.4-.5 2.3-2 4.1-3.4 3.8-2.9 11.6-9.4 13.8-11.6.8-.8 3.2-3 5.3-4.8 8.1-7.1 14.2-13.1 20-19.8 3.4-3.8 8.1-8.9 10.5-11.4 2.4-2.5 6.3-7 8.6-10.1 2.4-3.1 4.5-5.8 4.8-6 .2-.2 1.3-1.7 2.3-3.4 1-1.7 2.4-3.7 3.2-4.5.8-.8 2.9-3.7 4.7-6.5 1.8-2.7 3.7-5.4 4.3-6 1.1-1.1 5.8-8.7 5.8-9.3 0-.2 1.9-3.4 4.1-7.2 2.3-3.8 5.4-9.1 6.9-11.8 1.5-2.8 3.3-6 4-7.3.7-1.3 2.5-4.6 4-7.6 2.9-5.8 4.4-7.5 6.1-6.8 2.1.8.9 13.3-1.8 19.6-.4.8-1.2 3.5-2 6-.7 2.5-1.7 5.3-2.2 6.4-.5 1-1.5 3.4-2.2 5.3-1.3 3.2-3 7.3-6.5 15-.8 1.9-1.9 4.2-2.3 5.3-.4 1-1 2.4-1.4 3-.4.6-.9 1.6-1.1 2.3-.6 1.6-2.9 6.1-6.8 13.5-5.5 10.4-7.2 13.5-12.6 22.7-1 1.8-2.1 3.8-2.3 4.5-.3.7-.8 1.3-1.1 1.3-.4 0-.7.5-.7 1 0 .6-.9 2.3-2.1 3.9-1.2 1.6-2.3 3.4-2.6 4.1-.3.6-1.1 2-1.8 3.2-.7 1.1-2.6 4.2-4.2 6.8-1.6 2.6-4.9 7.4-7.2 10.7-2.3 3.3-6.3 8.9-8.7 12.5-2.4 3.6-5.3 7.6-6.4 9-1.1 1.4-5.3 6.9-9.3 12.3-4 5.4-8.3 10.9-9.6 12.4-1.3 1.5-3.5 4.1-4.9 6-1.4 1.8-4.7 5.7-7.2 8.6-2.5 2.9-5.4 6.3-6.4 7.5-5.9 7.1-16.7 18.7-26.1 28-5.9 5.9-10.9 10.7-11 10.7-.2 0-29.7 29.7-33.2 
33.4-1.2 1.3-3.3 3.8-4.8 5.6-3.1 3.9-3.3 4.2-8 10.1-2 2.5-4.4 5.3-5.3 6.4-.9 1-1.7 2.2-1.7 2.7 0 .4-1.5 3-3.4 5.6-1.9 2.6-3.4 5.2-3.4 5.7s-.3 1-.6 1.2c-.3.1-2.2 3-4.1 6.3-1.9 3.3-3.9 6.5-4.3 7.1-.4.6-1 1.6-1.2 2.3-.4.9-6.9 12-9.2 15.7-2.2 3.5-3.8 7-3.4 7.3.6.6 7.8-3.2 13.6-7.3 10.5-7.4 14-10 17-12.7 11.1-9.7 24.6-23 33.5-33.2 7.2-8.2 28.8-29.5 42.1-41.6 7-6.4 8.4-7.5 17.2-14.1 4.3-3.2 8.4-6.3 9-6.8.6-.5 4.8-3.7 9.4-7.2 4.5-3.5 8.8-6.8 9.5-7.5.7-.6 2-1.7 2.9-2.3.9-.6 2.1-1.5 2.5-1.9.5-.4 4.1-3.3 8-6.4 3.9-3.1 11.5-10.3 16.9-16.1 5.4-5.7 13.9-14.6 18.9-19.8 5-5.2 10.4-10.7 11.9-12.4 7.2-7.9 24.6-26.1 37.1-38.8 14.1-14.3 20.5-19.5 33.7-27.2 2.1-1.2 3.8-2.5 3.8-2.8 0-.3.4-.6.9-.6s1.4-.5 2.1-1c.7-.6 3.5-2.3 6.4-3.8 2.9-1.5 5.6-3.1 6-3.4.4-.3 4.3-2.4 8.6-4.6 4.3-2.2 8.5-4.5 9.4-5 .8-.5 2.3-1.2 3.4-1.6 1-.4 2.4-1.1 3-1.4.6-.4 3.4-1.8 6.2-3.2 4.3-2.1 5.2-2.3 5.8-1.4.5.8.3 1.7-.5 3-.7 1-10.1 10.8-20.9 21.6-10.8 10.9-20.8 21.1-22.2 22.8-1.4 1.7-3 3.6-3.7 4.3-.6.7-3.7 4.6-6.7 8.7-3.1 4.1-6.4 8.2-7.4 9.2-1.5 1.4-8.4 11.2-8.4 12 0 .4-34 52.1-35.5 53.9-2.1 2.7-7.3 10.2-7.3 10.6 0 .3-1 1.7-2.2 3.3-1.2 1.6-2.3 3.2-2.3 3.7s-.3.8-.8.8-.8.3-.8.7c0 .4-1.5 2.7-3.3 5.1-1.8 2.4-4.8 6.6-6.7 9.4-1.9 2.8-3.8 5.5-4.3 6s-2.5 3.3-4.6 6.2c-2.1 2.9-4 5.4-4.3 5.6-.3.2-1.5 1.7-2.7 3.4-1.2 1.7-4.1 4.9-6.5 7.3-2.4 2.3-4.3 4.5-4.3 4.7 0 .8-10.1 12.4-10.7 12.4-.3 0-2.2 1.7-4.1 3.8-1.9 2.1-4.8 4.6-6.3 5.6s-4.6 3.5-6.8 5.5c-4.4 4.1-10.7 9.1-11.5 9.1-.3 0-1.7.9-3.2 2.1-3 2.3-5.2 3.7-6.8 4.4-.6.3-2.8 1.6-4.9 3-2.1 1.4-4 2.5-4.3 2.5-.3 0-2 1-3.8 2.2-1.8 1.2-4.6 2.9-6.2 3.8-1.7.9-3.5 1.9-4.1 2.3-8.6 5.1-21.9 12.7-22.2 12.7-.3 0-1.7 1-3.2 2.1-1.5 1.2-3.5 2.4-4.4 2.7-.9.3-1.7.8-1.7 1.2 0 .4-.4.7-.8.7-.7 0-4.9 2.6-6 3.8-.2.2-1.9 1.4-3.8 2.6-1.9 1.2-6.7 4.7-10.7 7.7-4 3-7.5 5.5-7.9 5.5-.3 0-1.7.9-3 2.1-1.3 1.2-3 2.6-3.8 3.2-6 4.8-9 7.3-10 8.3-2.8 2.6-7.1 6.2-9.8 8.1-1.6 1.1-2.9 2.3-2.9 2.6 0 .3 5.1.6 11.4.6 6.3 0 13.1.3 15.2.7 7 1.2 11.2 2.3 11.9 3 .4.4 1.2.7 1.8.7 1 0 6.7 2.6 10.6 4.9 4 2.3 5.7 9.1 3.2 13-2 
3-3.4 4.7-4.2 4.7-.4 0-1.2.5-1.9 1.1-1.9 1.7-14.5 4.4-26.3 5.8-3.9.4-7.2 1.1-7.4 1.5 0 .5-28.3.5-28.3-.4zm-43.1-91.8c1-2.1 2.3-5.5 3-7.5.7-2 1.5-4.2 1.9-5 .4-.7.8-17.7.9-39.1.2-40.4.4-36.8-3.5-53.2-.5-2.3-1.4-6.1-1.9-8.6s-1.2-5.9-1.6-7.5c-.4-1.6-1.1-4.5-1.4-6.4-1.4-6.9-2.8-10.9-3.3-9.6-.7 1.8 0 25.8.9 31.4 2 11.6 2.6 17.6 3.1 28.9.5 12.6-.2 28.5-1.5 34-.8 3.5-1.1 45.4-.3 46.7.7 1.1 1.8-.1 3.7-4.1zm135 86c-2.9-1.5-5.6-5.4-5.6-8.2 0-2.6.4-3.2 3.3-5.4 3.9-3 11-1.5 15.3 3 2.3 2.4 1.5 7.4-1.5 9.9-2.6 2.2-7.8 2.5-11.5.7zm74.2-252.7c-2.4-2.5-2.6-3.1-2.6-7.5s.2-5 2.5-7.4c2.1-2.2 3-2.7 5.6-2.7 2.6 0 3.5.5 6.1 3.2 3.5 3.6 4.1 5.4 3.5 9.7-.6 4.4-2.9 6.3-8.3 6.9-4.2.6-4.2.6-6.8-2.2zm-117.9-6.9c-2.2-1.8-2.4-2.4-2.4-6 0-3.4.3-4.3 2.5-6.7 2.2-2.4 2.9-2.7 5.9-2.7 5.1.1 6.4.7 8.1 4 1.9 3.6 1.9 4.9 0 9.2-1.7 3.9-2.5 4.3-7.7 4.3-3.2-.1-4.4-.5-6.4-2.1z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:13.3736;stroke-dasharray:none;stroke-opacity:1\"></path>\n        </g>\n        <g\n          id=\"g1521\"\n          transform=\"matrix(.00691 .00485 -.00379 .0054 2184.655 117.132)\"\n          style=\"mix-blend-mode:screen;fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\">\n          <path\n            id=\"path1490\"\n            d=\"M357.76 95.153c-22.952.117-45.436 20.645-42.594 44.469 1.836 32.006 20.234 59.467 32.663 88.048 12.755 26.492 27.214 54.898 24.056 85.202-3.123 17.687-18.61 30.482-35.222 35.153-34.444 12.021-71.672 10.244-107.56 15.004-20.136 2.294-42.036 5.932-58.031 19.219-18.969 17.794-13.655 53.749 9.938 65.062 15.45 7.421 33.532 6.655 50.34 5.731 36.81-3.46 72.996-12.639 110.13-12.072 17.345.994 37.023 3.1 48.75 17.653 12.098 13.993 11.566 33.618 11.27 51.028-2.045 36.918-14.034 72.8-14.38 109.88.39 15.231 3.221 32.997 16.766 42.188 16.794 12.688 44.06 10.656 56.25-7.343 14.995-20.11 18.46-45.74 23.854-69.601 6.25-31.306 10.461-64.497 28.303-91.65 9.25-13.274 25.837-19.547 41.656-17.061 25.235 3.464 46.993 18.3 67.82 
31.979 32.63 22.36 62.672 49.916 99.836 64.646 16.008 5.926 35.864 4.576 48.281-7.97 14.764-13.197 20.306-37.347 8.563-54.25-15.713-25.411-43.07-39.946-66.868-56.735-27.284-18.116-56.205-35.874-75.226-63.202-9.464-15.012-10.742-35.278-.832-50.314 16.234-29.414 45.577-47.888 71.139-68.246 26.21-19.492 54.959-38.034 71.537-66.94 7.473-15.383 4.89-34.894-6.906-47.406-12.674-15.7-35.511-22.498-54.281-14.312-32.1 11.008-56.606 36.062-82.763 56.772-30.456 25.604-61.364 53.713-100.36 64.79-18.925 4.375-40.455-.954-53.375-16.03-27.892-27.827-41.671-65.796-61.375-99.281-11.067-19.89-24.026-41.086-45.187-51.281-5.146-2.105-10.61-3.338-16.188-3.125z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1491\"\n            d=\"M341.74 107.53c-66.912 33.562 69.382 157.19 32.325 222.23-36.774 64.545-214.64 11.063-206.07 84.853 8.813 75.882 172.95-18.338 226.27 36.366 49.328 50.605-41.169 197.06 28.284 210.11 71.099 13.36 37.658-163.62 105.06-189.91 79.361-30.953 193.81 150.75 242.44 80.812 45.391-65.281-147.38-106.39-149.5-185.87-2.214-82.953 197.19-137.14 145.46-202.03-58.317-73.157-166.78 127.05-258.6 109.1-76.644-14.986-95.859-200.68-165.66-165.67z\"\n            fill=\"#fff\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <g\n            id=\"g1493\"\n            transform=\"translate(-704.78 -1720.6)\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\">\n            <path\n              id=\"path1492\"\n              transform=\"translate(-100 -205.71)\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\"></path>\n            <path\n              id=\"path1493\"\n              
transform=\"matrix(.67859 0 0 .66252 279.35 501.65)\"\n              fill=\"#fff\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:14.465;stroke-dasharray:none;stroke-opacity:1\"></path>\n          </g>\n          <path\n            id=\"path1494\"\n            transform=\"translate(-1070 -2394.5) scale(1.2571)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:7.71529;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1495\"\n            transform=\"translate(-652.4 -1654.7) scale(.90358)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:10.7338;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1496\"\n            transform=\"translate(-1368 -2922) scale(1.5464)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:6.27191;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1497\"\n            transform=\"translate(-950.96 -2180.8) scale(1.1928)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:8.13119;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <g\n            id=\"g1499\"\n            transform=\"matrix(-1 0 0 1 1786.9 -1686.3)\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\">\n            <path\n              id=\"path1498\"\n              transform=\"translate(-100 -205.71)\"\n              d=\"M1198 2092.8a22.224 22.224 0 
1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\"></path>\n            <path\n              id=\"path1499\"\n              transform=\"matrix(.67859 0 0 .66252 279.35 501.65)\"\n              fill=\"#fff\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:14.465;stroke-dasharray:none;stroke-opacity:1\"></path>\n          </g>\n          <path\n            id=\"path1500\"\n            transform=\"matrix(-1.2571 0 0 1.2571 2120.6 -2374.5)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:7.71529;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1501\"\n            transform=\"matrix(-.90358 0 0 .90358 1703.1 -1634.7)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:10.7338;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1502\"\n            transform=\"translate(-1253.7 -2916.3) scale(1.5464)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:6.27191;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1503\"\n            transform=\"translate(-836.67 -2175.1) scale(1.1928)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:8.13119;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <g\n            id=\"g1505\"\n            transform=\"matrix(1 0 0 -1 -861.92 2299.3)\"\n            
style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\">\n            <path\n              id=\"path1504\"\n              transform=\"translate(-100 -205.71)\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\"></path>\n            <path\n              id=\"path1505\"\n              transform=\"matrix(.67859 0 0 .66252 279.35 501.65)\"\n              fill=\"#fff\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:14.465;stroke-dasharray:none;stroke-opacity:1\"></path>\n          </g>\n          <path\n            id=\"path1506\"\n            transform=\"matrix(1.2571 0 0 -1.2571 -1190 3030.3)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:7.71529;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1507\"\n            transform=\"matrix(.90358 0 0 -.90358 -772.4 2290.5)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:10.7338;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1508\"\n            transform=\"matrix(1.5464 0 0 -1.5464 -1436 3625)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:6.27191;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1509\"\n            transform=\"matrix(1.1928 0 0 -1.1928 -1019 2883.7)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 
0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:8.13119;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <g\n            id=\"g1511\"\n            transform=\"rotate(180 753.45 1253.95)\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\">\n            <path\n              id=\"path1510\"\n              transform=\"translate(-100 -205.71)\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\"></path>\n            <path\n              id=\"path1511\"\n              transform=\"matrix(.67859 0 0 .66252 279.35 501.65)\"\n              fill=\"#fff\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:14.465;stroke-dasharray:none;stroke-opacity:1\"></path>\n          </g>\n          <path\n            id=\"path1512\"\n            transform=\"rotate(180 961.75 1586.55) scale(1.2571)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:7.71529;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1513\"\n            transform=\"rotate(180 752.95 1216.7) scale(.90358)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:10.7338;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1514\"\n            transform=\"rotate(180 1137.45 1848.2) scale(1.5464)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            
style=\"fill:none;stroke:#251e5b;stroke-width:6.27191;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1515\"\n            transform=\"rotate(180 928.95 1477.6) scale(1.1928)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:8.13119;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <g\n            id=\"g1517\"\n            transform=\"matrix(-1 0 0 1 1798.3 -1363.5)\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\">\n            <path\n              id=\"path1516\"\n              transform=\"translate(-100 -205.71)\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:9.69888;stroke-dasharray:none;stroke-opacity:1\"></path>\n            <path\n              id=\"path1517\"\n              transform=\"matrix(.67859 0 0 .66252 279.35 501.65)\"\n              fill=\"#fff\"\n              d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n              style=\"fill:none;stroke:#251e5b;stroke-width:14.465;stroke-dasharray:none;stroke-opacity:1\"></path>\n          </g>\n          <path\n            id=\"path1518\"\n            transform=\"matrix(-1.2571 0 0 1.2571 2120.6 -2165.9)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:7.71529;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1519\"\n            transform=\"matrix(-.90358 0 0 .90358 1703.1 -1426.1)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            
style=\"fill:none;stroke:#251e5b;stroke-width:10.7338;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1520\"\n            transform=\"matrix(-1.5464 0 0 1.5464 2377.8 -2824.9)\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:6.27191;stroke-dasharray:none;stroke-opacity:1\"></path>\n          <path\n            id=\"path1521\"\n            transform=\"matrix(-1.1928 0 0 1.1928 1960.8 -2083.7)\"\n            fill=\"#fff\"\n            d=\"M1198 2092.8a22.224 22.224 0 1 1-44.447 0 22.224 22.224 0 1 1 44.447 0z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:8.13119;stroke-dasharray:none;stroke-opacity:1\"></path>\n        </g>\n        <g\n          id=\"g1522\"\n          transform=\"matrix(.13497 0 0 .13426 2149.064 -136.483)\"\n          style=\"mix-blend-mode:screen;fill:none;stroke:#251e5b;stroke-width:1.87865;stroke-dasharray:none;stroke-opacity:1\">\n          <ellipse\n            style=\"fill:none;fill-opacity:1;stroke:#251e5b;stroke-width:1.48573;stroke-linecap:round;stroke-miterlimit:4.4;stroke-dasharray:none;stroke-opacity:1\"\n            id=\"ellipse1521\"\n            cx=\"342.125\"\n            cy=\"1941.221\"\n            rx=\"20.503\"\n            ry=\"17.172\"></ellipse>\n          <path\n            style=\"fill:none;fill-rule:evenodd;stroke:#251e5b;stroke-width:5.61536;stroke-dasharray:none;stroke-opacity:1\"\n            id=\"path1522\"\n            d=\"M957.699 7272.336c-.98-3.417 4.215-2.973 5.762-1.652 4.457 3.806 1.738 10.699-2.365 13.5-7.499 5.117-17.455.722-21.459-6.67-5.964-11.01.317-24.43 11.078-29.539 14.505-6.886 31.53 1.37 37.708 15.552 7.847 18.01-2.433 38.714-20.076 45.948-21.532 8.828-45.962-3.505-54.245-24.64-9.825-25.07 4.585-53.262 29.235-62.59 22.134-8.377 47.533-.484 62.066 17.917\"\n            transform=\"matrix(.26458 0 0 .26458 92.302 14.826)\"></path>\n        </g>\n    
    <g\n          id=\"g1523\"\n          transform=\"matrix(.0146 -.01536 .01536 .0146 2168.591 122.06)\"\n          style=\"mix-blend-mode:screen;fill:#589f25;stroke:#251e5b;stroke-width:9.4408;stroke-dasharray:none;stroke-opacity:1\">\n          <path\n            id=\"path1523\"\n            class=\"st0\"\n            d=\"M218.4 514.1c0-.4-1.8-.9-3.9-1.2-2.2-.3-4.3-.8-4.7-1.1-.4-.3-1.6-.8-2.6-1.1-2.9-.9-8.1-5.4-9.9-8.7-1.8-3.2-2.4-3.4-5.6-1.9-4.4 2.2-9.9 4.4-17.1 6.7-5.6 1.8-16.2 1.7-20.4-.3-1.8-.9-3.4-1.9-3.7-2.3-.2-.4-1.3-.7-2.5-.7-2.2 0-10.7-2.8-11.4-3.7-.2-.3-1.6-1.3-3.1-2.2-5.5-3.3-11.8-12.7-15.6-23.4-1-2.8-2.5-6.4-4.2-10.3-.6-1.4-1.1-2.9-1.1-3.5 0-1-2.1-5.6-3-6.4-2.1-2.2-15.7-30.4-17.8-37.1-4-12.9-6.6-27.1-7.8-42.8-2-27.2-2.9-36.7-4.5-45-.4-2.1-1-5.9-1.5-8.6-.4-2.7-1.3-7.4-1.9-10.5-.6-3.1-1.4-7-1.8-8.6-.8-4.1-1.7-7.4-5.7-21-.7-2.5-2.3-6.6-3.5-9.1-1.2-2.5-2.1-4.8-2.1-5.1 0-.3-.6-1.4-1.4-2.5-1.3-1.9-2.5-4-8.9-15.8-1.4-2.6-3.9-6.9-5.4-9.4-1.5-2.5-3-5.1-3.3-5.7-.3-.6-1.6-2.8-2.8-4.9-1.3-2.1-2.7-4.7-3.2-5.8-.5-1.1-1.1-2.1-1.5-2.1-.4 0-.6-1.2-.6-2.6 0-1.9.3-2.6 1.2-2.6 1.4 0 20.2 18.6 25.5 25.1 6.3 7.8 15.6 22.2 19.7 30.4.9 1.9 2 3.9 2.3 4.5 2.1 3.6 11.3 23.8 11.3 24.8 0 .4.5 1.7 1 2.8 2 3.9 3.5 7.7 3.5 9 0 .7.3 1.6.7 2 .4.4 1.1 2.1 1.6 3.7.5 1.7 1.4 4.7 2 6.8 1.2 3.9 2.9 10.6 4 15.8.4 1.7 1 3.9 1.4 4.9.4 1 1.2 3.9 1.9 6.3 3.1 12.1 3.7 14.5 4.7 18 1.2 4.2 3.1 11.4 4.1 15.4 1.3 5.2 3.1 11.7 4.1 15.4.6 2.1 1.3 4.8 1.5 6.1.2 1.3.9 2.8 1.5 3.2.8.7 1.1.3 1.5-2 1.5-10.1 2.9-17.5 3.6-19.7.6-1.9.8-12.7.7-39.4-.2-36.8.2-46.2 2.5-56.3.7-2.9 1.5-6.6 1.9-8.3 1.8-8 3.1-11.7 6.1-17.2 6.6-12.4 11.2-21.7 11.2-22.6 0-.5-1-2.4-2.3-4.3-1.2-1.8-2.2-3.8-2.2-4.4 0-.6-.3-1.4-.7-1.8-.4-.4-1.1-2.2-1.5-4.1-.5-1.9-1.4-5.4-2.1-7.9-1.6-5.8-1.6-20.2 0-29.3.7-3.7 1.6-8.9 2-11.6.4-2.7 1.1-5.2 1.5-5.6.4-.4.7-1.5.7-2.4 0-.9.9-3.9 1.9-6.8 1.1-2.8 2.1-5.8 2.4-6.7 1.3-4.4 6.8-10.6 8.8-9.9 1 .4 1.1 1.7.7 8.9-.2 4.7-.8 9.5-1.2 10.9-.5 1.6-.5 2.9 0 3.9.4.8.8 2.1.8 3 0 .8.5 3.3 1.2 5.5.6 2.2 1.9 6.4 
2.7 9.3 3.4 11.7 7 19.9 8.7 19.9.9 0 8.7-7.4 27.5-25.9 3.2-3.2 7.2-8.4 8.7-11.3.2-.4 1.6-2.6 3.1-4.9 3.4-5.2 7.4-12.5 7.4-13.5 0-.4.5-1.9 1.1-3.2.6-1.3 1.2-3 1.3-3.9.3-1.8 3.9-2.5 4.6-.9.7 1.8-1.5 16.1-3.2 20.8-5 13.8-6.3 16.7-12.9 29.3-4.9 9.3-5.3 9.9-7.1 12.5-.9 1.3-1.7 2.7-1.7 3.1 0 .4-.4 1.2-1 1.8-.8.8-3.4 4.9-7.5 11.5-.5.8-1.8 3-3 4.7-2.4 3.7-2.5 5.5-.9 10.4 1.1 3.4 1.8 5 4.1 10.3 1.7 3.9 3 15.3 3.4 30 .4 13.9.7 15.6 4.6 33.4.6 2.7 1.2 5.9 1.4 7.1 2.1 13.8 2.9 18.4 3.3 18.4.2 0 1.3-1.3 2.4-2.8 3.1-4.3 4.5-6 8.3-10.4 2-2.2 3.6-4.3 3.6-4.7 0-1.7 16.9-19.5 30.1-31.8 3.9-3.6 9.4-8.7 12.2-11.4 2.8-2.7 5.5-4.9 5.8-4.9.3 0 1.8-1.1 3.4-2.5 1.5-1.4 7.2-5.8 12.5-9.9 5.4-4.1 10.2-7.8 10.9-8.4 1-.8 9.1-7.1 11.6-9.1 9.2-7 21.2-16.4 21.7-17.1.4-.5 2.3-2 4.1-3.4 3.8-2.9 11.6-9.4 13.8-11.6.8-.8 3.2-3 5.3-4.8 8.1-7.1 14.2-13.1 20-19.8 3.4-3.8 8.1-8.9 10.5-11.4 2.4-2.5 6.3-7 8.6-10.1 2.4-3.1 4.5-5.8 4.8-6 .2-.2 1.3-1.7 2.3-3.4 1-1.7 2.4-3.7 3.2-4.5.8-.8 2.9-3.7 4.7-6.5 1.8-2.7 3.7-5.4 4.3-6 1.1-1.1 5.8-8.7 5.8-9.3 0-.2 1.9-3.4 4.1-7.2 2.3-3.8 5.4-9.1 6.9-11.8 1.5-2.8 3.3-6 4-7.3.7-1.3 2.5-4.6 4-7.6 2.9-5.8 4.4-7.5 6.1-6.8 2.1.8.9 13.3-1.8 19.6-.4.8-1.2 3.5-2 6-.7 2.5-1.7 5.3-2.2 6.4-.5 1-1.5 3.4-2.2 5.3-1.3 3.2-3 7.3-6.5 15-.8 1.9-1.9 4.2-2.3 5.3-.4 1-1 2.4-1.4 3-.4.6-.9 1.6-1.1 2.3-.6 1.6-2.9 6.1-6.8 13.5-5.5 10.4-7.2 13.5-12.6 22.7-1 1.8-2.1 3.8-2.3 4.5-.3.7-.8 1.3-1.1 1.3-.4 0-.7.5-.7 1 0 .6-.9 2.3-2.1 3.9-1.2 1.6-2.3 3.4-2.6 4.1-.3.6-1.1 2-1.8 3.2-.7 1.1-2.6 4.2-4.2 6.8-1.6 2.6-4.9 7.4-7.2 10.7-2.3 3.3-6.3 8.9-8.7 12.5-2.4 3.6-5.3 7.6-6.4 9-1.1 1.4-5.3 6.9-9.3 12.3-4 5.4-8.3 10.9-9.6 12.4-1.3 1.5-3.5 4.1-4.9 6-1.4 1.8-4.7 5.7-7.2 8.6-2.5 2.9-5.4 6.3-6.4 7.5-5.9 7.1-16.7 18.7-26.1 28-5.9 5.9-10.9 10.7-11 10.7-.2 0-29.7 29.7-33.2 33.4-1.2 1.3-3.3 3.8-4.8 5.6-3.1 3.9-3.3 4.2-8 10.1-2 2.5-4.4 5.3-5.3 6.4-.9 1-1.7 2.2-1.7 2.7 0 .4-1.5 3-3.4 5.6-1.9 2.6-3.4 5.2-3.4 5.7s-.3 1-.6 1.2c-.3.1-2.2 3-4.1 6.3-1.9 3.3-3.9 6.5-4.3 7.1-.4.6-1 1.6-1.2 2.3-.4.9-6.9 12-9.2 15.7-2.2 3.5-3.8 
7-3.4 7.3.6.6 7.8-3.2 13.6-7.3 10.5-7.4 14-10 17-12.7 11.1-9.7 24.6-23 33.5-33.2 7.2-8.2 28.8-29.5 42.1-41.6 7-6.4 8.4-7.5 17.2-14.1 4.3-3.2 8.4-6.3 9-6.8.6-.5 4.8-3.7 9.4-7.2 4.5-3.5 8.8-6.8 9.5-7.5.7-.6 2-1.7 2.9-2.3.9-.6 2.1-1.5 2.5-1.9.5-.4 4.1-3.3 8-6.4 3.9-3.1 11.5-10.3 16.9-16.1 5.4-5.7 13.9-14.6 18.9-19.8 5-5.2 10.4-10.7 11.9-12.4 7.2-7.9 24.6-26.1 37.1-38.8 14.1-14.3 20.5-19.5 33.7-27.2 2.1-1.2 3.8-2.5 3.8-2.8 0-.3.4-.6.9-.6s1.4-.5 2.1-1c.7-.6 3.5-2.3 6.4-3.8 2.9-1.5 5.6-3.1 6-3.4.4-.3 4.3-2.4 8.6-4.6 4.3-2.2 8.5-4.5 9.4-5 .8-.5 2.3-1.2 3.4-1.6 1-.4 2.4-1.1 3-1.4.6-.4 3.4-1.8 6.2-3.2 4.3-2.1 5.2-2.3 5.8-1.4.5.8.3 1.7-.5 3-.7 1-10.1 10.8-20.9 21.6-10.8 10.9-20.8 21.1-22.2 22.8-1.4 1.7-3 3.6-3.7 4.3-.6.7-3.7 4.6-6.7 8.7-3.1 4.1-6.4 8.2-7.4 9.2-1.5 1.4-8.4 11.2-8.4 12 0 .4-34 52.1-35.5 53.9-2.1 2.7-7.3 10.2-7.3 10.6 0 .3-1 1.7-2.2 3.3-1.2 1.6-2.3 3.2-2.3 3.7s-.3.8-.8.8-.8.3-.8.7c0 .4-1.5 2.7-3.3 5.1-1.8 2.4-4.8 6.6-6.7 9.4-1.9 2.8-3.8 5.5-4.3 6s-2.5 3.3-4.6 6.2c-2.1 2.9-4 5.4-4.3 5.6-.3.2-1.5 1.7-2.7 3.4-1.2 1.7-4.1 4.9-6.5 7.3-2.4 2.3-4.3 4.5-4.3 4.7 0 .8-10.1 12.4-10.7 12.4-.3 0-2.2 1.7-4.1 3.8-1.9 2.1-4.8 4.6-6.3 5.6s-4.6 3.5-6.8 5.5c-4.4 4.1-10.7 9.1-11.5 9.1-.3 0-1.7.9-3.2 2.1-3 2.3-5.2 3.7-6.8 4.4-.6.3-2.8 1.6-4.9 3-2.1 1.4-4 2.5-4.3 2.5-.3 0-2 1-3.8 2.2-1.8 1.2-4.6 2.9-6.2 3.8-1.7.9-3.5 1.9-4.1 2.3-8.6 5.1-21.9 12.7-22.2 12.7-.3 0-1.7 1-3.2 2.1-1.5 1.2-3.5 2.4-4.4 2.7-.9.3-1.7.8-1.7 1.2 0 .4-.4.7-.8.7-.7 0-4.9 2.6-6 3.8-.2.2-1.9 1.4-3.8 2.6-1.9 1.2-6.7 4.7-10.7 7.7-4 3-7.5 5.5-7.9 5.5-.3 0-1.7.9-3 2.1-1.3 1.2-3 2.6-3.8 3.2-6 4.8-9 7.3-10 8.3-2.8 2.6-7.1 6.2-9.8 8.1-1.6 1.1-2.9 2.3-2.9 2.6 0 .3 5.1.6 11.4.6 6.3 0 13.1.3 15.2.7 7 1.2 11.2 2.3 11.9 3 .4.4 1.2.7 1.8.7 1 0 6.7 2.6 10.6 4.9 4 2.3 5.7 9.1 3.2 13-2 3-3.4 4.7-4.2 4.7-.4 0-1.2.5-1.9 1.1-1.9 1.7-14.5 4.4-26.3 5.8-3.9.4-7.2 1.1-7.4 1.5 0 .5-28.3.5-28.3-.4zm-43.1-91.8c1-2.1 2.3-5.5 3-7.5.7-2 1.5-4.2 1.9-5 
.4-.7.8-17.7.9-39.1.2-40.4.4-36.8-3.5-53.2-.5-2.3-1.4-6.1-1.9-8.6s-1.2-5.9-1.6-7.5c-.4-1.6-1.1-4.5-1.4-6.4-1.4-6.9-2.8-10.9-3.3-9.6-.7 1.8 0 25.8.9 31.4 2 11.6 2.6 17.6 3.1 28.9.5 12.6-.2 28.5-1.5 34-.8 3.5-1.1 45.4-.3 46.7.7 1.1 1.8-.1 3.7-4.1zm135 86c-2.9-1.5-5.6-5.4-5.6-8.2 0-2.6.4-3.2 3.3-5.4 3.9-3 11-1.5 15.3 3 2.3 2.4 1.5 7.4-1.5 9.9-2.6 2.2-7.8 2.5-11.5.7zm74.2-252.7c-2.4-2.5-2.6-3.1-2.6-7.5s.2-5 2.5-7.4c2.1-2.2 3-2.7 5.6-2.7 2.6 0 3.5.5 6.1 3.2 3.5 3.6 4.1 5.4 3.5 9.7-.6 4.4-2.9 6.3-8.3 6.9-4.2.6-4.2.6-6.8-2.2zm-117.9-6.9c-2.2-1.8-2.4-2.4-2.4-6 0-3.4.3-4.3 2.5-6.7 2.2-2.4 2.9-2.7 5.9-2.7 5.1.1 6.4.7 8.1 4 1.9 3.6 1.9 4.9 0 9.2-1.7 3.9-2.5 4.3-7.7 4.3-3.2-.1-4.4-.5-6.4-2.1z\"\n            style=\"fill:none;stroke:#251e5b;stroke-width:9.4408;stroke-dasharray:none;stroke-opacity:1\"></path>\n        </g>\n        <g\n          id=\"g1524\"\n          transform=\"matrix(.0523 -.01677 .01668 .05202 2117.592 30.896)\"\n          style=\"mix-blend-mode:screen;fill:none;stroke:#251e5b;stroke-width:4.61442;stroke-dasharray:none;stroke-opacity:1\">\n          <ellipse\n            style=\"fill:none;fill-opacity:1;stroke:#251e5b;stroke-width:4.61442;stroke-linecap:round;stroke-miterlimit:4.4;stroke-dasharray:none;stroke-opacity:1\"\n            id=\"ellipse1523\"\n            cx=\"342.125\"\n            cy=\"1941.221\"\n            rx=\"20.503\"\n            ry=\"17.172\"></ellipse>\n          <path\n            style=\"fill:none;fill-rule:evenodd;stroke:#251e5b;stroke-width:17.4403;stroke-dasharray:none;stroke-opacity:1\"\n            id=\"path1524\"\n            d=\"M957.699 7272.336c-.98-3.417 4.215-2.973 5.762-1.652 4.457 3.806 1.738 10.699-2.365 13.5-7.499 5.117-17.455.722-21.459-6.67-5.964-11.01.317-24.43 11.078-29.539 14.505-6.886 31.53 1.37 37.708 15.552 7.847 18.01-2.433 38.714-20.076 45.948-21.532 8.828-45.962-3.505-54.245-24.64-9.825-25.07 4.585-53.262 29.235-62.59 22.134-8.377 47.533-.484 62.066 17.917\"\n            transform=\"matrix(.26458 0 0 .26458 
92.302 14.826)\"></path>\n        </g>\n        <rect\n          style=\"opacity:1;mix-blend-mode:normal;fill:#201c4d;fill-opacity:1;stroke-width:.726814;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4.4\"\n          id=\"rect1524\"\n          width=\"157.406\"\n          height=\"31.095\"\n          x=\"2063.561\"\n          y=\"98.044\"\n          ry=\"1.587\"\n          rx=\"1.587\"\n          clip-path=\"url(#clipPath1165)\"></rect>\n      </g>\n    </g>\n  </g>\n</svg>\n"
  },
  {
    "path": "packages/frontend/src/lib/images/PodIcon.svelte",
    "content": "<script lang=\"ts\">\nexport let size = '40';\nexport let solid = false;\n</script>\n\n<svg\n  width={size}\n  height={size}\n  class={$$props.class}\n  style={$$props.style}\n  viewBox=\"0.557 0.555 5.24 5.24\"\n  version=\"1.1\"\n  xml:space=\"preserve\"\n  xmlns=\"http://www.w3.org/2000/svg\"\n  xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n  {#if solid}\n    <defs>\n      <mask id=\"podmask\">\n        <rect x=\"-20\" y=\"-20\" width=\"100\" height=\"100\" fill=\"white\"></rect>\n        <path\n          style=\"stroke-linejoin:round;stroke-miterlimit:10;fill-rule:evenodd;stroke-width:1.3004\"\n          d=\"m-10.69 15.912-1.2244 0.38102c-0.07756 0.02078-0.08938 0.04372-0.08938 0.13709l-7.81e-4 1.4146-1.1746 0.36557c-0.05866 0.01854-0.09608 0.07594-0.08938 0.13709l-2e-3 1.4025c-9.6e-5 0.04783 0.02628 0.09181 0.06854 0.11424l1.219 0.64982c0.04484 0.0259 0.0808 0.02977 0.13239 0l1.1982-0.63907 1.1968 0.63907c0.070707 0.04082 0.10676 0.01595 0.1344 0l1.2163-0.64982c0.042259-0.02243 0.06864-0.0664 0.068544-0.11424v-1.3823c-1.251e-4 -0.0029-3.492e-4 -0.0059-6.72e-4 -0.0088 0.012965-0.06474-0.025016-0.12882-0.088031-0.14851l-1.176-0.36621v-1.4065c0-0.0864-0.0025-0.11749-0.088704-0.14448l-1.2244-0.38102c-0.02557-0.0069-0.05523-0.0056-0.07591-4.3e-5zm0.03761 0.25608 0.79027 0.24662-0.79027 0.24864-0.79228-0.24864zm1.0947 0.45763v1.1303l-0.96028 0.41328-0.0046-1.2311zm-2.1894 0.0021 0.96566 0.31046-0.0046 1.2311-0.96028-0.41328zm-0.17002 1.4609 0.73718 0.26275-0.73718 0.22982-0.7896-0.24662zm2.5301 0 0.7896 0.24595-0.7896 0.24662-0.73785-0.2305zm-1.4354 0.45427v1.1303l-0.95961 0.51072-0.0053-1.3285zm2.5301 0v1.1303l-0.96028 0.51072-0.00537-1.3285zm-4.7194 0.0027 0.96566 0.30979-0.0053 1.3312-0.96028-0.5134zm2.5301 0 0.96499 0.30979-0.00537 1.3312-0.95961-0.5134z\"\n        ></path>\n      </mask>\n    </defs>\n    <g transform=\"translate(13.828 -15.047)\">\n      <path\n        
style=\"fill:currentColor;fill-rule:nonzero;stroke-width:1;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none\"\n        mask=\"url(#podmask)\"\n        d=\"m -10.69,15.912 -1.2244,0.38102 c -0.07756,0.02078 -0.08938,0.04372 -0.08938,0.13709 l -7.81e-4,1.4146 -1.1746,0.36557 c -0.05866,0.01854 -0.09608,0.07594 -0.08938,0.13709 l -0.002,1.4025 c -9.6e-5,0.04783 0.02628,0.09181 0.06854,0.11424 l 1.219,0.64982 c 0.04484,0.0259 0.0808,0.02977 0.13239,0 l 1.1982,-0.63907 1.1968,0.63907 c 0.070707,0.04082 0.10676,0.01595 0.1344,0 l 1.2163,-0.64982 c 0.042259,-0.02243 0.06864,-0.0664 0.068544,-0.11424 v -1.3823 c -1.251e-4,-0.0029 -3.492e-4,-0.0059 -6.72e-4,-0.0088 0.012965,-0.06474 -0.025016,-0.12882 -0.088031,-0.14851 l -1.176,-0.36621 v -1.4065 c 0,-0.0864 -0.0025,-0.11749 -0.088704,-0.14448 l -1.2244,-0.38102 c -0.02557,-0.0069 -0.05523,-0.0056 -0.07591,-4.3e-5 z\"\n      ></path>\n    </g>\n  {:else}\n    <g transform=\"translate(13.828 -15.047)\">\n      <path\n        style=\"fill:currentColor;fill-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:10;stroke-width:1.3004\"\n        d=\"m-10.69 15.912-1.2244 0.38102c-0.07756 0.02078-0.08938 0.04372-0.08938 0.13709l-7.81e-4 1.4146-1.1746 0.36557c-0.05866 0.01854-0.09608 0.07594-0.08938 0.13709l-2e-3 1.4025c-9.6e-5 0.04783 0.02628 0.09181 0.06854 0.11424l1.219 0.64982c0.04484 0.0259 0.0808 0.02977 0.13239 0l1.1982-0.63907 1.1968 0.63907c0.070707 0.04082 0.10676 0.01595 0.1344 0l1.2163-0.64982c0.042259-0.02243 0.06864-0.0664 0.068544-0.11424v-1.3823c-1.251e-4 -0.0029-3.492e-4 -0.0059-6.72e-4 -0.0088 0.012965-0.06474-0.025016-0.12882-0.088031-0.14851l-1.176-0.36621v-1.4065c0-0.0864-0.0025-0.11749-0.088704-0.14448l-1.2244-0.38102c-0.02557-0.0069-0.05523-0.0056-0.07591-4.3e-5zm0.03761 0.25608 0.79027 0.24662-0.79027 0.24864-0.79228-0.24864zm1.0947 0.45763v1.1303l-0.96028 0.41328-0.0046-1.2311zm-2.1894 0.0021 0.96566 0.31046-0.0046 1.2311-0.96028-0.41328zm-0.17002 1.4609 0.73718 0.26275-0.73718 
0.22982-0.7896-0.24662zm2.5301 0 0.7896 0.24595-0.7896 0.24662-0.73785-0.2305zm-1.4354 0.45427v1.1303l-0.95961 0.51072-0.0053-1.3285zm2.5301 0v1.1303l-0.96028 0.51072-0.00537-1.3285zm-4.7194 0.0027 0.96566 0.30979-0.0053 1.3312-0.96028-0.5134zm2.5301 0 0.96499 0.30979-0.00537 1.3312-0.95961-0.5134z\"\n      ></path>\n    </g>\n  {/if}\n</svg>\n"
  },
  {
    "path": "packages/frontend/src/lib/images/VSCodeIcon.svelte",
    "content": "<script lang=\"ts\">\nexport let size = '20';\n</script>\n\n<svg\n  width={size}\n  height={size}\n  class={$$props.class}\n  viewBox=\"0 0 100 100\"\n  fill=\"none\"\n  xmlns=\"http://www.w3.org/2000/svg\">\n  <mask id=\"mask0\" maskUnits=\"userSpaceOnUse\" x=\"0\" y=\"0\" width=\"100\" height=\"100\">\n    <path\n      fill-rule=\"evenodd\"\n      clip-rule=\"evenodd\"\n      d=\"M70.9119 99.3171C72.4869 99.9307 74.2828 99.8914 75.8725 99.1264L96.4608 89.2197C98.6242 88.1787 100 85.9892 100 83.5872V16.4133C100 14.0113 98.6243 11.8218 96.4609 10.7808L75.8725 0.873756C73.7862 -0.130129 71.3446 0.11576 69.5135 1.44695C69.252 1.63711 69.0028 1.84943 68.769 2.08341L29.3551 38.0415L12.1872 25.0096C10.589 23.7965 8.35363 23.8959 6.86933 25.2461L1.36303 30.2549C-0.452552 31.9064 -0.454633 34.7627 1.35853 36.417L16.2471 50.0001L1.35853 63.5832C-0.454633 65.2374 -0.452552 68.0938 1.36303 69.7453L6.86933 74.7541C8.35363 76.1043 10.589 76.2037 12.1872 74.9905L29.3551 61.9587L68.769 97.9167C69.3925 98.5406 70.1246 99.0104 70.9119 99.3171ZM75.0152 27.2989L45.1091 50.0001L75.0152 72.7012V27.2989Z\"\n      fill=\"white\"></path>\n  </mask>\n  <g mask=\"url(#mask0)\">\n    <path\n      d=\"M96.4614 10.7962L75.8569 0.875542C73.4719 -0.272773 70.6217 0.211611 68.75 2.08333L1.29858 63.5832C-0.515693 65.2373 -0.513607 68.0937 1.30308 69.7452L6.81272 74.754C8.29793 76.1042 10.5347 76.2036 12.1338 74.9905L93.3609 13.3699C96.086 11.3026 100 13.2462 100 16.6667V16.4275C100 14.0265 98.6246 11.8378 96.4614 10.7962Z\"\n      fill=\"#0065A9\"></path>\n    <g filter=\"url(#filter0_d)\">\n      <path\n        d=\"M96.4614 89.2038L75.8569 99.1245C73.4719 100.273 70.6217 99.7884 68.75 97.9167L1.29858 36.4169C-0.515693 34.7627 -0.513607 31.9063 1.30308 30.2548L6.81272 25.246C8.29793 23.8958 10.5347 23.7964 12.1338 25.0095L93.3609 86.6301C96.086 88.6974 100 86.7538 100 83.3334V83.5726C100 85.9735 98.6246 88.1622 96.4614 89.2038Z\"\n        fill=\"#007ACC\"></path>\n    </g>\n    <g 
filter=\"url(#filter1_d)\">\n      <path\n        d=\"M75.8578 99.1263C73.4721 100.274 70.6219 99.7885 68.75 97.9166C71.0564 100.223 75 98.5895 75 95.3278V4.67213C75 1.41039 71.0564 -0.223106 68.75 2.08329C70.6219 0.211402 73.4721 -0.273666 75.8578 0.873633L96.4587 10.7807C98.6234 11.8217 100 14.0112 100 16.4132V83.5871C100 85.9891 98.6234 88.1786 96.4586 89.2196L75.8578 99.1263Z\"\n        fill=\"#1F9CF0\"></path>\n    </g>\n    <g style=\"mix-blend-mode:overlay\" opacity=\"0.25\">\n      <path\n        fill-rule=\"evenodd\"\n        clip-rule=\"evenodd\"\n        d=\"M70.8511 99.3171C72.4261 99.9306 74.2221 99.8913 75.8117 99.1264L96.4 89.2197C98.5634 88.1787 99.9392 85.9892 99.9392 83.5871V16.4133C99.9392 14.0112 98.5635 11.8217 96.4001 10.7807L75.8117 0.873695C73.7255 -0.13019 71.2838 0.115699 69.4527 1.44688C69.1912 1.63705 68.942 1.84937 68.7082 2.08335L29.2943 38.0414L12.1264 25.0096C10.5283 23.7964 8.29285 23.8959 6.80855 25.246L1.30225 30.2548C-0.513334 31.9064 -0.515415 34.7627 1.29775 36.4169L16.1863 50L1.29775 63.5832C-0.515415 65.2374 -0.513334 68.0937 1.30225 69.7452L6.80855 74.754C8.29285 76.1042 10.5283 76.2036 12.1264 74.9905L29.2943 61.9586L68.7082 97.9167C69.3317 98.5405 70.0638 99.0104 70.8511 99.3171ZM74.9544 27.2989L45.0483 50L74.9544 72.7012V27.2989Z\"\n        fill=\"url(#paint0_linear)\"></path>\n    </g>\n  </g>\n  <defs>\n    <filter\n      id=\"filter0_d\"\n      x=\"-8.39411\"\n      y=\"15.8291\"\n      width=\"116.727\"\n      height=\"92.2456\"\n      filterUnits=\"userSpaceOnUse\"\n      color-interpolation-filters=\"sRGB\">\n      <feFlood flood-opacity=\"0\" result=\"BackgroundImageFix\"></feFlood>\n      <feColorMatrix in=\"SourceAlpha\" type=\"matrix\" values=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0\"></feColorMatrix>\n      <feOffset></feOffset>\n      <feGaussianBlur stdDeviation=\"4.16667\"></feGaussianBlur>\n      <feColorMatrix type=\"matrix\" values=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.25 0\"></feColorMatrix>\n      
<feBlend mode=\"overlay\" in2=\"BackgroundImageFix\" result=\"effect1_dropShadow\"></feBlend>\n      <feBlend mode=\"normal\" in=\"SourceGraphic\" in2=\"effect1_dropShadow\" result=\"shape\"></feBlend>\n    </filter>\n    <filter\n      id=\"filter1_d\"\n      x=\"60.4167\"\n      y=\"-8.07558\"\n      width=\"47.9167\"\n      height=\"116.151\"\n      filterUnits=\"userSpaceOnUse\"\n      color-interpolation-filters=\"sRGB\">\n      <feFlood flood-opacity=\"0\" result=\"BackgroundImageFix\"></feFlood>\n      <feColorMatrix in=\"SourceAlpha\" type=\"matrix\" values=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0\"></feColorMatrix>\n      <feOffset></feOffset>\n      <feGaussianBlur stdDeviation=\"4.16667\"></feGaussianBlur>\n      <feColorMatrix type=\"matrix\" values=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.25 0\"></feColorMatrix>\n      <feBlend mode=\"overlay\" in2=\"BackgroundImageFix\" result=\"effect1_dropShadow\"></feBlend>\n      <feBlend mode=\"normal\" in=\"SourceGraphic\" in2=\"effect1_dropShadow\" result=\"shape\"></feBlend>\n    </filter>\n    <linearGradient\n      id=\"paint0_linear\"\n      x1=\"49.9392\"\n      y1=\"0.257812\"\n      x2=\"49.9392\"\n      y2=\"99.7423\"\n      gradientUnits=\"userSpaceOnUse\">\n      <stop stop-color=\"white\"></stop>\n      <stop offset=\"1\" stop-color=\"white\" stop-opacity=\"0\"></stop>\n    </linearGradient>\n  </defs>\n</svg>\n"
  },
  {
    "path": "packages/frontend/src/lib/instructlab/AboutInstructLabDiscoverCard.svelte",
    "content": "<script lang=\"ts\">\nimport { Button } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\n\ninterface Props {\n  title: string;\n  desc: string;\n  link: string;\n  image: string;\n}\n\nlet { title, desc, link, image }: Props = $props();\n\nasync function openLink(): Promise<void> {\n  await studioClient.openURL(link);\n}\n</script>\n\n<div\n  class=\"flex flex-col items-center justify-top rounded-md text-white relative bg-cover bg-center h-[300px]\"\n  style={`background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0.7), rgba(0, 0, 0, 0) 80%), url(${image});`}>\n  <div class=\"text-lg font-bold mt-2 px-2\">{title}</div>\n  <div class=\"text-sm my-2 px-2\">{desc}</div>\n  <Button on:click={openLink}>Get started</Button>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/instructlab/AboutInstructLabExploreCard.svelte",
    "content": "<script lang=\"ts\">\nimport { Button } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\n\ninterface Props {\n  title?: string;\n  link: string;\n  image: string;\n  isVideo?: boolean;\n}\n\nlet { title, link, image, isVideo = false }: Props = $props();\n\nasync function openLink(): Promise<void> {\n  await studioClient.openURL(link);\n}\n</script>\n\n<div\n  class=\"flex flex-col flex-1 justify-between gap-4 m-2 bg-[var(--pd-content-card-carousel-card-bg)] hover:bg-[var(--pd-content-card-carousel-card-hover-bg)] rounded-md items-center\">\n  <img src={image} class=\"h-[200px] w-full object-cover rounded-md\" alt={`${title} image`} />\n  <div class=\"text-[var(--pd-content-card-carousel-card-header-text)] text-center font-semibold px-2\">{title}</div>\n  <Button on:click={openLink} class=\"self-center mb-2\">{isVideo ? 'Watch' : 'Read more'}</Button>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/markdown/LinkComponent.svelte",
    "content": "<script lang=\"ts\">\nimport { studioClient } from '/@/utils/client.js';\n\nexport let href: string = '';\nexport let title: string | undefined = undefined;\nexport let text: string = '';\n\nconst onClick = (): void => {\n  studioClient.openURL(href).catch(err => console.error(`Error opening link ${href}:`, err));\n};\n</script>\n\n<!-- href set to void operator to avoid any redirect -->\n<a href=\"javascript:void(0);\" class=\"break-all\" role=\"button\" title={title} on:click={onClick}>{text}</a>\n"
  },
  {
    "path": "packages/frontend/src/lib/markdown/MarkdownRenderer.svelte",
    "content": "<script lang=\"ts\">\nimport SvelteMarkdown from 'svelte-markdown';\nimport LinkComponent from '/@/lib/markdown/LinkComponent.svelte';\n\nexport let source: string | undefined;\n</script>\n\n<article class=\"prose min-w-full text-base\">\n  <SvelteMarkdown source={source ?? ''} renderers={{ link: LinkComponent }} />\n</article>\n"
  },
  {
    "path": "packages/frontend/src/lib/monaco-editor/MonacoEditor.svelte",
    "content": "<script lang=\"ts\">\nimport { onDestroy, onMount } from 'svelte';\nimport type * as Monaco from 'monaco-editor/esm/vs/editor/editor.api.js';\nimport './monaco';\nimport type { HTMLAttributes } from 'svelte/elements';\n\ninterface Props extends HTMLAttributes<HTMLElement> {\n  content: string;\n  // supported languages https://github.com/microsoft/monaco-editor/tree/main/src/basic-languages\n  language: string;\n  readOnly?: boolean;\n  onChange?: (content: string) => void;\n  noMinimap?: boolean;\n}\n\nlet {\n  content = $bindable(),\n  language,\n  readOnly = false,\n  onChange,\n  class: className,\n  noMinimap,\n  ...restProps\n}: Props = $props();\n\nlet editorInstance: Monaco.editor.IStandaloneCodeEditor;\nlet editorContainer: HTMLElement;\n\nfunction getTerminalBg(): string {\n  const app = document.getElementById('app');\n  if (!app) throw new Error('cannot find app element');\n  const style = window.getComputedStyle(app);\n\n  let color = style.getPropertyValue('--pd-terminal-background').trim();\n\n  // convert to 6 char RGB value since some things don't support 3 char format\n  if (color?.length < 6) {\n    color = color\n      .split('')\n      .map(c => {\n        return c === '#' ? c : c + c;\n      })\n      .join('');\n  }\n  return color;\n}\n\nonMount(async () => {\n  const terminalBg = getTerminalBg();\n  const isDarkTheme: boolean = terminalBg === '#000000';\n\n  // solution from https://github.com/vitejs/vite/discussions/1791#discussioncomment-9281911\n  import('monaco-editor/esm/vs/editor/editor.api.js')\n    .then(monaco => {\n      // define custom theme\n      monaco.editor.defineTheme('podmanDesktopTheme', {\n        base: isDarkTheme ? 
'vs-dark' : 'vs',\n        inherit: true,\n        rules: [{ token: 'custom-color', background: terminalBg }],\n        colors: {\n          'editor.background': terminalBg,\n          // make the --vscode-focusBorder transparent\n          focusBorder: '#00000000',\n        },\n      });\n\n      editorInstance = monaco.editor.create(editorContainer, {\n        value: content,\n        language: language,\n        automaticLayout: true,\n        scrollBeyondLastLine: false,\n        readOnly: readOnly,\n        theme: 'podmanDesktopTheme',\n        glyphMargin: true, // Enable glyph margin\n        minimap: {\n          enabled: !noMinimap,\n        },\n      });\n\n      editorInstance.onDidChangeModelContent(() => {\n        content = editorInstance.getValue();\n        onChange?.(content);\n      });\n    })\n    .catch(console.error);\n});\n\nonDestroy(() => {\n  editorInstance?.dispose();\n});\n</script>\n\n<div class=\"h-full w-full {className}\" {...restProps} bind:this={editorContainer}></div>\n"
  },
  {
    "path": "packages/frontend/src/lib/monaco-editor/monaco.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport * as monaco from 'monaco-editor';\nimport editorWorker from 'monaco-editor/esm/vs/editor/editor.worker?worker';\n\nself.MonacoEnvironment = {\n  getWorker(_: unknown): Worker {\n    return new editorWorker();\n  },\n};\n\nmonaco.typescript?.typescriptDefaults?.setEagerModelSync(true);\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/ContainerConnectionStatusInfo.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport ContainerConnectionStatusInfo from './ContainerConnectionStatusInfo.svelte';\nimport type { ContainerConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { studioClient } from '/@/utils/client';\nimport { filesize } from 'filesize';\nimport userEvent from '@testing-library/user-event';\n\nvi.mock('/@/utils/client', async () => {\n  return {\n    studioClient: {\n      navigateToResources: vi.fn(),\n      navigateToEditConnectionProvider: vi.fn(),\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(studioClient.navigateToResources).mockReturnValue(Promise.resolve());\n  vi.mocked(studioClient.navigateToEditConnectionProvider).mockReturnValue(Promise.resolve());\n});\n\ntest('should not show anything if there is no title or description', async () => {\n  const connectionInfo: ContainerConnectionInfo = {\n    name: 'Podman machine',\n    status: 'running',\n    canRedirect: true,\n  };\n  render(ContainerConnectionStatusInfo, { connectionInfo });\n\n  const 
banner = screen.queryByLabelText('Container connection info banner');\n  expect(banner).not.toBeInTheDocument();\n});\n\ntest('should show no running machine banner if there is no running machine', async () => {\n  const navigateMock = vi.spyOn(studioClient, 'navigateToResources');\n  const noMachineInfo: ContainerConnectionInfo = {\n    status: 'no-machine',\n    canRedirect: true,\n  };\n\n  render(ContainerConnectionStatusInfo, { connectionInfo: noMachineInfo });\n\n  const banner = screen.getByLabelText('Container connection info banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('No Podman Machine is running');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals('Please start a Podman Machine before proceeding further.');\n\n  const btnStart = screen.getByRole('button', { name: 'Start now' });\n  expect(btnStart).toBeInTheDocument();\n\n  await userEvent.click(btnStart);\n\n  expect(navigateMock).toBeCalled();\n});\n\ntest('should show no running machine banner if there is no running machine and no action if canRedirect is disabled', async () => {\n  const noMachineInfo: ContainerConnectionInfo = {\n    status: 'no-machine',\n    canRedirect: false,\n  };\n\n  render(ContainerConnectionStatusInfo, { connectionInfo: noMachineInfo });\n\n  const banner = screen.getByLabelText('Container connection info banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('No Podman Machine is running');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals('Please start a Podman Machine before proceeding further.');\n\n  
const btnStart = screen.queryByRole('button', { name: 'Start now' });\n  expect(btnStart).not.toBeInTheDocument();\n});\n\ntest('should show lowResourcesMachine banner if the running machine has not enough resources and both canEdit and canRedirect are true', async () => {\n  const navigateMock = vi.spyOn(studioClient, 'navigateToEditConnectionProvider');\n  const connectionInfo: ContainerConnectionInfo = {\n    name: 'Podman Machine',\n    canEdit: true,\n    canRedirect: true,\n    cpus: 4,\n    cpusExpected: 10,\n    memoryExpected: 10,\n    memoryIdle: 5,\n    status: 'low-resources',\n  };\n\n  render(ContainerConnectionStatusInfo, { connectionInfo });\n\n  const banner = screen.getByLabelText('Container connection info banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('Update your Podman Machine to improve performance');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals(\n    `Your Podman Machine has ${connectionInfo.cpus} vCPUs and ${filesize(connectionInfo.memoryIdle, { base: 2 })} of memory available. 
We recommend updating your Podman Machine to at least ${connectionInfo.cpusExpected} vCPUs and ${filesize(connectionInfo.memoryExpected, { base: 2 })} of memory for better AI performance.`,\n  );\n\n  const btnUpdate = screen.getByRole('button', { name: 'Update now' });\n  expect(btnUpdate).toBeInTheDocument();\n\n  await userEvent.click(btnUpdate);\n\n  expect(navigateMock).toBeCalledWith('Podman Machine');\n});\n\ntest('should show lowResourcesMachine banner if the running machine has not enough cpus and both canEdit and canRedirect are true', async () => {\n  const navigateMock = vi.spyOn(studioClient, 'navigateToEditConnectionProvider');\n  const connectionInfo: ContainerConnectionInfo = {\n    name: 'Podman Machine',\n    canEdit: true,\n    canRedirect: true,\n    cpus: 4,\n    cpusExpected: 10,\n    memoryExpected: 4,\n    memoryIdle: 5,\n    status: 'low-resources',\n  };\n\n  render(ContainerConnectionStatusInfo, { connectionInfo });\n\n  const banner = screen.getByLabelText('Container connection info banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('Update your Podman Machine to improve performance');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals(\n    `Your Podman Machine has ${connectionInfo.cpus} vCPUs. 
We recommend updating your Podman Machine to at least ${connectionInfo.cpusExpected} vCPUs for better AI performance.`,\n  );\n\n  const btnUpdate = screen.getByRole('button', { name: 'Update now' });\n  expect(btnUpdate).toBeInTheDocument();\n\n  await userEvent.click(btnUpdate);\n\n  expect(navigateMock).toBeCalledWith('Podman Machine');\n});\n\ntest('should show lowResourcesMachine banner if the running machine has not enough memory and both canEdit and canRedirect are true', async () => {\n  const navigateMock = vi.spyOn(studioClient, 'navigateToEditConnectionProvider');\n  const connectionInfo: ContainerConnectionInfo = {\n    name: 'Podman Machine',\n    canEdit: true,\n    canRedirect: true,\n    cpus: 12,\n    cpusExpected: 10,\n    memoryExpected: 10,\n    memoryIdle: 5,\n    status: 'low-resources',\n  };\n\n  render(ContainerConnectionStatusInfo, { connectionInfo });\n\n  const banner = screen.getByLabelText('Container connection info banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('Update your Podman Machine to improve performance');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals(\n    `Your Podman Machine has ${filesize(connectionInfo.memoryIdle, { base: 2 })} of memory available. 
We recommend updating your Podman Machine to at least ${filesize(connectionInfo.memoryExpected, { base: 2 })} of memory for better AI performance.`,\n  );\n\n  const btnUpdate = screen.getByRole('button', { name: 'Update now' });\n  expect(btnUpdate).toBeInTheDocument();\n\n  await userEvent.click(btnUpdate);\n\n  expect(navigateMock).toBeCalledWith('Podman Machine');\n});\n\ntest('should show lowResourcesMachine banner without action if the running machine has not enough resources but canEdit is false', async () => {\n  const connectionInfo: ContainerConnectionInfo = {\n    name: 'Podman Machine',\n    canEdit: false,\n    canRedirect: true,\n    cpus: 4,\n    cpusExpected: 10,\n    memoryExpected: 10,\n    memoryIdle: 5,\n    status: 'low-resources',\n  };\n\n  render(ContainerConnectionStatusInfo, { connectionInfo });\n\n  const banner = screen.getByLabelText('Container connection info banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('Update your Podman Machine to improve performance');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals(\n    `Your Podman Machine has ${connectionInfo.cpus} vCPUs and ${filesize(connectionInfo.memoryIdle, { base: 2 })} of memory available. 
We recommend freeing some resources on your Podman Machine to have at least ${connectionInfo.cpusExpected} vCPUs and ${filesize(connectionInfo.memoryExpected, { base: 2 })} of memory for better AI performance.`,\n  );\n\n  const btnUpdate = screen.queryByRole('button', { name: 'Update now' });\n  expect(btnUpdate).not.toBeInTheDocument();\n});\n\ntest('should show lowResourcesMachine banner without action if the running machine has not enough resources but canRedirect is false', async () => {\n  const connectionInfo: ContainerConnectionInfo = {\n    name: 'Podman Machine',\n    canEdit: true,\n    canRedirect: false,\n    cpus: 4,\n    cpusExpected: 10,\n    memoryExpected: 10,\n    memoryIdle: 5,\n    status: 'low-resources',\n  };\n\n  render(ContainerConnectionStatusInfo, { connectionInfo });\n\n  const banner = screen.getByLabelText('Container connection info banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('Update your Podman Machine to improve performance');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals(\n    `Your Podman Machine has ${connectionInfo.cpus} vCPUs and ${filesize(connectionInfo.memoryIdle, { base: 2 })} of memory available. We recommend updating your Podman Machine to at least ${connectionInfo.cpusExpected} vCPUs and ${filesize(connectionInfo.memoryExpected, { base: 2 })} of memory for better AI performance.`,\n  );\n\n  const btnUpdate = screen.queryByRole('button', { name: 'Update now' });\n  expect(btnUpdate).not.toBeInTheDocument();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/ContainerConnectionStatusInfo.svelte",
    "content": "<script lang=\"ts\">\nimport Fa from 'svelte-fa';\nimport { faTriangleExclamation } from '@fortawesome/free-solid-svg-icons';\nimport { filesize } from 'filesize';\nimport { studioClient } from '/@/utils/client';\nimport type { ContainerConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { Button } from '@podman-desktop/ui-svelte';\n\nexport let connectionInfo: ContainerConnectionInfo;\n\nlet title: string | undefined = '';\nlet description: string | undefined = '';\nlet actionName: string | undefined = '';\n$: updateTitleDescription(connectionInfo);\n\nfunction updateTitleDescription(connectionInfo: ContainerConnectionInfo): void {\n  if (connectionInfo.status === 'native') {\n    return;\n  }\n\n  if (connectionInfo.status === 'no-machine') {\n    title = 'No Podman Machine is running';\n    description = 'Please start a Podman Machine before proceeding further.';\n    actionName = connectionInfo.canRedirect ? 'Start now' : undefined;\n    return;\n  }\n\n  if (connectionInfo.status === 'low-resources') {\n    title = 'Update your Podman Machine to improve performance';\n\n    const hasEnoughCPU = connectionInfo.cpus >= connectionInfo.cpusExpected;\n    const hasEnoughMemory = connectionInfo.memoryIdle > connectionInfo.memoryExpected;\n\n    let machineCurrentStateDescription = '';\n    let machinePreferredStateDescription = '';\n    if (!hasEnoughCPU) {\n      machineCurrentStateDescription += `${connectionInfo.cpus} vCPUs`;\n      machinePreferredStateDescription += `${connectionInfo.cpusExpected} vCPUs`;\n      if (!hasEnoughMemory) {\n        machineCurrentStateDescription += ` and ${filesize(connectionInfo.memoryIdle, { base: 2 })} of memory available`;\n        machinePreferredStateDescription += ` and ${filesize(connectionInfo.memoryExpected, { base: 2 })} of memory`;\n      }\n    } else {\n      machineCurrentStateDescription += `${filesize(connectionInfo.memoryIdle, { base: 2 })} of memory available`;\n      
machinePreferredStateDescription += `${filesize(connectionInfo.memoryExpected, { base: 2 })} of memory`;\n    }\n\n    const machineName = `${connectionInfo.name.includes('Podman Machine') ? connectionInfo.name : `Podman Machine ${connectionInfo.name}`}`;\n    description = `Your ${machineName} has ${machineCurrentStateDescription}. `;\n\n    if (connectionInfo?.canEdit) {\n      description += `We recommend updating your Podman Machine to at least ${machinePreferredStateDescription} for better AI performance.`;\n      actionName = connectionInfo.canRedirect ? 'Update now' : undefined;\n    } else {\n      description += `We recommend freeing some resources on your Podman Machine to have at least ${machinePreferredStateDescription} for better AI performance.`;\n    }\n    return;\n  }\n\n  title = undefined;\n  description = undefined;\n  actionName = undefined;\n}\n\nfunction executeCommand(): void {\n  if (connectionInfo.canRedirect) {\n    if (connectionInfo.status === 'low-resources' && connectionInfo.canEdit) {\n      studioClient\n        .navigateToEditConnectionProvider(connectionInfo.name)\n        .catch(err => console.error(`Error navigating to connection ${connectionInfo.name}:`, err));\n      return;\n    }\n    if (connectionInfo.status == 'no-machine') {\n      studioClient.navigateToResources().catch(err => console.error(`Error navigating to resources:`, err));\n    }\n  }\n}\n</script>\n\n{#if title && description}\n  <div\n    class=\"w-full bg-[var(--pd-content-card-bg)] text-[var(--pd-content-card-text)] border-t-[3px] border-amber-500 p-4 mt-5 shadow-inner\"\n    aria-label=\"Container connection info banner\">\n    <div class=\"flex flex-row space-x-3\">\n      <div class=\"flex\">\n        <Fa icon={faTriangleExclamation} class=\"text-amber-400\" />\n      </div>\n      <div class=\"flex flex-col grow\">\n        <span class=\"font-medium\" aria-label=\"title\">{title}</span>\n        <span aria-label=\"description\">{description}</span>\n    
  </div>\n      {#if actionName}\n        <div class=\"flex items-center\">\n          <Button class=\"grow text-gray-500\" on:click={executeCommand} aria-label={actionName}>{actionName}</Button>\n        </div>\n      {/if}\n    </div>\n  </div>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/ContainerConnectionWrapper.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\n\nimport { render } from '@testing-library/svelte';\nimport { studioClient } from '../../utils/client';\nimport ContainerConnectionWrapper from '/@/lib/notification/ContainerConnectionWrapper.svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { VMType } from '@shared/models/IPodman';\n\nvi.mock('../../utils/client', async () => ({\n  studioClient: {\n    checkContainerConnectionStatusAndResources: vi.fn(),\n    getExtensionConfiguration: vi.fn(),\n  },\n  rpcBrowser: {\n    subscribe: (): unknown => {\n      return {\n        unsubscribe: (): void => {},\n      };\n    },\n  },\n}));\n\nconst modelMock: ModelInfo = {\n  name: 'Dummy',\n  description: '',\n  properties: {},\n  id: 'dummy-model-id',\n};\n\nconst connection: ContainerProviderConnectionInfo = {\n  status: 'started',\n  name: 'Podman machine',\n  type: 'podman',\n  providerId: 'podman',\n  vmType: VMType.QEMU,\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  
vi.mocked(studioClient.checkContainerConnectionStatusAndResources).mockResolvedValue({\n    name: 'Podman',\n    canRedirect: false,\n    status: 'running',\n  });\n  vi.mocked(studioClient.getExtensionConfiguration).mockResolvedValue({\n    experimentalGPU: false,\n    apiPort: 0,\n    experimentalTuning: false,\n    modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    modelUploadDisabled: false,\n    showGPUPromotion: false,\n    appearance: 'dark',\n  });\n});\n\ntest('model without memory should not check for status', async () => {\n  render(ContainerConnectionWrapper, {\n    model: modelMock,\n    containerProviderConnection: connection,\n  });\n  expect(studioClient.checkContainerConnectionStatusAndResources).not.toHaveBeenCalled();\n});\n\ntest('model with memory should check for status', async () => {\n  const memoryModel = { ...modelMock, memory: 1024 };\n  render(ContainerConnectionWrapper, {\n    model: memoryModel,\n    containerProviderConnection: connection,\n  });\n  expect(studioClient.checkContainerConnectionStatusAndResources).toHaveBeenCalledWith({\n    connection: connection,\n    model: memoryModel,\n    context: 'inference',\n  });\n});\n\ntest('context should be propagated', async () => {\n  const memoryModel = { ...modelMock, memory: 1024 };\n  render(ContainerConnectionWrapper, {\n    model: memoryModel,\n    containerProviderConnection: connection,\n    checkContext: 'recipe',\n  });\n  expect(studioClient.checkContainerConnectionStatusAndResources).toHaveBeenCalledWith({\n    connection: connection,\n    model: memoryModel,\n    context: 'recipe',\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/ContainerConnectionWrapper.svelte",
    "content": "<script lang=\"ts\">\nimport type { ContainerConnectionInfo, ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport type { ModelCheckerContext, ModelInfo } from '@shared/models/IModelInfo';\nimport ContainerConnectionStatusInfo from './ContainerConnectionStatusInfo.svelte';\nimport { studioClient } from '/@/utils/client';\nimport { configuration } from '/@/stores/extensionConfiguration';\nimport { fromStore } from 'svelte/store';\nimport GPUEnabledMachine from '/@/lib/notification/GPUEnabledMachine.svelte';\nimport { VMType } from '@shared/models/IPodman';\n\ninterface Props {\n  containerProviderConnection?: ContainerProviderConnectionInfo;\n  model?: ModelInfo;\n  checkContext?: ModelCheckerContext;\n}\n\nlet { containerProviderConnection, model, checkContext = 'inference' }: Props = $props();\n\nlet connectionInfo: ContainerConnectionInfo | undefined = $state();\nlet gpuWarningRequired: boolean = $derived(\n  !!(\n    containerProviderConnection &&\n    fromStore(configuration)?.current?.experimentalGPU &&\n    shouldRecommendGPU(containerProviderConnection)\n  ),\n);\n\nfunction shouldRecommendGPU(connection: ContainerProviderConnectionInfo): boolean {\n  return connection.vmType === VMType.APPLEHV || connection.vmType === VMType.APPLEHV_LABEL;\n}\n\nasync function checkContainerConnectionStatusAndResources(): Promise<void> {\n  try {\n    connectionInfo = await studioClient.checkContainerConnectionStatusAndResources({\n      model: model as ModelInfo & { memory: number },\n      context: checkContext,\n      connection: containerProviderConnection,\n    });\n  } catch (err: unknown) {\n    console.error(err);\n    connectionInfo = undefined;\n  }\n}\n\n$effect(() => {\n  if (typeof model?.memory === 'number' && containerProviderConnection) {\n    checkContainerConnectionStatusAndResources().catch(console.error);\n  } else {\n    connectionInfo = undefined;\n  }\n});\n</script>\n\n{#if gpuWarningRequired}\n  
<GPUEnabledMachine />\n{/if}\n{#if connectionInfo}\n  <ContainerConnectionStatusInfo connectionInfo={connectionInfo} />\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/GPUEnabledMachine.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport { studioClient } from '/@/utils/client';\nimport GPUEnabledMachine from '/@/lib/notification/GPUEnabledMachine.svelte';\n\nvi.mock('/@/utils/client', async () => {\n  return {\n    studioClient: {\n      navigateToResources: vi.fn(),\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(studioClient.navigateToResources).mockResolvedValue(undefined);\n});\n\ntest('should show navigation to resources', async () => {\n  render(GPUEnabledMachine);\n\n  const banner = screen.getByLabelText('GPU machine banner');\n  expect(banner).toBeInTheDocument();\n  const titleDiv = screen.getByLabelText('title');\n  expect(titleDiv).toBeInTheDocument();\n  expect(titleDiv.textContent).equals('Non GPU enabled machine');\n  const descriptionDiv = screen.getByLabelText('description');\n  expect(descriptionDiv).toBeInTheDocument();\n  expect(descriptionDiv.textContent).equals(\n    `The selected Podman machine is not GPU enabled. 
On MacOS, you can run GPU workloads using the krunkit\\n        environment. Do you want to create a GPU enabled machine ?`,\n  );\n\n  const btnUpdate = screen.queryByRole('button', { name: 'Create GPU enabled machine' });\n  expect(btnUpdate).toBeInTheDocument();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/GPUEnabledMachine.svelte",
    "content": "<script lang=\"ts\">\nimport Fa from 'svelte-fa';\nimport { faTriangleExclamation } from '@fortawesome/free-solid-svg-icons';\nimport { Button } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\n\nconst actionName = 'Create GPU enabled machine';\n\nfunction executeCommand(): void {\n  studioClient.navigateToResources().catch(err => console.error('Error navigating to resources', err));\n}\n</script>\n\n<div\n  class=\"w-full bg-[var(--pd-content-card-bg)] text-[var(--pd-content-card-text)] border-t-[3px] border-amber-500 p-4 mt-5 shadow-inner\"\n  aria-label=\"GPU machine banner\">\n  <div class=\"flex flex-row space-x-3\">\n    <div class=\"flex\">\n      <Fa icon={faTriangleExclamation} class=\"text-amber-400\" />\n    </div>\n    <div class=\"flex flex-col grow\">\n      <span class=\"font-medium\" aria-label=\"title\">Non GPU enabled machine</span>\n      <span aria-label=\"description\"\n        >The selected Podman machine is not GPU enabled. On MacOS, you can run GPU workloads using the krunkit\n        environment. Do you want to create a GPU enabled machine ?</span>\n    </div>\n    <div class=\"flex items-center\">\n      <Button class=\"grow text-gray-500\" on:click={executeCommand} aria-label={actionName}>{actionName}</Button>\n    </div>\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/GPUPromotion.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { render, screen, fireEvent } from '@testing-library/svelte';\nimport { studioClient } from '/@/utils/client';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport GPUPromotion from '/@/lib/notification/GPUPromotion.svelte';\nimport { type Writable, writable } from 'svelte/store';\nimport { configuration } from '/@/stores/extensionConfiguration';\n\nvi.mock('/@/utils/client', async () => {\n  return {\n    studioClient: {\n      updateExtensionConfiguration: vi.fn(),\n      telemetryLogUsage: vi.fn(),\n    },\n  };\n});\n\nvi.mock('../../stores/extensionConfiguration', () => ({\n  configuration: {\n    subscribe: vi.fn(),\n    unsubscribe: vi.fn(),\n  },\n}));\n\nconst mockConfiguration: Writable<ExtensionConfiguration> = writable({\n  experimentalGPU: false,\n  modelsPath: '',\n  apiPort: -1,\n  inferenceRuntime: 'llama-cpp',\n  modelUploadDisabled: false,\n  experimentalTuning: false,\n  showGPUPromotion: false,\n  appearance: 'dark',\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  
vi.mocked(studioClient.updateExtensionConfiguration).mockResolvedValue(undefined);\n  vi.mocked(studioClient.telemetryLogUsage).mockResolvedValue(undefined);\n  vi.mocked(configuration).subscribe.mockImplementation(run => mockConfiguration.subscribe(run));\n});\n\ntest('should show banner if gpu support is off and gpu promotion on', async () => {\n  mockConfiguration.set({\n    experimentalGPU: false,\n    showGPUPromotion: true,\n    modelUploadDisabled: false,\n    modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(GPUPromotion);\n\n  const btnUpdate = screen.queryByRole('button', { name: 'Enable GPU support' });\n  expect(btnUpdate).toBeInTheDocument();\n\n  // eslint-disable-next-line quotes\n  const btnHide = screen.queryByRole('button', { name: \"Don't display anymore\" });\n  expect(btnHide).toBeInTheDocument();\n  expect(studioClient.telemetryLogUsage).toHaveBeenCalledWith('gpuPromotionBanner', { action: 'show' });\n});\n\ntest('should not show banner if gpu support is on and gpu promotion on', async () => {\n  mockConfiguration.set({\n    experimentalGPU: true,\n    showGPUPromotion: true,\n    modelUploadDisabled: false,\n    modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(GPUPromotion);\n\n  const btnUpdate = screen.queryByRole('button', { name: 'Enable GPU support' });\n  expect(btnUpdate).not.toBeInTheDocument();\n\n  // eslint-disable-next-line quotes\n  const btnHide = screen.queryByRole('button', { name: \"Don't display anymore\" });\n  expect(btnHide).not.toBeInTheDocument();\n  expect(studioClient.telemetryLogUsage).not.toHaveBeenCalled();\n});\n\ntest('should not show banner if gpu support is off and gpu promotion off', async () => {\n  mockConfiguration.set({\n    experimentalGPU: false,\n    showGPUPromotion: false,\n    modelUploadDisabled: false,\n    
modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(GPUPromotion);\n\n  // eslint-disable-next-line quotes\n  const btnUpdate = screen.queryByRole('button', { name: 'Enable GPU support' });\n  expect(btnUpdate).not.toBeInTheDocument();\n\n  // eslint-disable-next-line quotes\n  const btnHide = screen.queryByRole('button', { name: \"Don't display anymore\" });\n  expect(btnHide).not.toBeInTheDocument();\n  expect(studioClient.telemetryLogUsage).not.toHaveBeenCalled();\n});\n\ntest('click enable should call client', async () => {\n  mockConfiguration.set({\n    experimentalGPU: false,\n    showGPUPromotion: true,\n    modelUploadDisabled: false,\n    modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(GPUPromotion);\n\n  // eslint-disable-next-line quotes\n  const btnUpdate = screen.queryByRole('button', { name: 'Enable GPU support' });\n  expect(btnUpdate).toBeInTheDocument();\n\n  // eslint-disable-next-line quotes\n  const btnHide = screen.queryByRole('button', { name: \"Don't display anymore\" });\n  expect(btnHide).toBeInTheDocument();\n\n  await fireEvent.click(btnUpdate!);\n\n  expect(studioClient.updateExtensionConfiguration).toHaveBeenCalledWith({ experimentalGPU: true });\n  expect(studioClient.telemetryLogUsage).toHaveBeenNthCalledWith(1, 'gpuPromotionBanner', { action: 'show' });\n  expect(studioClient.telemetryLogUsage).toHaveBeenNthCalledWith(2, 'gpuPromotionBanner', { action: 'enable' });\n});\n\ntest('click hide should call client', async () => {\n  mockConfiguration.set({\n    experimentalGPU: false,\n    showGPUPromotion: true,\n    modelUploadDisabled: false,\n    modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(GPUPromotion);\n\n  // eslint-disable-next-line quotes\n  const 
btnUpdate = screen.queryByRole('button', { name: 'Enable GPU support' });\n  expect(btnUpdate).toBeInTheDocument();\n\n  // eslint-disable-next-line quotes\n  const btnHide = screen.queryByRole('button', { name: \"Don't display anymore\" });\n  expect(btnHide).toBeInTheDocument();\n\n  await fireEvent.click(btnHide!);\n\n  expect(studioClient.updateExtensionConfiguration).toHaveBeenCalledWith({ showGPUPromotion: false });\n  expect(studioClient.telemetryLogUsage).toHaveBeenNthCalledWith(1, 'gpuPromotionBanner', { action: 'show' });\n  expect(studioClient.telemetryLogUsage).toHaveBeenNthCalledWith(2, 'gpuPromotionBanner', { action: 'hide' });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/notification/GPUPromotion.svelte",
    "content": "<script lang=\"ts\">\nimport { Button } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\nimport { configuration } from '/@/stores/extensionConfiguration';\nimport MarkdownRenderer from '/@/lib/markdown/MarkdownRenderer.svelte';\nimport { onMount } from 'svelte';\n\n// eslint-disable-next-line quotes\nconst actionName = \"Don't display anymore\";\n\nfunction hideGPUPromotionBanner(): void {\n  studioClient.telemetryLogUsage('gpuPromotionBanner', { action: 'hide' }).catch(console.error);\n  studioClient.updateExtensionConfiguration({ showGPUPromotion: false }).catch(console.error);\n}\n\nfunction enableGPUSupport(): void {\n  studioClient.telemetryLogUsage('gpuPromotionBanner', { action: 'enable' }).catch(console.error);\n  studioClient.updateExtensionConfiguration({ experimentalGPU: true }).catch(console.error);\n}\n\nonMount(() => {\n  if ($configuration?.showGPUPromotion && !$configuration?.experimentalGPU) {\n    studioClient.telemetryLogUsage('gpuPromotionBanner', { action: 'show' }).catch(console.error);\n  }\n});\n\nconst content =\n  'GPU support is not enabled. Podman AI Lab supports GPU on Windows and MacOS.\\n\\nThis greatly enhances the developer experience when running inference servers. 
We recommend you to enable it.';\n</script>\n\n{#if $configuration?.showGPUPromotion && !$configuration?.experimentalGPU}\n  <div\n    class=\"w-full bg-[var(--pd-content-card-bg)] text-[var(--pd-content-card-text)] border-t-[3px] border-amber-500 p-4 mt-5 shadow-inner\"\n    aria-label=\"GPU promotion banner\">\n    <div class=\"flex flex-col space-x-3\">\n      <span class=\"font-normal items-center\" aria-label=\"title\"\n        >⚡️ Supercharge Your AI: Enable GPU Acceleration and watch your LLM respond in a flash!\n      </span>\n      <div class=\"flex flex-row\">\n        <div class=\"grow\">\n          <MarkdownRenderer source={content} />\n        </div>\n        <div class=\"flex flex-col space-y-1\">\n          <Button class=\"w-auto ml-1\" on:click={enableGPUSupport} aria-label=\"Enable GPU support\"\n            >Enable GPU support</Button>\n          <Button class=\"w-auto ml-1\" on:click={hideGPUPromotionBanner} aria-label={actionName}>{actionName}</Button>\n        </div>\n      </div>\n    </div>\n  </div>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TaskItem.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, beforeEach, vi } from 'vitest';\nimport { render, screen, fireEvent } from '@testing-library/svelte';\nimport TaskItem from '/@/lib/progress/TaskItem.svelte';\nimport { studioClient } from '/@/utils/client';\n\nvi.mock('../../utils/client', async () => {\n  return {\n    studioClient: {\n      requestCancelToken: vi.fn(),\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(studioClient.requestCancelToken).mockResolvedValue(undefined);\n});\n\ntest('Task item should no show cancel button if no cancellation token provided', async () => {\n  // render the component\n  render(TaskItem, {\n    task: {\n      name: 'dummyName',\n      state: 'loading',\n      id: 'dummyId',\n    },\n  });\n\n  const cancelBtn = screen.queryByTitle('Cancel');\n  expect(cancelBtn).toBeNull();\n});\n\ntest('Task item should no show cancel button if state not loading', async () => {\n  // render the component\n  render(TaskItem, {\n    task: {\n      name: 'dummyName',\n      state: 'success',\n      id: 'dummyId',\n      cancellationToken: 1,\n    },\n  });\n\n  const cancelBtn = 
screen.queryByTitle('Cancel');\n  expect(cancelBtn).toBeNull();\n});\n\ntest('Task item should show cancel button if state loading and cancellation token provided', async () => {\n  // render the component\n  render(TaskItem, {\n    task: {\n      name: 'dummyName',\n      state: 'loading',\n      id: 'dummyId',\n      cancellationToken: 1,\n    },\n  });\n\n  const cancelBtn = screen.getByTitle('Cancel');\n  expect(cancelBtn).toBeDefined();\n\n  await fireEvent.click(cancelBtn);\n\n  expect(studioClient.requestCancelToken).toHaveBeenCalledWith(1);\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TaskItem.svelte",
    "content": "<script lang=\"ts\">\nimport type { Task } from '@shared/models/ITask';\nimport Fa from 'svelte-fa';\nimport { faClose } from '@fortawesome/free-solid-svg-icons';\nimport { studioClient } from '/@/utils/client';\n\nexport let task: Task;\n\nconst cancel = (): void => {\n  if (task.cancellationToken !== undefined) {\n    studioClient.requestCancelToken(task.cancellationToken).catch((err: unknown) => {\n      console.error('Something went wrong while trying to cancel token', err);\n    });\n  }\n};\n</script>\n\n<div class=\"flex flex-row items-center\">\n  <div class=\"min-w-4 mr-2 flex items-center justify-center\">\n    {#if task.state === 'success'}\n      <svg\n        role=\"img\"\n        class=\"w-4 h-4 text-green-500 shrink-0\"\n        xmlns=\"http://www.w3.org/2000/svg\"\n        fill=\"currentColor\"\n        viewBox=\"0 0 20 20\">\n        <path\n          d=\"M10 .5a9.5 9.5 0 1 0 9.5 9.5A9.51 9.51 0 0 0 10 .5Zm3.707 8.207-4 4a1 1 0 0 1-1.414 0l-2-2a1 1 0 0 1 1.414-1.414L9 10.586l3.293-3.293a1 1 0 0 1 1.414 1.414Z\"\n        ></path>\n      </svg>\n    {:else if task.state === 'loading'}\n      <svg\n        role=\"img\"\n        class=\"w-4 h-4 text-[var(--pd-content-card-text)] animate-spin fill-purple-500\"\n        viewBox=\"0 0 100 101\"\n        fill=\"none\"\n        xmlns=\"http://www.w3.org/2000/svg\">\n        <path\n          d=\"M100 50.5908C100 78.2051 77.6142 100.591 50 100.591C22.3858 100.591 0 78.2051 0 50.5908C0 22.9766 22.3858 0.59082 50 0.59082C77.6142 0.59082 100 22.9766 100 50.5908ZM9.08144 50.5908C9.08144 73.1895 27.4013 91.5094 50 91.5094C72.5987 91.5094 90.9186 73.1895 90.9186 50.5908C90.9186 27.9921 72.5987 9.67226 50 9.67226C27.4013 9.67226 9.08144 27.9921 9.08144 50.5908Z\"\n          fill=\"currentColor\"></path>\n        <path\n          d=\"M93.9676 39.0409C96.393 38.4038 97.8624 35.9116 97.0079 33.5539C95.2932 28.8227 92.871 24.3692 89.8167 20.348C85.8452 15.1192 80.8826 10.7238 75.2124 7.41289C69.5422 
4.10194 63.2754 1.94025 56.7698 1.05124C51.7666 0.367541 46.6976 0.446843 41.7345 1.27873C39.2613 1.69328 37.813 4.19778 38.4501 6.62326C39.0873 9.04874 41.5694 10.4717 44.0505 10.1071C47.8511 9.54855 51.7191 9.52689 55.5402 10.0491C60.8642 10.7766 65.9928 12.5457 70.6331 15.2552C75.2735 17.9648 79.3347 21.5619 82.5849 25.841C84.9175 28.9121 86.7997 32.2913 88.1811 35.8758C89.083 38.2158 91.5421 39.6781 93.9676 39.0409Z\"\n          fill=\"currentFill\"></path>\n      </svg>\n    {:else}\n      <svg\n        role=\"img\"\n        class=\"shrink-0 inline w-4 h-4 text-red-600 fe\"\n        xmlns=\"http://www.w3.org/2000/svg\"\n        fill=\"currentColor\"\n        viewBox=\"0 0 20 20\">\n        <path\n          d=\"M10 .5a9.5 9.5 0 1 0 9.5 9.5A9.51 9.51 0 0 0 10 .5ZM9.5 4a1.5 1.5 0 1 1 0 3 1.5 1.5 0 0 1 0-3ZM12 15H8a1 1 0 0 1 0-2h1v-3H8a1 1 0 0 1 0-2h2a1 1 0 0 1 1 1v4h1a1 1 0 0 1 0 2Z\"\n        ></path>\n      </svg>\n    {/if}\n  </div>\n  <span>\n    {task.name}\n    {#if task.progress}({Math.floor(task.progress)}%){/if}\n  </span>\n  <div class=\"flex grow justify-end\">\n    {#if task.state === 'loading' && task.cancellationToken !== undefined}\n      <button on:click={cancel} title=\"Cancel\"><Fa icon={faClose} /></button>\n    {/if}\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TasksBanner.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { vi, test, expect } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport TasksBanner from '/@/lib/progress/TasksBanner.svelte';\n\nvi.mock('../../utils/client', async () => {\n  return {\n    studioClient: {},\n  };\n});\n\nconst mocks = vi.hoisted(() => {\n  return {\n    // tasks store\n    getTasksMock: vi.fn(),\n  };\n});\n\nvi.mock('../../stores/tasks', async () => {\n  return {\n    tasks: {\n      subscribe: (f: (msg: unknown) => void) => {\n        f(mocks.getTasksMock());\n        return (): void => {};\n      },\n    },\n  };\n});\n\ntest('expect all tasks to be displayed', () => {\n  mocks.getTasksMock.mockReturnValue([\n    {\n      state: 'loading',\n      labels: {},\n      name: 'Task-1',\n      id: 'task-1',\n    },\n    {\n      state: 'loading',\n      labels: {},\n      name: 'Task-2',\n      id: 'task-2',\n    },\n  ]);\n\n  render(TasksBanner, { labels: {}, title: 'Tasks list' });\n\n  expect(screen.getByText('Task-1')).toBeDefined();\n  expect(screen.getByText('Task-2')).toBeDefined();\n});\n\ntest('expect loading tasks to be displayed', () => {\n  mocks.getTasksMock.mockReturnValue([\n    {\n      state: 
'loading',\n      labels: {},\n      name: 'Task-1',\n      id: 'task-1',\n    },\n    {\n      state: 'success',\n      labels: {},\n      name: 'Task-2',\n      id: 'task-2',\n    },\n  ]);\n\n  render(TasksBanner, { labels: {}, title: 'Tasks list' });\n\n  expect(screen.getByText('Task-1')).toBeDefined();\n  expect(screen.queryByText('Task-2')).toBeNull();\n});\n\ntest('expect tasks with specified labels to be displayed', () => {\n  mocks.getTasksMock.mockReturnValue([\n    {\n      state: 'loading',\n      labels: {\n        hello: 'world',\n      },\n      name: 'Task-1',\n      id: 'task-1',\n    },\n    {\n      state: 'loading',\n      labels: {},\n      name: 'Task-2',\n      id: 'task-2',\n    },\n  ]);\n\n  render(TasksBanner, { labels: { hello: undefined }, title: 'Tasks list' });\n\n  expect(screen.getByText('Task-1')).toBeDefined();\n  expect(screen.queryByText('Task-2')).toBeNull();\n});\n\ntest('expect tasks with specified pair label/value to be displayed', () => {\n  mocks.getTasksMock.mockReturnValue([\n    {\n      state: 'loading',\n      labels: {\n        hello: 'saturn',\n      },\n      name: 'Task-1',\n      id: 'task-1',\n    },\n    {\n      state: 'loading',\n      labels: {\n        hello: 'world',\n      },\n      name: 'Task-2',\n      id: 'task-2',\n    },\n  ]);\n\n  render(TasksBanner, { labels: { hello: 'world' }, title: 'Tasks list' });\n\n  expect(screen.queryByText('Task-1')).toBeNull();\n  expect(screen.getByText('Task-2')).toBeDefined();\n});\n\ntest('expect tasks with specified pairs labels/values to be displayed', () => {\n  mocks.getTasksMock.mockReturnValue([\n    {\n      state: 'loading',\n      labels: {\n        hello: 'saturn',\n        dummy: 'hello',\n      },\n      name: 'Task-1',\n      id: 'task-1',\n    },\n    {\n      state: 'loading',\n      labels: {\n        hello: 'saturn',\n      },\n      name: 'Task-2',\n      id: 'task-2',\n    },\n  ]);\n\n  render(TasksBanner, { labels: { hello: 'saturn', dummy: 
'hello' }, title: 'Tasks list' });\n\n  expect(screen.getByText('Task-1')).toBeDefined();\n  expect(screen.queryByText('Task-2')).toBeNull();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TasksBanner.svelte",
    "content": "<script lang=\"ts\">\nimport { tasks } from '/@/stores/tasks';\nimport type { Task } from '@shared/models/ITask';\nimport Card from '/@/lib/Card.svelte';\nimport TasksProgress from '/@/lib/progress/TasksProgress.svelte';\nimport { onMount } from 'svelte';\n\n/**\n * labels that should be matching on tasks\n * @example\n * labels: { 'hello': undefined }\n * will match any tasks with the `hello` label, regardless of the value of the labels\n *\n * @example\n * labels: { 'hello': 'world' }\n * will match tasks with the `hello` label, and the `world` value\n */\nexport let labels: Record<string, string | undefined> = {};\nexport let title: string;\n\nlet loadingTasks: Task[] = [];\n\nonMount(() => {\n  const entries: [string, string | undefined][] = Object.entries(labels);\n  return tasks.subscribe(items => {\n    loadingTasks = items.reduce((output, task) => {\n      // only display failed / loading tasks\n      if (task.state === 'success') return output;\n\n      const taskLabels = task.labels ?? {};\n      for (const [key, value] of entries) {\n        // ensure the label requested is in the task labels\n        if (!(key in taskLabels)) return output;\n\n        // if we defined a value for the label, remove any value not matching\n        if (value && taskLabels[key] !== value) return output;\n      }\n\n      output.push(task);\n\n      return output;\n    }, [] as Task[]);\n  });\n});\n</script>\n\n{#if loadingTasks.length > 0}\n  <Card classes=\"bg-[var(--pd-content-card-bg)] mt-4 mx-5\">\n    <div slot=\"content\" class=\"font-normal p-2 w-full\">\n      <div class=\"mb-2 text-[var(--pd-content-card-title)]\">{title}</div>\n      <TasksProgress tasks={loadingTasks} />\n    </div>\n  </Card>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TasksProgress.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, describe, vi } from 'vitest';\nimport { render, screen, fireEvent, waitFor } from '@testing-library/svelte';\nimport TasksProgress from '/@/lib/progress/TasksProgress.svelte';\n\nvi.mock('../../utils/client', async () => {\n  return {\n    studioClient: {\n      requestCancelToken: vi.fn(),\n    },\n  };\n});\n\ntest('TasksProgress should not render any tasks', async () => {\n  // render the component\n  render(TasksProgress, { tasks: [] });\n\n  const items = screen.queryAllByRole('listitem');\n  expect(items).toBeDefined();\n  expect(items.length).toBe(0);\n});\n\ntest('TasksProgress should render one task', async () => {\n  // render the component\n  render(TasksProgress, {\n    tasks: [\n      {\n        id: 'random',\n        state: 'success',\n        name: 'random',\n      },\n    ],\n  });\n\n  const items = screen.queryAllByRole('listitem');\n  expect(items).toBeDefined();\n  expect(items.length).toBe(1);\n});\n\ntest('TasksProgress should render multiple tasks', async () => {\n  // render the component\n  render(TasksProgress, {\n    tasks: [\n      {\n        id: 'random',\n       
 state: 'success',\n        name: 'random',\n      },\n      {\n        id: 'random-2',\n        state: 'error',\n        name: 'random',\n      },\n    ],\n  });\n\n  const items = screen.queryAllByRole('listitem');\n  expect(items).toBeDefined();\n  expect(items.length).toBe(2);\n});\n\ndescribe('tasks types', () => {\n  test('TasksProgress should render success task', async () => {\n    // render the component\n    render(TasksProgress, {\n      tasks: [\n        {\n          id: 'random',\n          state: 'success',\n          name: 'random',\n        },\n      ],\n    });\n\n    const item = screen.getByRole('img');\n    expect(item).toHaveClass('text-green-500');\n    expect(item).not.toHaveClass('animate-spin');\n  });\n\n  test('TasksProgress should render loading task', async () => {\n    // render the component\n    render(TasksProgress, {\n      tasks: [\n        {\n          id: 'random',\n          state: 'loading',\n          name: 'random',\n        },\n      ],\n    });\n\n    const item = screen.getByRole('img');\n    expect(item).toHaveClass('fill-purple-500');\n    expect(item).toHaveClass('animate-spin');\n  });\n\n  test('TasksProgress should render error task', async () => {\n    // render the component\n    render(TasksProgress, {\n      tasks: [\n        {\n          id: 'random',\n          state: 'error',\n          name: 'random',\n        },\n      ],\n    });\n\n    const item = screen.getByRole('img');\n    expect(item).toHaveClass('text-red-600');\n    expect(item).not.toHaveClass('animate-spin');\n  });\n});\n\ndescribe('error expandable', () => {\n  test('TasksProgress should render one error task without expandable error message', async () => {\n    // render the component\n    render(TasksProgress, {\n      tasks: [\n        {\n          id: 'random',\n          state: 'error',\n          name: 'random',\n        },\n      ],\n    });\n\n    const message = screen.queryByText('View Error');\n    
expect(message).toBeNull();\n  });\n\n  test('TasksProgress should render one error task without showing error message', async () => {\n    // render the component\n    render(TasksProgress, {\n      tasks: [\n        {\n          id: 'random',\n          state: 'error',\n          name: 'random',\n          error: 'message about error.',\n        },\n      ],\n    });\n\n    const message = screen.queryByText('View Error');\n    expect(message).toBeDefined();\n    const note = screen.getByRole('note');\n    expect(note).toHaveClass('hidden');\n  });\n\n  test('TasksProgress should render one error task and show error message on click', async () => {\n    // render the component\n    render(TasksProgress, {\n      tasks: [\n        {\n          id: 'random',\n          state: 'error',\n          name: 'random',\n          error: 'message about error.',\n        },\n      ],\n    });\n\n    const message = screen.getByText('View Error');\n    await fireEvent.click(message);\n\n    const note = screen.getByRole('note');\n    await waitFor(() => {\n      expect(note).not.toHaveClass('hidden');\n    });\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TasksProgress.svelte",
    "content": "<script lang=\"ts\">\nimport type { Task } from '@shared/models/ITask';\nimport ExpandableMessage from '/@/lib/ExpandableMessage.svelte';\nimport TaskItem from './TaskItem.svelte';\n\nexport let tasks: Task[] = [];\n</script>\n\n<ul class=\"space-y-2 text-[var(--pd-content-card-text)] list-inside\">\n  {#each tasks as task (task.id)}\n    <li class=\"flex flex-col rounded-md bg-[var(--pd-content-card-bg)] p-2\">\n      <TaskItem task={task} />\n      <ExpandableMessage message={task.error} title=\"View Error\" />\n    </li>\n  {/each}\n</ul>\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TrackedTasks.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, vi } from 'vitest';\nimport { render } from '@testing-library/svelte';\nimport TrackedTasks from '/@/lib/progress/TrackedTasks.svelte';\n\nvi.mock('../../utils/client', () => ({\n  studioClient: {\n    requestCancelToken: vi.fn(),\n  },\n}));\n\ntest('empty task should not have any content', async () => {\n  const { queryByRole } = render(TrackedTasks, {\n    tasks: [],\n  });\n\n  const status = queryByRole('status');\n  expect(status).toBeNull();\n});\n\ntest('task without matching trackingId should not have any content', async () => {\n  const { queryByRole } = render(TrackedTasks, {\n    tasks: [\n      {\n        id: 'dummy-id',\n        name: 'Hello World',\n        state: 'loading',\n        labels: {\n          trackingId: 'dummyTrackingId',\n        },\n      },\n    ],\n    trackingId: 'notMatching',\n  });\n\n  const status = queryByRole('status');\n  expect(status).toBeNull();\n});\n\ntest('task with matching trackingId should be visible', () => {\n  const { getByRole } = render(TrackedTasks, {\n    tasks: [\n      {\n        id: 'dummy-id',\n        name: 'Hello World',\n        state: 
'loading',\n        labels: {\n          trackingId: 'dummyTrackingId',\n        },\n      },\n    ],\n    trackingId: 'dummyTrackingId',\n  });\n\n  const status = getByRole('status');\n  expect(status).toBeInTheDocument();\n});\n\ntest('onChange should provide task with matching trackingId', () => {\n  const onChangeMock = vi.fn();\n  render(TrackedTasks, {\n    tasks: [\n      {\n        id: 'dummy-id',\n        name: 'Hello World',\n        state: 'loading',\n        labels: {\n          trackingId: 'dummyTrackingId',\n        },\n      },\n      {\n        id: 'dummy-id-2',\n        name: 'Hello World 2',\n        state: 'loading',\n      },\n    ],\n    trackingId: 'dummyTrackingId',\n    onChange: onChangeMock,\n  });\n\n  expect(onChangeMock).toHaveBeenCalledWith([\n    {\n      id: 'dummy-id',\n      name: 'Hello World',\n      state: 'loading',\n      labels: {\n        trackingId: 'dummyTrackingId',\n      },\n    },\n  ]);\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/progress/TrackedTasks.svelte",
    "content": "<script lang=\"ts\">\nimport type { Task } from '@shared/models/ITask';\nimport { filterByLabel } from '/@/utils/taskUtils';\nimport TasksProgress from '/@/lib/progress/TasksProgress.svelte';\n\ninterface Props {\n  trackingId?: string;\n  tasks: Task[];\n  class?: string;\n  onChange?: (tasks: Task[]) => void;\n}\nlet { trackingId, tasks, onChange, class: classes }: Props = $props();\n\nlet trackedTasks: Task[] = $derived.by(() => {\n  if (trackingId === undefined) {\n    return [];\n  }\n\n  return filterByLabel(tasks, {\n    trackingId: trackingId,\n  });\n});\n\n$effect(() => {\n  onChange?.($state.snapshot(trackedTasks));\n});\n</script>\n\n{#if trackedTasks.length > 0}\n  <div role=\"status\" class={classes}>\n    <TasksProgress tasks={trackedTasks} />\n  </div>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/select/ContainerProviderConnectionSelect.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { fireEvent, render, within } from '@testing-library/svelte';\nimport ContainerProviderConnectionSelect from '/@/lib/select/ContainerProviderConnectionSelect.svelte';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { VMType } from '@shared/models/IPodman';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  // mock scrollIntoView\n  window.HTMLElement.prototype.scrollIntoView = vi.fn();\n});\n\nconst wslConnection: ContainerProviderConnectionInfo = {\n  name: 'Machine 1',\n  type: 'podman',\n  status: 'started',\n  providerId: 'podman',\n  vmType: VMType.WSL,\n};\n\nconst qemuConnection: ContainerProviderConnectionInfo = {\n  name: 'Machine 2',\n  type: 'podman',\n  status: 'started',\n  providerId: 'podman',\n  vmType: VMType.QEMU,\n};\n\ntest('Should list all container provider connections', async () => {\n  const { container } = render(ContainerProviderConnectionSelect, {\n    value: undefined,\n    containerProviderConnections: [wslConnection, qemuConnection],\n  });\n\n  // first get the select input\n 
 const input = within(container).getByLabelText('Select Container Engine');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items: NodeListOf<HTMLElement> = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have two options\n  expect(items.length).toBe(2);\n  expect(items[0]).toHaveTextContent(wslConnection.name);\n  expect(items[1]).toHaveTextContent(qemuConnection.name);\n});\n\ntest('default value should be visible', async () => {\n  const { container } = render(ContainerProviderConnectionSelect, {\n    value: qemuConnection,\n    containerProviderConnections: [wslConnection, qemuConnection],\n  });\n\n  // first get the select input\n  const select = within(container).getByText(qemuConnection.name);\n  expect(select).toBeDefined();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/select/ContainerProviderConnectionSelect.svelte",
    "content": "<script lang=\"ts\">\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport Select from '/@/lib/select/Select.svelte';\n\nexport let disabled: boolean = false;\nimport { VMType } from '@shared/models/IPodman';\n\n/**\n * Current value selected\n */\nexport let value: ContainerProviderConnectionInfo | undefined = undefined;\nexport let containerProviderConnections: ContainerProviderConnectionInfo[] = [];\n/**\n * Handy mechanism to provide the mandatory property `label` and `value` to the Select component\n */\nlet selected: (ContainerProviderConnectionInfo & { label: string; value: string }) | undefined = undefined;\n$: {\n  // let's select a default model\n  if (value) {\n    selected = { ...value, label: value.name, value: value.name };\n  }\n}\n\nfunction handleOnChange(nValue: ContainerProviderConnectionInfo | undefined): void {\n  value = nValue;\n}\n</script>\n\n<Select\n  label=\"Select Container Engine\"\n  name=\"select-container-engine\"\n  disabled={disabled}\n  value={selected}\n  onchange={handleOnChange}\n  placeholder=\"Select container provider to use\"\n  items={containerProviderConnections.map(containerProviderConnection => ({\n    ...containerProviderConnection,\n    value: containerProviderConnection.name,\n    label: containerProviderConnection.name,\n  }))}>\n  <div slot=\"item\" let:item>\n    <div class=\"flex items-center\">\n      <div class=\"grow\">\n        <span>{item.name}</span>\n      </div>\n\n      {#if item.vmType !== VMType.UNKNOWN}\n        <div>({item.vmType})</div>\n      {/if}\n    </div>\n  </div>\n</Select>\n"
  },
  {
    "path": "packages/frontend/src/lib/select/InferenceRuntimeSelect.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, vi, test, expect } from 'vitest';\nimport { render, fireEvent, within } from '@testing-library/svelte';\nimport InferenceRuntimeSelect from '/@/lib/select/InferenceRuntimeSelect.svelte';\nimport { InferenceType } from '@shared/models/IInference';\n\nconst providers: InferenceType[] = [InferenceType.LLAMA_CPP, InferenceType.OPENVINO, InferenceType.WHISPER_CPP];\n\nbeforeEach(() => {\n  // mock scrollIntoView\n  window.HTMLElement.prototype.scrollIntoView = vi.fn();\n});\n\ntest('Lists all runtime options', async () => {\n  const { container } = render(InferenceRuntimeSelect, {\n    value: undefined,\n    providers,\n    disabled: false,\n  });\n\n  const input = within(container).getByLabelText('Select Inference Runtime');\n  await fireEvent.pointerUp(input);\n\n  const items = container.querySelectorAll('div[class~=\"list-item\"]');\n  const expectedOptions = providers;\n\n  expect(items.length).toBe(expectedOptions.length);\n\n  expectedOptions.forEach((option, i) => {\n    expect(items[i]).toHaveTextContent(option);\n  });\n});\n\ntest('Selected value should be visible', async () => {\n  const { 
container } = render(InferenceRuntimeSelect, {\n    value: undefined,\n    providers,\n    disabled: false,\n  });\n\n  const input = within(container).getByLabelText('Select Inference Runtime');\n  await fireEvent.pointerUp(input);\n\n  const items = container.querySelectorAll('div[class~=\"list-item\"]');\n  const expectedOptions = providers;\n\n  await fireEvent.click(items[0]);\n\n  const valueContainer = container.querySelector('.value-container');\n  if (!(valueContainer instanceof HTMLElement)) throw new Error('Missing value container');\n\n  const selectedLabel = within(valueContainer).getByText(expectedOptions[0]);\n  expect(selectedLabel).toBeDefined();\n});\n\ntest('Exclude specific runtime from list', async () => {\n  const excluded = [InferenceType.WHISPER_CPP, InferenceType.OPENVINO];\n\n  const { container } = render(InferenceRuntimeSelect, {\n    value: undefined,\n    providers,\n    disabled: false,\n    exclude: excluded,\n  });\n\n  const input = within(container).getByLabelText('Select Inference Runtime');\n  await fireEvent.pointerUp(input);\n\n  const items = container.querySelectorAll('div[class~=\"list-item\"]');\n  const itemTexts = Array.from(items).map(item => item.textContent?.trim());\n\n  excluded.forEach(excludedType => {\n    expect(itemTexts).not.toContain(excludedType);\n  });\n\n  const expected = providers.filter(type => !excluded.includes(type));\n\n  expected.forEach(included => {\n    expect(itemTexts).toContain(included);\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/select/InferenceRuntimeSelect.svelte",
    "content": "<script lang=\"ts\">\nimport Select from '/@/lib/select/Select.svelte';\nimport type { InferenceType } from '@shared/models/IInference';\n\ninterface Props {\n  disabled?: boolean;\n  value: InferenceType | undefined;\n  providers: InferenceType[];\n  exclude?: InferenceType[];\n}\nlet { value = $bindable(), disabled, providers, exclude = [] }: Props = $props();\n\n// Filter options based on optional exclude list\nconst options = $derived(() =>\n  providers.filter(type => !exclude.includes(type)).map(type => ({ value: type, label: type })),\n);\n\nfunction handleOnChange(nValue: { value: string } | undefined): void {\n  if (nValue) {\n    value = nValue.value as InferenceType;\n  } else {\n    value = undefined;\n  }\n}\n</script>\n\n<Select\n  label=\"Select Inference Runtime\"\n  name=\"select-inference-runtime\"\n  disabled={disabled}\n  value={value ? { label: value, value: value } : undefined}\n  onchange={handleOnChange}\n  placeholder=\"Select Inference Runtime to use\"\n  items={options()} />\n"
  },
  {
    "path": "packages/frontend/src/lib/select/ModelSelect.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, vi, test, expect } from 'vitest';\nimport { render, fireEvent, within } from '@testing-library/svelte';\nimport ModelSelect from '/@/lib/select/ModelSelect.svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { InferenceType } from '@shared/models/IInference';\n\nconst fakeRecommendedModel: ModelInfo = {\n  id: 'dummy-model-1',\n  backend: InferenceType.LLAMA_CPP,\n  name: 'Dummy Model 1',\n  file: {\n    file: 'dummy-model-file',\n    path: 'dummy-model-path',\n  },\n} as unknown as ModelInfo;\n\nconst fakeRemoteModel: ModelInfo = {\n  id: 'dummy-model-2',\n  backend: InferenceType.LLAMA_CPP,\n  name: 'Dummy Model 2',\n} as unknown as ModelInfo;\n\nconst fakeRecommendedRemoteModel: ModelInfo = {\n  id: 'dummy-model-3',\n  backend: InferenceType.LLAMA_CPP,\n  name: 'Dummy Model 3',\n} as unknown as ModelInfo;\n\nbeforeEach(() => {\n  // mock scrollIntoView\n  window.HTMLElement.prototype.scrollIntoView = vi.fn();\n});\n\ntest('ModelSelect should list all models provided', async () => {\n  const { container } = render(ModelSelect, {\n    value: undefined,\n    disabled: 
undefined,\n    models: [fakeRecommendedModel, fakeRemoteModel],\n    recommended: [],\n  });\n\n  // first get the select input\n  const input = within(container).getByLabelText('Select Model');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have two options\n  expect(items.length).toBe(2);\n  expect(items[0]).toHaveTextContent(fakeRecommendedModel.name);\n  expect(items[1]).toHaveTextContent(fakeRemoteModel.name);\n});\n\ntest('ModelSelect should set star icon next to recommended model', async () => {\n  const { container } = render(ModelSelect, {\n    value: undefined,\n    disabled: undefined,\n    models: [fakeRecommendedModel, fakeRemoteModel],\n    recommended: [fakeRecommendedModel.id],\n  });\n\n  // first get the select input\n  const input = within(container).getByLabelText('Select Model');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items: NodeListOf<HTMLElement> = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have two options\n  expect(items.length).toBe(2);\n  expect(within(items[0]).getByTitle('Recommended model')).toBeDefined();\n  expect(within(items[1]).queryByTitle('Recommended model')).toBeNull();\n});\n\ntest('models should be sorted', async () => {\n  const { container } = render(ModelSelect, {\n    value: undefined,\n    disabled: undefined,\n    models: [fakeRemoteModel, fakeRecommendedRemoteModel, fakeRecommendedModel],\n    recommended: [fakeRecommendedModel.id, fakeRecommendedRemoteModel.id],\n  });\n\n  // first get the select input\n  const input = within(container).getByLabelText('Select Model');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items: 
NodeListOf<HTMLElement> = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have three options\n  expect(items.length).toBe(3);\n  expect(items[0]).toHaveTextContent(fakeRecommendedModel.name);\n  expect(items[1]).toHaveTextContent(fakeRecommendedRemoteModel.name);\n  expect(items[2]).toHaveTextContent(fakeRemoteModel.name);\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/select/ModelSelect.svelte",
    "content": "<script lang=\"ts\">\nimport { faCheckCircle, faDownload } from '@fortawesome/free-solid-svg-icons';\nimport Select from './Select.svelte';\nimport Fa from 'svelte-fa';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\n\ninterface Props {\n  disabled?: boolean;\n  /**\n   * Recommended model ids\n   */\n  recommended?: string[];\n  /**\n   * List of models\n   */\n  models: ModelInfo[];\n  /**\n   * Current value selected\n   */\n  value: ModelInfo | undefined;\n}\n\nlet { disabled = false, recommended, models, value = $bindable() }: Props = $props();\n\nfunction getModelSortingScore(modelInfo: ModelInfo): number {\n  let score: number = 0;\n  if (modelInfo.file) score -= 1;\n  if (recommended?.includes(modelInfo.id)) score -= 1;\n  return score;\n}\n\n/**\n * Handy mechanism to provide the mandatory property `label` and `value` to the Select component\n */\nlet selected: (ModelInfo & { label: string; value: string }) | undefined = $derived.by(() => {\n  // let's select a default model\n  if (value) {\n    return { ...value, label: value.name, value: value.id };\n  } else {\n    return undefined;\n  }\n});\n\nfunction handleOnChange(nValue: (ModelInfo & { label: string; value: string }) | undefined): void {\n  value = nValue;\n}\n</script>\n\n<Select\n  label=\"Select Model\"\n  name=\"select-model\"\n  disabled={disabled}\n  value={selected}\n  onchange={handleOnChange}\n  placeholder=\"Select model to use\"\n  items={models\n    .toSorted((a, b) => getModelSortingScore(a) - getModelSortingScore(b))\n    .map(model => ({ ...model, value: model.id, label: model.name }))}>\n  <div slot=\"item\" let:item>\n    <div class=\"flex items-center\">\n      <div class=\"grow\">\n        <span>{item.name}</span>\n        {#if recommended?.includes(item.id)}\n          <i class=\"fas fa-star fa-xs\" title=\"Recommended model\"></i>\n        {/if}\n      </div>\n\n      {#if item.file !== undefined}\n        <Fa icon={faCheckCircle} />\n      
{:else}\n        <Fa icon={faDownload} />\n      {/if}\n    </div>\n  </div>\n</Select>\n"
  },
  {
    "path": "packages/frontend/src/lib/select/Select.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, vi, test, expect } from 'vitest';\nimport { render, fireEvent, within } from '@testing-library/svelte';\nimport Select from '/@/lib/select/Select.svelte';\n\nbeforeEach(() => {\n  // mock scrollIntoView\n  window.HTMLElement.prototype.scrollIntoView = vi.fn();\n});\n\ntest('empty slot should use basic list', async () => {\n  const { container } = render(Select, {\n    label: 'Select Item',\n    items: [\n      {\n        label: 'Dummy Item 1',\n        value: 'item-1',\n      },\n      {\n        label: 'Dummy Item 2',\n        value: 'item-2',\n      },\n    ],\n  });\n\n  // first get the select input\n  const input = within(container).getByLabelText('Select Item');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items: NodeListOf<HTMLElement> = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have two options\n  expect(items.length).toBe(2);\n  expect(items[0]).toHaveTextContent('Dummy Item 1');\n  expect(items[1]).toHaveTextContent('Dummy Item 2');\n});\n\ntest('defined value should 
have corresponding active class to item', async () => {\n  const { container } = render(Select, {\n    label: 'Select Item',\n    items: [\n      {\n        label: 'Dummy Item 1',\n        value: 'item-1',\n      },\n      {\n        label: 'Dummy Item 2',\n        value: 'item-2',\n      },\n    ],\n    value: {\n      label: 'Dummy Item 2',\n      value: 'item-2',\n    },\n  });\n\n  // first get the select input\n  const input = within(container).getByLabelText('Select Item');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items: NodeListOf<HTMLElement> = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have two options\n  expect(items.length).toBe(2);\n  expect(items[0].children.length).toBe(1);\n  expect(items[0].children[0].classList.value).not.toContain('active');\n  expect(items[1].children.length).toBe(1);\n  expect(items[1].children[0].classList.value).toContain('active');\n});\n\ntest('selecting value should call onchange callback', async () => {\n  const onChangeMock = vi.fn();\n  const { container } = render(Select, {\n    label: 'Select Item',\n    items: [\n      {\n        label: 'Dummy Item 1',\n        value: 'item-1',\n      },\n      {\n        label: 'Dummy Item 2',\n        value: 'item-2',\n      },\n    ],\n    onchange: onChangeMock,\n  });\n\n  // first get the select input\n  const input = within(container).getByLabelText('Select Item');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items: NodeListOf<HTMLElement> = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have two options\n  expect(items.length).toBe(2);\n\n  await fireEvent.click(items[1]);\n\n  await vi.waitFor(() => {\n    expect(onChangeMock).toHaveBeenCalledWith({\n      label: 'Dummy Item 2',\n      value: 'item-2',\n    });\n  
});\n});\n\ntest('clearing value should call onchange callback with undefined', async () => {\n  const onChangeMock = vi.fn();\n  const { container } = render(Select, {\n    label: 'Select Item',\n    items: [\n      {\n        label: 'Dummy Item 1',\n        value: 'item-1',\n      },\n      {\n        label: 'Dummy Item 2',\n        value: 'item-2',\n      },\n    ],\n    value: {\n      label: 'Dummy Item 2',\n      value: 'item-2',\n    },\n    onchange: onChangeMock,\n  });\n\n  // get clear HTMLElement\n  const clear = container.querySelector('button[class~=\"clear-select\"]');\n  // ensure the clear button exists\n  expect(clear).not.toBeNull();\n  if (!clear) throw new Error('clear is null');\n\n  await fireEvent.click(clear);\n\n  await vi.waitFor(() => {\n    expect(onChangeMock).toHaveBeenCalledWith(undefined);\n    expect(onChangeMock).toHaveBeenCalledOnce();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/select/Select.svelte",
    "content": "<script lang=\"ts\">\nimport Select from 'svelte-select';\n\ntype T = $$Generic<{ label: string; value: string }>;\nexport let disabled: boolean = false;\n\nexport let value: T | undefined = undefined;\nexport let items: T[] = [];\nexport let placeholder: string | undefined = undefined;\nexport let label: string | undefined = undefined;\nexport let name: string | undefined = undefined;\nexport let onchange: ((value: T | undefined) => void) | undefined = undefined;\n\nfunction handleOnChange(e: CustomEvent<T | undefined>): void {\n  value = e.detail;\n  onchange?.(value);\n}\n\nfunction handleOnClear(): void {\n  value = undefined;\n  onchange?.(value);\n}\n</script>\n\n<Select\n  inputAttributes={{ 'aria-label': label }}\n  name={name}\n  disabled={disabled}\n  value={value}\n  on:clear={handleOnClear}\n  on:change={handleOnChange}\n  --item-color=\"var(--pd-dropdown-item-text)\"\n  --item-is-active-color=\"var(--pd-dropdown-item-text)\"\n  --item-hover-color=\"var(--pd-dropdown-item-hover-text)\"\n  --item-active-background=\"var(--pd-input-field-hover-stroke)\"\n  --item-is-active-bg=\"var(--pd-input-field-hover-stroke)\"\n  --background=\"var(--pd-dropdown-bg)\"\n  --list-background=\"var(--pd-dropdown-bg)\"\n  --item-hover-bg=\"var(--pd-dropdown-item-hover-bg)\"\n  --border=\"1px solid var(--pd-input-field-focused-bg)\"\n  --border-hover=\"1px solid var(--pd-input-field-hover-stroke)\"\n  --list-border=\"1px solid var(--pd-input-field-focused-bg)\"\n  --border-focused=\"var(--pd-input-field-focused-bg)\"\n  placeholder={placeholder}\n  class=\"bg-(--pd-content-bg)! text-(--pd-content-card-text)!\"\n  items={items}\n  showChevron>\n  <div slot=\"item\" let:item>\n    <slot name=\"item\" item={item}>\n      <div>{item.label}</div>\n    </slot>\n  </div>\n</Select>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ApplicationTable.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, test, expect, vi } from 'vitest';\nimport { render, within } from '@testing-library/svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport ApplicationTable from '/@/lib/table/application/ApplicationTable.svelte';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\n\nconst mocks = vi.hoisted(() => ({\n  getApplicationStates: vi.fn<() => ApplicationState[]>(),\n}));\n\nvi.mock('../../../stores/application-states', () => ({\n  applicationStates: {\n    subscribe: (fn: (items: ApplicationState[]) => void): unknown => {\n      fn(mocks.getApplicationStates());\n      return vi.fn();\n    },\n  },\n}));\n\nvi.mock('../../../stores/catalog', () => ({\n  catalog: {\n    subscribe: (fn: (item: ApplicationCatalog) => void): unknown => {\n      fn({ categories: [], models: [], recipes: [] });\n      return vi.fn();\n    },\n  },\n}));\n\nvi.mock('../../../utils/client', async () => ({\n  studioClient: {},\n}));\n\nbeforeEach(() => {\n  mocks.getApplicationStates.mockReturnValue([]);\n});\n\ntest('expect pod to be displayed', async () => {\n  
mocks.getApplicationStates.mockReturnValue([\n    {\n      appPorts: [],\n      health: 'healthy',\n      modelId: 'model-id-1',\n      modelPorts: [],\n      pod: {\n        engineId: 'dummy-engine-id',\n        Id: 'pod-id-1',\n        Status: 'Running',\n        Name: 'Test Pod 1',\n      },\n      recipeId: 'recipe-id-1',\n    } as unknown as ApplicationState,\n  ]);\n\n  const { container } = render(ApplicationTable);\n\n  const div = within(container).getByText('Test Pod 1');\n  expect(div).toBeDefined();\n});\n\ntest('expect all pods to be displayed', async () => {\n  mocks.getApplicationStates.mockReturnValue([\n    {\n      appPorts: [],\n      health: 'healthy',\n      modelId: 'model-id-1',\n      modelPorts: [],\n      pod: {\n        engineId: 'dummy-engine-id',\n        Id: 'pod-id-1',\n        Status: 'Running',\n        Name: 'Test Pod 1',\n      },\n      recipeId: 'recipe-id-1',\n    } as unknown as ApplicationState,\n    {\n      appPorts: [],\n      health: 'healthy',\n      modelId: 'model-id-2',\n      modelPorts: [],\n      pod: {\n        engineId: 'dummy-engine-id',\n        Id: 'pod-id-2',\n        Status: 'Running',\n        Name: 'Test Pod 2',\n      },\n      recipeId: 'recipe-id-1',\n    } as unknown as ApplicationState,\n  ]);\n\n  const { container } = render(ApplicationTable);\n\n  const pod1 = within(container).getByText('Test Pod 1');\n  expect(pod1).toBeDefined();\n\n  const pod2 = within(container).getByText('Test Pod 2');\n  expect(pod2).toBeDefined();\n});\n\ntest('expect filter to work as expected', async () => {\n  mocks.getApplicationStates.mockReturnValue([\n    {\n      appPorts: [],\n      health: 'healthy',\n      modelId: 'model-id-1',\n      modelPorts: [],\n      pod: {\n        engineId: 'dummy-engine-id',\n        Id: 'pod-id-1',\n        Status: 'Running',\n        Name: 'Test Pod 1',\n      },\n      recipeId: 'recipe-id-1',\n    } as unknown as ApplicationState,\n    {\n      appPorts: [],\n      health: 
'healthy',\n      modelId: 'model-id-2',\n      modelPorts: [],\n      pod: {\n        engineId: 'dummy-engine-id',\n        Id: 'pod-id-2',\n        Status: 'Running',\n        Name: 'Test Pod 2',\n      },\n      recipeId: 'recipe-id-2',\n    } as unknown as ApplicationState,\n  ]);\n\n  const { container } = render(ApplicationTable, {\n    filter: (items: ApplicationState[]) => items.filter((item: ApplicationState) => item.recipeId !== 'recipe-id-2'),\n  });\n\n  const pod1 = within(container).getByText('Test Pod 1');\n  expect(pod1).toBeDefined();\n\n  const pod2 = within(container).queryByText('Test Pod 2');\n  expect(pod2).toBeNull();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ApplicationTable.svelte",
    "content": "<script lang=\"ts\">\nimport { applicationStates } from '/@/stores/application-states';\nimport ColumnActions from './ColumnActions.svelte';\nimport ColumnStatus from './ColumnStatus.svelte';\nimport ColumnRecipe from './ColumnRecipe.svelte';\nimport ColumnModel from './ColumnModel.svelte';\nimport ColumnPod from './ColumnPod.svelte';\nimport ColumnAge from './ColumnAge.svelte';\nimport { onMount } from 'svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport { Table, TableColumn, TableRow } from '@podman-desktop/ui-svelte';\nimport ColumnRuntime from './ColumnRuntime.svelte';\n\nexport let filter: ((items: ApplicationState[]) => ApplicationState[]) | undefined = undefined;\nconst columns: TableColumn<ApplicationState>[] = [\n  new TableColumn<ApplicationState>('Status', { width: '70px', align: 'center', renderer: ColumnStatus }),\n  new TableColumn<ApplicationState>('Model', { width: '3fr', renderer: ColumnModel }),\n  new TableColumn<ApplicationState>('Recipe', { width: '2fr', renderer: ColumnRecipe }),\n  new TableColumn<ApplicationState>('Pod', { width: '3fr', renderer: ColumnPod }),\n  new TableColumn<ApplicationState>('Age', { width: '2fr', renderer: ColumnAge }),\n  new TableColumn<ApplicationState>('Runtime', { width: '90px', renderer: ColumnRuntime }),\n  new TableColumn<ApplicationState>('Actions', {\n    align: 'right',\n    width: '120px',\n    renderer: ColumnActions,\n    overflow: true,\n  }),\n];\nconst row = new TableRow<ApplicationState>({});\nlet data: ApplicationState[] = [];\nonMount(() => {\n  return applicationStates.subscribe(items => {\n    data = filter ? filter(items) : items;\n  });\n});\n</script>\n\n{#if data?.length > 0}\n  <Table kind=\"AI App\" data={data} columns={columns} row={row}></Table>\n{:else}\n  <slot name=\"empty-screen\" />\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnActions.svelte",
    "content": "<script lang=\"ts\">\nimport ApplicationActions from '../../ApplicationActions.svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nexport let object: ApplicationState;\n</script>\n\n<ApplicationActions\n  recipeId={object.recipeId}\n  modelId={object.modelId}\n  object={object}\n  dropdownMenu={true}\n  enableGoToRecipeAction={true} />\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnAge.svelte",
    "content": "<script lang=\"ts\">\nimport moment from 'moment';\nimport { humanizeAge } from '/@/utils/dimensions';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nexport let object: ApplicationState;\n</script>\n\n<div class=\"text-[var(--pd-table-body-text)] overflow-hidden text-ellipsis\">\n  {humanizeAge(moment(object.pod.Created).unix())}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnModel.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, vi } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport * as catalogStore from '/@/stores/catalog';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport { readable } from 'svelte/store';\nimport ColumnModel from './ColumnModel.svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nvi.mock('/@/stores/catalog', async () => {\n  return {\n    catalog: vi.fn(),\n  };\n});\n\nconst initialCatalog: ApplicationCatalog = {\n  categories: [],\n  models: [\n    {\n      id: 'model1',\n      name: 'Model 1',\n      description: '',\n      registry: '',\n      license: '',\n      url: '',\n      memory: 1000,\n    },\n    {\n      id: 'model2',\n      name: 'Model 2',\n      description: '',\n      registry: '',\n      license: '',\n      url: '',\n      memory: 1000,\n    },\n  ],\n  recipes: [],\n};\n\ntest('display model name', async () => {\n  const obj = {\n    modelId: 'model1',\n  } as unknown as ApplicationState;\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  
render(ColumnModel, { object: obj });\n\n  const text = screen.getByText('Model 1');\n  expect(text).toBeInTheDocument();\n});\n\ntest('display model port', async () => {\n  const obj = {\n    modelId: 'model1',\n    modelPorts: [8080],\n  } as unknown as ApplicationState;\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(ColumnModel, { object: obj });\n\n  const text = screen.getByText('Model 1');\n  expect(text).toBeInTheDocument();\n  const ports = screen.getByText('PORT 8080');\n  expect(ports).toBeInTheDocument();\n});\n\ntest('display multiple model ports', async () => {\n  const obj = {\n    modelId: 'model1',\n    modelPorts: [8080, 5000],\n  } as unknown as ApplicationState;\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(ColumnModel, { object: obj });\n\n  const text = screen.getByText('Model 1');\n  expect(text).toBeInTheDocument();\n  const ports = screen.getByText('PORTS 8080, 5000');\n  expect(ports).toBeInTheDocument();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnModel.svelte",
    "content": "<script lang=\"ts\">\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport { catalog } from '/@/stores/catalog';\nimport { displayPorts } from '/@/utils/printers';\n\nexport let object: ApplicationState;\n\n$: name = $catalog.models.find(r => r.id === object.modelId)?.name;\n</script>\n\n<div class=\"flex flex-col\">\n  <div class=\"text-[var(--pd-table-body-text-highlight)] overflow-hidden text-ellipsis\">\n    {name}\n  </div>\n  <div class=\"text-[var(--pd-table-body-text)] overflow-hidden text-ellipsis\">\n    {displayPorts(object.modelPorts)}\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnPod.svelte",
    "content": "<script lang=\"ts\">\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nexport let object: ApplicationState;\n</script>\n\n<div class=\"text-[var(--pd-table-body-text)] overflow-hidden text-ellipsis\">\n  {object.pod.Name}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnRecipe.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, vi, beforeEach } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport * as catalogStore from '/@/stores/catalog';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport { readable } from 'svelte/store';\nimport ColumnRecipe from './ColumnRecipe.svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    openURL: vi.fn(),\n  };\n});\n\nvi.mock('/@/stores/catalog', async () => {\n  return {\n    catalog: vi.fn(),\n  };\n});\n\nvi.mock('/@/utils/client', async () => {\n  return {\n    studioClient: {\n      openURL: mocks.openURL,\n    },\n  };\n});\n\nconst initialCatalog: ApplicationCatalog = {\n  categories: [],\n  models: [],\n  recipes: [\n    {\n      id: 'recipe 1',\n      name: 'Recipe 1',\n      readme: 'readme 1',\n      categories: [],\n      recommended: ['model1', 'model2'],\n      description: 'description 1',\n      repository: 'repo 1',\n    },\n    {\n      id: 'recipe 2',\n      name: 'Recipe 2',\n      readme: 'readme 2',\n      categories: [],\n  
    description: 'description 2',\n      repository: 'repo 2',\n    },\n  ],\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('display recipe name', async () => {\n  const obj = {\n    recipeId: 'recipe 1',\n  } as unknown as ApplicationState;\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(ColumnRecipe, { object: obj });\n\n  const text = screen.getByText('Recipe 1');\n  expect(text).toBeInTheDocument();\n});\n\ntest('display recipe port', async () => {\n  const obj = {\n    recipeId: 'recipe 1',\n    appPorts: [3000],\n  } as unknown as ApplicationState;\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(ColumnRecipe, { object: obj });\n\n  const text = screen.getByText('Recipe 1');\n  expect(text).toBeInTheDocument();\n  const ports = screen.getByText('PORT 3000');\n  expect(ports).toBeInTheDocument();\n});\n\ntest('display multiple recipe ports', async () => {\n  const obj = {\n    recipeId: 'recipe 1',\n    appPorts: [3000, 5000],\n  } as unknown as ApplicationState;\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(ColumnRecipe, { object: obj });\n\n  const text = screen.getByText('Recipe 1');\n  expect(text).toBeInTheDocument();\n  const ports = screen.getByText('PORTS 3000, 5000');\n  expect(ports).toBeInTheDocument();\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnRecipe.svelte",
    "content": "<script lang=\"ts\">\nimport { catalog } from '/@/stores/catalog';\nimport { displayPorts } from '/@/utils/printers';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nexport let object: ApplicationState;\n\nlet name: string | undefined;\n$: name = $catalog.recipes.find(r => r.id === object.recipeId)?.name;\n</script>\n\n<div class=\"flex flex-col\">\n  <div class=\"text-[var(--pd-table-body-text-highlight)] overflow-hidden text-ellipsis\">\n    {name}\n  </div>\n  <div class=\"text-[var(--pd-table-body-text)] overflow-hidden text-ellipsis\">\n    {displayPorts(object.appPorts)}\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnRuntime.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { test, vi, beforeEach } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport { InferenceType } from '@shared/models/IInference';\nimport ColumnRuntime from './ColumnRuntime.svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('should display label for backend', async () => {\n  render(ColumnRuntime, {\n    object: {\n      backend: InferenceType.LLAMA_CPP,\n    } as ApplicationState,\n  });\n\n  screen.getByText('llamacpp');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnRuntime.svelte",
    "content": "<script lang=\"ts\">\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport Badge from '../../Badge.svelte';\nimport { inferenceTypeLabel } from '@shared/models/IInference';\n\nexport let object: ApplicationState;\n</script>\n\n<Badge content={inferenceTypeLabel(object.backend)} />\n"
  },
  {
    "path": "packages/frontend/src/lib/table/application/ColumnStatus.svelte",
    "content": "<script lang=\"ts\">\nimport { getApplicationStatus } from '../../../pages/applications';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\nimport PodIcon from '../../images/PodIcon.svelte';\nimport { Spinner, StatusIcon } from '@podman-desktop/ui-svelte';\n\nexport let object: ApplicationState;\n\nlet status: string;\n$: status = getApplicationStatus(object);\n</script>\n\n{#if status === 'STARTING'}\n  <Spinner class=\"text-[var(--pd-table-body-text-highlight)]\" />\n{:else}\n  <StatusIcon size={22} status={status} icon={PodIcon} />\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/table/instructlab/InstructlabColumnAge.svelte",
    "content": "<script lang=\"ts\">\nimport { humanizeAge } from '/@/utils/dimensions';\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\n\nexport let object: InstructlabSession;\n</script>\n\n<div class=\"text-[var(--pd-table-body-text)] overflow-hidden text-ellipsis\">\n  {humanizeAge(object.createdTime)}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/instructlab/InstructlabColumnModelName.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { fireEvent, render, screen } from '@testing-library/svelte';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport InstructlabColumnModelName from './InstructlabColumnModelName.svelte';\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\nimport * as catalogStore from '/@/stores/catalog';\nimport { readable } from 'svelte/store';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport { router } from 'tinro';\n\nvi.mock('/@/stores/catalog', async () => {\n  return {\n    catalog: vi.fn(),\n  };\n});\n\nconst initialCatalog: ApplicationCatalog = {\n  categories: [],\n  models: [\n    {\n      id: 'model1',\n      name: 'Model 1',\n      description: '',\n      registry: '',\n      license: '',\n      url: '',\n      memory: 1000,\n    },\n    {\n      id: 'model2',\n      name: 'Model 2',\n      description: '',\n      registry: '',\n      license: '',\n      url: '',\n      memory: 1000,\n    },\n  ],\n  recipes: [],\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('display model name', async () => {\n  const obj = {\n    
modelId: 'model1',\n  } as unknown as InstructlabSession;\n\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(InstructlabColumnModelName, { object: obj });\n\n  const text = screen.getByText('Model 1');\n  expect(text).toBeInTheDocument();\n});\n\ntest('click on name should open details page', async () => {\n  const gotoMock = vi.spyOn(router, 'goto');\n  const obj = {\n    modelId: 'model1',\n  } as unknown as InstructlabSession;\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  render(InstructlabColumnModelName, { object: obj });\n\n  const nameBtn = screen.getByTitle('Open model details');\n  expect(nameBtn).toBeDefined();\n  await fireEvent.click(nameBtn);\n\n  expect(gotoMock).toHaveBeenCalledWith('/model/model1');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/instructlab/InstructlabColumnModelName.svelte",
    "content": "<script lang=\"ts\">\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\nimport { router } from 'tinro';\nimport { catalog } from '/@/stores/catalog';\n\nexport let object: InstructlabSession;\n\n$: name = $catalog.models.find(r => r.id === object.modelId)?.name;\n\nfunction openDetails(): void {\n  router.goto(`/model/${encodeURIComponent(object.modelId)}`);\n}\n</script>\n\n<button title=\"Open model details\" class=\"text-[var(--pd-table-body-text-highlight)]\" on:click={openDetails}>\n  {name}\n</button>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/instructlab/InstructlabColumnName.svelte",
    "content": "<script lang=\"ts\">\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\n\nexport let object: InstructlabSession;\n</script>\n\n<div class=\"text-[var(--pd-table-body-text)] w-full text-ellipsis overflow-hidden\">\n  {object.name}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/instructlab/InstructlabColumnRepository.svelte",
    "content": "<script lang=\"ts\">\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\n\nexport let object: InstructlabSession;\n</script>\n\n<span class=\"text-[var(--pd-table-body-text)]\">\n  {object.repository}\n</span>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/instructlab/InstructlabColumnStatus.svelte",
    "content": "<script lang=\"ts\">\nimport type { InstructlabSession, InstructlabSessionStatus } from '@shared/models/instructlab/IInstructlabSession';\n\nexport let object: InstructlabSession;\n\nfunction getStatusLabel(code: InstructlabSessionStatus): string {\n  switch (code) {\n    case 'fine-tuned':\n      return 'Fine tuned';\n    case 'generating-instructions':\n      return 'Generating Instructions';\n  }\n}\n</script>\n\n<div class=\"text-[var(--pd-table-body-text)] whitespace-normal break-normal\">\n  {getStatusLabel(object.status)}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/instructlab/InstructlabColumnTargetModelName.svelte",
    "content": "<script lang=\"ts\">\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\n\nexport let object: InstructlabSession;\n</script>\n\n<span class=\"text-[var(--pd-table-body-text)]\">\n  {object.targetModel}\n</span>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnAction.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, vi, beforeEach } from 'vitest';\nimport { fireEvent, render, screen, waitFor } from '@testing-library/svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport ModelColumnActions from '/@/lib/table/model/ModelColumnActions.svelte';\nimport { router } from 'tinro';\nimport { type InferenceServer, InferenceType } from '@shared/models/IInference';\n\nconst mocks = vi.hoisted(() => ({\n  requestRemoveLocalModel: vi.fn(),\n  openFile: vi.fn(),\n  downloadModel: vi.fn(),\n  getInferenceServersMock: vi.fn<() => InferenceServer[]>(),\n}));\n\nvi.mock('/@/utils/client', () => ({\n  studioClient: {\n    requestRemoveLocalModel: mocks.requestRemoveLocalModel,\n    openFile: mocks.openFile,\n    downloadModel: mocks.downloadModel,\n  },\n}));\n\nvi.mock('../../../stores/inferenceServers', () => ({\n  inferenceServers: {\n    subscribe: (f: (msg: InferenceServer[]) => void) => {\n      f(mocks.getInferenceServersMock());\n      return (): void => {};\n    },\n  },\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  mocks.getInferenceServersMock.mockReturnValue([]);\n\n  
mocks.downloadModel.mockResolvedValue(undefined);\n  mocks.openFile.mockResolvedValue(undefined);\n  mocks.requestRemoveLocalModel.mockResolvedValue(undefined);\n});\n\ntest('Expect folder and delete button in document', async () => {\n  const d = new Date();\n  d.setDate(d.getDate() - 2);\n\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      creation: d,\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n  render(ModelColumnActions, { object });\n\n  const explorerBtn = screen.getByTitle('Open Model Folder');\n  expect(explorerBtn).toBeInTheDocument();\n\n  const deleteBtn = screen.getByTitle('Delete Model');\n  expect(deleteBtn).toBeInTheDocument();\n\n  const rocketBtn = screen.getByTitle('Create Model Service');\n  expect(rocketBtn).toBeInTheDocument();\n\n  const downloadBtn = screen.queryByTitle('Download Model');\n  expect(downloadBtn).toBeNull();\n});\n\ntest('Expect download button in document', async () => {\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: undefined,\n    memory: 1000,\n  };\n  render(ModelColumnActions, { object });\n\n  const explorerBtn = screen.queryByTitle('Open Model Folder');\n  expect(explorerBtn).toBeNull();\n\n  const deleteBtn = screen.queryByTitle('Delete Model');\n  expect(deleteBtn).toBeNull();\n\n  const rocketBtn = screen.queryByTitle('Create Model Service');\n  expect(rocketBtn).toBeNull();\n\n  const downloadBtn = screen.getByTitle('Download Model');\n  expect(downloadBtn).toBeInTheDocument();\n});\n\ntest('Expect downloadModel to be call on click', async () => {\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: undefined,\n    memory: 1000,\n  };\n  
render(ModelColumnActions, { object });\n\n  const downloadBtn = screen.getByTitle('Download Model');\n  expect(downloadBtn).toBeInTheDocument();\n\n  await fireEvent.click(downloadBtn);\n  await waitFor(() => {\n    expect(mocks.downloadModel).toHaveBeenCalledWith('my-model');\n  });\n});\n\ntest('Expect router to be called when rocket icon clicked', async () => {\n  const gotoMock = vi.spyOn(router, 'goto');\n  const replaceMock = vi.spyOn(router.location.query, 'replace');\n\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      creation: new Date(),\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n  render(ModelColumnActions, { object });\n\n  const rocketBtn = screen.getByTitle('Create Model Service');\n\n  await fireEvent.click(rocketBtn);\n  await waitFor(() => {\n    expect(gotoMock).toHaveBeenCalledWith('/service/create');\n    expect(replaceMock).toHaveBeenCalledWith({ 'model-id': 'my-model' });\n  });\n});\n\ntest('Expect delete button to be disabled when model in use', async () => {\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      creation: new Date(),\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n\n  mocks.getInferenceServersMock.mockReturnValue([\n    {\n      models: [object],\n      type: InferenceType.LLAMA_CPP,\n      status: 'running',\n      container: {\n        containerId: '',\n        engineId: '',\n      },\n      connection: {\n        port: 0,\n      },\n      health: undefined,\n      labels: {},\n    },\n  ]);\n  render(ModelColumnActions, { object });\n\n  const deleteBtn = screen.getByTitle('Delete Model');\n  expect(deleteBtn).toBeDefined();\n\n  await vi.waitFor(() => {\n    // disable class\n    
expect(deleteBtn.classList).toContain('text-[var(--pd-action-button-disabled-text)]');\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnActions.svelte",
    "content": "<script lang=\"ts\">\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { faDownload, faRocket, faTrash, faFolderOpen } from '@fortawesome/free-solid-svg-icons';\nimport ListItemButtonIcon from '../../button/ListItemButtonIcon.svelte';\nimport { studioClient } from '/@/utils/client';\nimport { router } from 'tinro';\nimport { onMount } from 'svelte';\nimport { inferenceServers } from '/@/stores/inferenceServers';\n\nexport let object: ModelInfo;\n\nlet inUse: boolean = false;\n$: inUse;\n\nfunction deleteModel(): void {\n  studioClient.requestRemoveLocalModel(object.id).catch(err => {\n    console.error(`Something went wrong while trying to delete model ${String(err)}.`);\n  });\n}\n\nfunction openModelFolder(): void {\n  if (object?.file) {\n    studioClient\n      .openFile(object.file.path)\n      .catch(err => console.error(`Error opening file ${object?.file?.path}:`, err));\n  }\n}\n\nfunction downloadModel(): void {\n  if (object && object.file === undefined) {\n    studioClient.downloadModel(object.id).catch((err: unknown) => {\n      console.error(`Something went wrong while trying to download model ${object.id}`, err);\n    });\n  }\n}\n\nfunction createModelService(): void {\n  router.goto('/service/create');\n  router.location.query.replace({ 'model-id': object.id });\n}\n\nonMount(() => {\n  return inferenceServers.subscribe(servers => {\n    inUse = servers.some(server => server.models.some(model => model.id === object.id));\n  });\n});\n</script>\n\n{#if object.file !== undefined}\n  <ListItemButtonIcon\n    icon={faRocket}\n    title=\"Create Model Service\"\n    enabled={!object.state}\n    onClick={createModelService} />\n  <ListItemButtonIcon icon={faFolderOpen} onClick={openModelFolder} title=\"Open Model Folder\" enabled={!object.state} />\n  <ListItemButtonIcon icon={faTrash} onClick={deleteModel} title=\"Delete Model\" enabled={!inUse && !object.state} />\n{:else}\n  <ListItemButtonIcon icon={faDownload} 
onClick={downloadModel} title=\"Download Model\" enabled={!object.state} />\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnAge.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport ModelColumnCreation from './ModelColumnAge.svelte';\n\ntest('Expect simple column styling', async () => {\n  const d = new Date();\n  d.setDate(d.getDate() - 2);\n\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      creation: d,\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n  render(ModelColumnCreation, { object });\n\n  const text = screen.getByText('2 days');\n  expect(text).toBeInTheDocument();\n  expect(text).toHaveClass('text-[var(--pd-table-body-text)]');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnAge.svelte",
    "content": "<script lang=\"ts\">\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { humanizeAge } from '/@/utils/dimensions';\n\nexport let object: ModelInfo;\n</script>\n\n<div class=\"text-[var(--pd-table-body-text)]\">\n  {#if object.file?.creation}\n    {humanizeAge(object.file.creation.getTime() / 1000)}\n  {/if}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnLabels.svelte",
    "content": "<script lang=\"ts\">\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { faMemory } from '@fortawesome/free-solid-svg-icons';\nimport { filesize } from 'filesize';\nimport Badge from '../../Badge.svelte';\n\nexport let object: ModelInfo;\n</script>\n\n<div>\n  <div class=\"flex gap-x-2\">\n    <Badge icon={faMemory} content=\"RAM usage: {object.memory ? filesize(object.memory, { base: 2 }) : 'N/A'}\" />\n  </div>\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnName.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, vi } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport ModelColumnName from './ModelColumnName.svelte';\nimport userEvent from '@testing-library/user-event';\nimport { router } from 'tinro';\n\ntest('Expect model info lower bar to be visible', async () => {\n  const routerMock = vi.spyOn(router, 'goto');\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: 'apache-2',\n    name: 'My model',\n    registry: 'registry',\n    url: 'url',\n    file: {\n      file: 'file',\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n  render(ModelColumnName, { object });\n  const name = screen.getByLabelText('Model Name');\n  expect(name.textContent).equal('My model');\n\n  const info = screen.getByLabelText('Model Info');\n  expect(info.textContent).equal('registry - apache-2');\n\n  const importedInfo = screen.queryByLabelText('Imported Model Info');\n  expect(importedInfo).not.toBeInTheDocument();\n\n  const modelNameBtn = screen.getByRole('button', { name: 'Open 
Model Details' });\n  await userEvent.click(modelNameBtn);\n  expect(routerMock).toBeCalledWith('/model/my-model');\n});\n\ntest('Expect model info lower bar to be visible', async () => {\n  const routerMock = vi.spyOn(router, 'goto');\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: 'My model',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n  render(ModelColumnName, { object });\n  const name = screen.getByLabelText('Model Name');\n  expect(name.textContent).equal('My model');\n\n  const info = screen.queryByLabelText('Model Info');\n  expect(info).not.toBeInTheDocument();\n\n  const importedInfo = screen.getByLabelText('Imported Model Info');\n  expect(importedInfo.textContent).equal('Imported by User');\n\n  const modelNameBtn = screen.getByRole('button', { name: 'Open Model Details' });\n  await userEvent.click(modelNameBtn);\n  expect(routerMock).toBeCalledWith('/model/my-model');\n});\n\ntest('Expect model id to be encoded', async () => {\n  const routerMock = vi.spyOn(router, 'goto');\n  const object: ModelInfo = {\n    id: 'org/my-model',\n    description: '',\n    license: 'apache-2',\n    name: 'My model',\n    registry: 'registry',\n    url: 'url',\n    file: {\n      file: 'file',\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1000,\n  };\n  render(ModelColumnName, { object });\n  const name = screen.getByLabelText('Model Name');\n  expect(name.textContent).equal('My model');\n\n  const info = screen.getByLabelText('Model Info');\n  expect(info.textContent).equal('registry - apache-2');\n\n  const importedInfo = screen.queryByLabelText('Imported Model Info');\n  expect(importedInfo).not.toBeInTheDocument();\n\n  const modelNameBtn = screen.getByRole('button', { name: 'Open Model Details' });\n  await userEvent.click(modelNameBtn);\n  
expect(routerMock).toBeCalledWith('/model/org%2Fmy-model');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnName.svelte",
    "content": "<script lang=\"ts\">\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { router } from 'tinro';\n\nexport let object: ModelInfo;\n\nfunction openDetails(): void {\n  router.goto(`/model/${encodeURIComponent(object.id)}`);\n}\n</script>\n\n<button class=\"flex flex-col w-full\" title={object.name} on:click={openDetails} aria-label=\"Open Model Details\">\n  <div\n    class=\"text-[var(--pd-table-body-text-highlight)] overflow-hidden text-ellipsis w-full text-left\"\n    aria-label=\"Model Name\">\n    {object.name}\n  </div>\n  {#if object.registry ?? object.license}\n    <span class=\"text-sm text-[var(--pd-table-body-text)]\" aria-label=\"Model Info\"\n      >{object.registry} - {object.license}</span>\n  {/if}\n  {#if !object.registry && !object.license && !object.url}\n    <span class=\"text-sm text-[var(--pd-table-body-text)]\" aria-label=\"Imported Model Info\">Imported by User</span>\n  {/if}\n</button>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnRecipeSelection.svelte",
    "content": ""
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnSize.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport ModelColumnSize from './ModelColumnSize.svelte';\n\ntest('Expect simple column styling', async () => {\n  const object: ModelInfo = {\n    id: 'my-model',\n    description: '',\n    license: '',\n    name: '',\n    registry: '',\n    url: '',\n    file: {\n      file: 'file',\n      creation: new Date(),\n      size: 1000,\n      path: 'path',\n    },\n    memory: 1024,\n  };\n  render(ModelColumnSize, { object });\n\n  const text = screen.getByText('1 kB');\n  expect(text).toBeInTheDocument();\n  expect(text.parentElement).toHaveClass('text-[var(--pd-table-body-text)]');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/model/ModelColumnSize.svelte",
    "content": "<script lang=\"ts\">\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { filesize } from 'filesize';\n\nexport let object: ModelInfo;\n</script>\n\n<div class=\"text-sm text-[var(--pd-table-body-text)] flex-flex-row\">\n  {#if object.file?.size}\n    <div>{filesize(object.file.size)}</div>\n  {/if}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/playground/ConversationColumnAction.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, vi, beforeEach } from 'vitest';\nimport { render, screen, fireEvent } from '@testing-library/svelte';\nimport { studioClient } from '/@/utils/client';\nimport ConversationColumnAction from '/@/lib/table/playground/ConversationColumnAction.svelte';\n\nvi.mock('../../../utils/client', async () => ({\n  studioClient: {\n    requestDeleteConversation: vi.fn(),\n  },\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(studioClient.requestDeleteConversation).mockResolvedValue(undefined);\n});\n\ntest('should call requestDeleteConversation when click delete', async () => {\n  render(ConversationColumnAction, {\n    object: {\n      id: 'dummyConversationId',\n      name: 'dummyName',\n      modelId: 'dummyModelId',\n    },\n  });\n\n  const startBtn = screen.getByTitle('Delete conversation');\n  await fireEvent.click(startBtn);\n  expect(studioClient.requestDeleteConversation).toHaveBeenCalledWith('dummyConversationId');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/playground/ConversationColumnAction.svelte",
    "content": "<script lang=\"ts\">\nimport type { Conversation } from '@shared/models/IPlaygroundMessage';\nimport ConversationActions from '../../conversation/ConversationActions.svelte';\n\nexport let object: Conversation;\n</script>\n\n<ConversationActions conversation={object} />\n"
  },
  {
    "path": "packages/frontend/src/lib/table/playground/PlaygroundColumnIcon.svelte",
    "content": "<script lang=\"ts\">\nimport PlaygroundWhite from '../../icons/PlaygroundWhite.svelte';\n// svelte-ignore unused-export-let\nexport let object: unknown;\n</script>\n\n<PlaygroundWhite />\n"
  },
  {
    "path": "packages/frontend/src/lib/table/playground/PlaygroundColumnModel.svelte",
    "content": "<script lang=\"ts\">\nimport type { Conversation } from '@shared/models/IPlaygroundMessage';\n\nexport let object: Conversation;\nimport { catalog } from '/@/stores/catalog';\n\n$: name = $catalog.models.find(r => r.id === object.modelId)?.name;\n</script>\n\n<div class=\"text-[var(--pd-table-body-text)] overflow-hidden text-ellipsis\">\n  {name}\n</div>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/playground/PlaygroundColumnName.svelte",
    "content": "<script lang=\"ts\">\nimport { router } from 'tinro';\nimport type { Conversation } from '@shared/models/IPlaygroundMessage';\n\nexport let object: Conversation;\n\nfunction openDetails(): void {\n  router.goto(`/playground/${object.id}`);\n}\n</script>\n\n<button on:click={openDetails}>\n  <div class=\"text-[var(--pd-table-body-text-highlight)] overflow-hidden text-ellipsis\">\n    {object.name}\n  </div>\n</button>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/playground/PlaygroundColumnRuntime.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { test, vi, beforeEach } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport { InferenceType } from '@shared/models/IInference';\nimport type { ConversationWithBackend } from '/@/stores/conversations';\nimport PlaygroundColumnRuntime from './PlaygroundColumnRuntime.svelte';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('should display label for backend', async () => {\n  render(PlaygroundColumnRuntime, {\n    object: {\n      backend: InferenceType.LLAMA_CPP,\n    } as ConversationWithBackend,\n  });\n\n  screen.getByText('llamacpp');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/playground/PlaygroundColumnRuntime.svelte",
    "content": "<script lang=\"ts\">\nimport { inferenceTypeLabel } from '@shared/models/IInference';\nimport Badge from '../../Badge.svelte';\nimport type { ConversationWithBackend } from '/@/stores/conversations';\n\nexport let object: ConversationWithBackend;\n</script>\n\n<Badge content={inferenceTypeLabel(object.backend)} />\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceAction.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, vi, beforeEach } from 'vitest';\nimport { render, screen, fireEvent } from '@testing-library/svelte';\nimport ServiceAction from './ServiceAction.svelte';\nimport { studioClient } from '/@/utils/client';\nimport { InferenceType } from '@shared/models/IInference';\n\nvi.mock('../../../utils/client', async () => ({\n  studioClient: {\n    startInferenceServer: vi.fn(),\n    stopInferenceServer: vi.fn(),\n    requestDeleteInferenceServer: vi.fn(),\n  },\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(studioClient.startInferenceServer).mockResolvedValue(undefined);\n  vi.mocked(studioClient.stopInferenceServer).mockResolvedValue(undefined);\n  vi.mocked(studioClient.requestDeleteInferenceServer).mockResolvedValue(undefined);\n});\n\ntest('should display stop button when status running', async () => {\n  render(ServiceAction, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const stopBtn = screen.getByTitle('Stop 
service');\n  expect(stopBtn).toBeDefined();\n});\n\ntest('should display start button when status stopped', async () => {\n  render(ServiceAction, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'stopped',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const startBtn = screen.getByTitle('Start service');\n  expect(startBtn).toBeDefined();\n});\n\ntest('should call stopInferenceServer when click stop', async () => {\n  render(ServiceAction, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const stopBtn = screen.getByTitle('Stop service');\n  await fireEvent.click(stopBtn);\n  expect(studioClient.stopInferenceServer).toHaveBeenCalledWith('dummyContainerId');\n});\n\ntest('should call startInferenceServer when click start', async () => {\n  render(ServiceAction, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'stopped',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const startBtn = screen.getByTitle('Start service');\n  await fireEvent.click(startBtn);\n  expect(studioClient.startInferenceServer).toHaveBeenCalledWith('dummyContainerId');\n});\n\ntest('should call deleteInferenceServer when click delete', async () => {\n  render(ServiceAction, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'stopped',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const startBtn = screen.getByTitle('Delete 
service');\n  await fireEvent.click(startBtn);\n  expect(studioClient.requestDeleteInferenceServer).toHaveBeenCalledWith('dummyContainerId');\n});\n\ntest('should be disabled on transition', async () => {\n  render(ServiceAction, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'stopping',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const startBtn = screen.getByTitle('Start service');\n  expect(startBtn.classList).toContain('text-[var(--pd-action-button-disabled-text)]');\n\n  const deleteBtn = screen.getByTitle('Delete service');\n  expect(deleteBtn.classList).toContain('text-[var(--pd-action-button-disabled-text)]');\n});\n\ntest('should have background on details', async () => {\n  render(ServiceAction, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'stopped',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n    detailed: true,\n  });\n\n  const startBtn = screen.getByTitle('Start service');\n  expect(startBtn.classList).toContain('bg-[var(--pd-action-button-details-bg)]');\n  expect(startBtn.classList).toContain('rounded-lg');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceAction.svelte",
    "content": "<script lang=\"ts\">\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { studioClient } from '/@/utils/client';\nimport { faPlay, faStop, faTrash } from '@fortawesome/free-solid-svg-icons';\nimport ListItemButtonIcon from '/@/lib/button/ListItemButtonIcon.svelte';\n\nexport let object: InferenceServer;\nexport let detailed: boolean = false;\n\nfunction stopInferenceServer(): void {\n  studioClient.stopInferenceServer(object.container.containerId).catch((err: unknown) => {\n    console.error('Something went wrong while trying to stop inference server', err);\n  });\n}\n\nfunction startInferenceServer(): void {\n  studioClient.startInferenceServer(object.container.containerId).catch((err: unknown) => {\n    console.error('Something went wrong while trying to start inference server', err);\n  });\n}\n\nfunction deleteInferenceServer(): void {\n  studioClient.requestDeleteInferenceServer(object.container.containerId).catch((err: unknown) => {\n    console.error('Something went wrong while trying to delete inference server', err);\n  });\n}\n\nlet loading: boolean;\n$: {\n  loading = ['deleting', 'stopping', 'starting'].includes(object.status);\n}\n</script>\n\n{#if object.status === 'running'}\n  <ListItemButtonIcon detailed={detailed} icon={faStop} onClick={stopInferenceServer} title=\"Stop service\" />\n{:else}\n  <ListItemButtonIcon\n    detailed={detailed}\n    enabled={!loading}\n    icon={faPlay}\n    onClick={startInferenceServer}\n    title=\"Start service\" />\n{/if}\n<ListItemButtonIcon\n  detailed={detailed}\n  enabled={!loading}\n  icon={faTrash}\n  onClick={deleteInferenceServer}\n  title=\"Delete service\" />\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceColumnModelName.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, vi, beforeEach } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport ServiceColumnModelName from '/@/lib/table/service/ServiceColumnModelName.svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('the model name should be displayed', async () => {\n  render(ServiceColumnModelName, {\n    object: {\n      health: undefined,\n      models: [\n        {\n          id: 'model1',\n          name: 'dummyName',\n        } as unknown as ModelInfo,\n      ],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const modelName = screen.getByText('dummyName');\n  expect(modelName).toBeDefined();\n  expect(modelName.localName).toBe('span');\n});\n\ntest('multiple models name should be displayed as list', async () => {\n  render(ServiceColumnModelName, {\n    object: {\n      health: undefined,\n      models: [\n        {\n          id: 'model1',\n          name: 'dummyName-1',\n        } as unknown as ModelInfo,\n        {\n          id: 
'model2',\n          name: 'dummyName-2',\n        } as unknown as ModelInfo,\n      ],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const model1Name = screen.getByText('dummyName-1');\n  expect(model1Name).toBeDefined();\n  expect(model1Name.localName).toBe('li');\n\n  const model2Name = screen.getByText('dummyName-2');\n  expect(model2Name).toBeDefined();\n  expect(model2Name.localName).toBe('li');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceColumnModelName.svelte",
    "content": "<script lang=\"ts\">\nimport type { InferenceServer } from '@shared/models/IInference';\n\nexport let object: InferenceServer;\n</script>\n\n{#if object.models.length === 1}\n  <span class=\"text-[var(--pd-table-body-text)]\">\n    {object.models[0].name}\n  </span>\n{:else}\n  <ul>\n    {#each object.models as model (model.id)}\n      <li class=\"text-[var(--pd-table-body-text)]\">{model.name}</li>\n    {/each}\n  </ul>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceColumnName.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, vi, beforeEach } from 'vitest';\nimport { render, screen, fireEvent } from '@testing-library/svelte';\nimport { router } from 'tinro';\nimport ServiceColumnName from '/@/lib/table/service/ServiceColumnName.svelte';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('click on name should open details page', async () => {\n  const gotoMock = vi.spyOn(router, 'goto');\n  render(ServiceColumnName, {\n    object: {\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n    },\n  });\n\n  const nameBtn = screen.getByTitle('Open service details');\n  expect(nameBtn).toBeDefined();\n  await fireEvent.click(nameBtn);\n\n  expect(gotoMock).toHaveBeenCalledWith('/service/dummyContainerId');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceColumnName.svelte",
    "content": "<script lang=\"ts\">\nimport { router } from 'tinro';\nimport type { InferenceServer } from '@shared/models/IInference';\n\nexport let object: InferenceServer;\n\nfunction openDetails(): void {\n  router.goto(`/service/${object.container.containerId}`);\n}\n</script>\n\n<button\n  title=\"Open service details\"\n  class=\"text-[var(--pd-table-body-text-highlight)] w-full text-ellipsis overflow-hidden\"\n  on:click={openDetails}>\n  {object.container.containerId}\n</button>\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceColumnRuntime.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { test, vi, beforeEach } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport ServiceColumnRuntime from './ServiceColumnRuntime.svelte';\nimport { InferenceType, type InferenceServer } from '@shared/models/IInference';\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n});\n\ntest('should display label for type', async () => {\n  render(ServiceColumnRuntime, {\n    object: {\n      type: InferenceType.LLAMA_CPP,\n    } as InferenceServer,\n  });\n\n  screen.getByText('llamacpp');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceColumnRuntime.svelte",
    "content": "<script lang=\"ts\">\nimport { inferenceTypeLabel, type InferenceServer } from '@shared/models/IInference';\nimport Badge from '../../Badge.svelte';\n\nexport let object: InferenceServer;\n</script>\n\n<Badge content={inferenceTypeLabel(object.type)} />\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceStatus.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test, vi, describe } from 'vitest';\nimport { render, screen, fireEvent, waitFor } from '@testing-library/svelte';\nimport ServiceStatus from './ServiceStatus.svelte';\nimport { studioClient } from '/@/utils/client';\nimport { type InferenceServerStatus, InferenceType } from '@shared/models/IInference';\n\nvi.mock('../../../utils/client', async () => ({\n  studioClient: {\n    navigateToContainer: vi.fn().mockReturnValue(Promise.resolve()),\n  },\n}));\n\ndescribe('transition statuses', () => {\n  test.each(['starting', 'stopping', 'deleting'] as InferenceServerStatus[])(\n    'status %s should display a spinner',\n    status => {\n      render(ServiceStatus, {\n        object: {\n          health: undefined,\n          models: [],\n          connection: { port: 8888 },\n          status: status,\n          container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n          type: InferenceType.LLAMA_CPP,\n          labels: {},\n        },\n      });\n\n      const spinner = screen.queryByRole('progressbar');\n      expect(spinner).toBeDefined();\n\n      const button = screen.queryByRole('button');\n      
expect(button).toBeNull();\n    },\n  );\n});\n\ndescribe('stable statuses', () => {\n  test.each(['running', 'stopped', 'error'] as InferenceServerStatus[])(\n    'status %s should not display a spinner',\n    status => {\n      render(ServiceStatus, {\n        object: {\n          health: undefined,\n          models: [],\n          connection: { port: 8888 },\n          status: status,\n          container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n          type: InferenceType.LLAMA_CPP,\n          labels: {},\n        },\n      });\n\n      const spinner = screen.queryByRole('progressbar');\n      expect(spinner).toBeNull();\n\n      const button = screen.getByRole('button');\n      expect(button).toBeDefined();\n    },\n  );\n});\n\ntest('defined health should not display a spinner', async () => {\n  render(ServiceStatus, {\n    object: {\n      health: {\n        Status: 'starting',\n        Log: [],\n        FailingStreak: 1,\n      },\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n      type: InferenceType.LLAMA_CPP,\n      labels: {},\n    },\n  });\n\n  const spinner = screen.queryByRole('progressbar');\n  expect(spinner).toBeNull();\n\n  const button = screen.getByRole('button');\n  expect(button).toBeDefined();\n});\n\ntest('click on status icon should redirect to container', async () => {\n  render(ServiceStatus, {\n    object: {\n      health: {\n        Status: 'starting',\n        Log: [],\n        FailingStreak: 1,\n      },\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n      type: InferenceType.LLAMA_CPP,\n      labels: {},\n    },\n  });\n  // Get button and click on it\n  const button = screen.getByRole('button');\n  await fireEvent.click(button);\n\n  await waitFor(() => {\n    
expect(studioClient.navigateToContainer).toHaveBeenCalledWith('dummyContainerId');\n  });\n});\n\ntest('error status should show degraded', async () => {\n  render(ServiceStatus, {\n    object: {\n      models: [],\n      connection: { port: 8888 },\n      status: 'error',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n      type: InferenceType.LLAMA_CPP,\n      labels: {},\n    },\n  });\n  // Get the status element and verify its title\n  const status = screen.getByRole('status');\n  expect(status.title).toBe('DEGRADED');\n});\n\ntest('running status with no healthcheck should show starting', async () => {\n  render(ServiceStatus, {\n    object: {\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n      type: InferenceType.LLAMA_CPP,\n      labels: {},\n    },\n  });\n  // Get the status element and verify its title\n  const status = screen.getByRole('status');\n  expect(status.title).toBe('STARTING');\n});\n"
  },
  {
    "path": "packages/frontend/src/lib/table/service/ServiceStatus.svelte",
    "content": "<script lang=\"ts\">\nimport type { InferenceServer } from '@shared/models/IInference';\nimport { studioClient } from '/@/utils/client';\nimport { Spinner, StatusIcon } from '@podman-desktop/ui-svelte';\nimport { ContainerIcon } from '@podman-desktop/ui-svelte/icons';\n\nexport let object: InferenceServer;\n\nfunction navigateToContainer(): void {\n  studioClient\n    .navigateToContainer(object.container.containerId)\n    .catch(err => console.error(`Error navigating to container ${object.container.containerId}:`, err));\n}\n\nlet status: string;\nlet loading: boolean;\n$: {\n  status = getStatus();\n  loading = ['deleting', 'stopping', 'starting'].includes(object.status);\n}\n\nfunction getStatus(): 'RUNNING' | 'STARTING' | 'DEGRADED' | '' {\n  switch (object.status) {\n    case 'stopped':\n      return '';\n    case 'error':\n      return 'DEGRADED';\n    default:\n      break;\n  }\n\n  // Special case: when the health check is undefined, and the container is running\n  // it is not ready, so still showing starting\n  if (object.health === undefined && object.status === 'running') {\n    return 'STARTING';\n  }\n\n  switch (object.health?.Status) {\n    case 'healthy':\n      return 'RUNNING';\n    case 'unhealthy':\n    case 'error':\n      return 'DEGRADED';\n    case 'starting':\n      return 'STARTING';\n    default:\n      return '';\n  }\n}\n</script>\n\n{#if loading}\n  <Spinner class=\"text-[var(--pd-table-body-text-highlight)]\" />\n{:else}\n  <button on:click={navigateToContainer}>\n    <StatusIcon status={status} icon={ContainerIcon} />\n  </button>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/main.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { mount } from 'svelte';\nimport App from './App.svelte';\n\nconst target = document.getElementById('app');\nlet app;\nif (target) {\n  app = mount(App, { target });\n}\nexport default app;\n"
  },
  {
    "path": "packages/frontend/src/models/IRouterState.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface RouterState {\n  url: string;\n}\n"
  },
  {
    "path": "packages/frontend/src/pages/Applications.svelte",
    "content": "<script lang=\"ts\">\nimport { NavPage, EmptyScreen, Button } from '@podman-desktop/ui-svelte';\nimport { router } from 'tinro';\nimport { faServer } from '@fortawesome/free-solid-svg-icons';\nimport TasksBanner from '/@/lib/progress/TasksBanner.svelte';\nimport ApplicationTable from '/@/lib/table/application/ApplicationTable.svelte';\n\nconst openApplicationCatalog = (): void => {\n  router.goto('/recipes');\n};\n</script>\n\n<NavPage title=\"AI Apps\" searchEnabled={false}>\n  {#snippet content()}\n    <div class=\"flex flex-col min-w-full min-h-full space-y-5\">\n      <!-- showing running tasks -->\n      <div class=\"w-full\">\n        <TasksBanner title=\"Pulling recipes\" labels={{ 'recipe-pulling': undefined }} />\n      </div>\n\n      <div class=\"flex w-full h-full\">\n        <ApplicationTable>\n          <svelte:fragment slot=\"empty-screen\">\n            <EmptyScreen\n              icon={faServer}\n              title=\"No application running\"\n              message=\"There is no AI App running. You can go to Recipes page to start an application.\">\n              <div class=\"flex gap-2 justify-center\">\n                <Button type=\"link\" on:click={openApplicationCatalog}>Recipes</Button>\n              </div>\n            </EmptyScreen>\n          </svelte:fragment>\n        </ApplicationTable>\n      </div>\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/CreateService.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { studioClient } from '/@/utils/client';\nimport { fireEvent, render, screen } from '@testing-library/svelte';\nimport CreateService from '/@/pages/CreateService.svelte';\nimport type { Task } from '@shared/models/ITask';\nimport userEvent from '@testing-library/user-event';\nimport type { InferenceServer } from '@shared/models/IInference';\n\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { readable, writable } from 'svelte/store';\nimport { router } from 'tinro';\nimport type { ContainerConnectionInfo, ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport * as path from 'node:path';\nimport * as os from 'node:os';\nimport { VMType } from '@shared/models/IPodman';\n// stores\nimport * as ConnectionStore from '/@/stores/containerProviderConnections';\nimport * as InferenceStore from '/@/stores/inferenceServers';\nimport * as ModelsInfoStore from '/@/stores/modelsInfo';\nimport * as TaskStore from 
'/@/stores/tasks';\n\nvi.mock('/@/stores/containerProviderConnections');\nvi.mock('/@/stores/inferenceServers');\nvi.mock('/@/stores/modelsInfo');\nvi.mock('/@/stores/tasks');\nvi.mock('/@/stores/extensionConfiguration');\nvi.mock('../utils/client', () => ({\n  studioClient: {\n    requestCreateInferenceServer: vi.fn(),\n    getHostFreePort: vi.fn(),\n    checkContainerConnectionStatusAndResources: vi.fn(),\n    getExtensionConfiguration: vi.fn(),\n  },\n  rpcBrowser: {\n    subscribe: (): unknown => {\n      return {\n        unsubscribe: (): void => {},\n      };\n    },\n  },\n}));\n\nconst DUMMY_DOWNLOADED_MODEL: ModelInfo = {\n  id: 'dummy-model-id',\n  file: {\n    file: 'fake-file',\n    path: 'fake-path',\n  },\n  name: 'dummy-name',\n  description: 'fake description',\n  properties: {},\n  memory: 10,\n};\n\nconst noMachineConnectionInfo: ContainerConnectionInfo = {\n  status: 'no-machine',\n  canRedirect: true,\n};\n\nconst runningMachineConnectionInfo: ContainerConnectionInfo = {\n  name: 'Podman machine',\n  status: 'running',\n  canRedirect: true,\n};\n\nconst lowResourceMachineConnectionInfo: ContainerConnectionInfo = {\n  name: 'Podman Machine',\n  canEdit: true,\n  canRedirect: true,\n  cpus: 12,\n  cpusExpected: 10,\n  memoryExpected: 10,\n  memoryIdle: 5,\n  status: 'low-resources',\n};\n\nconst containerProviderConnection: ContainerProviderConnectionInfo = {\n  name: 'Dummy container connection provider',\n  status: 'started',\n  type: 'podman',\n  vmType: VMType.QEMU,\n  providerId: 'podman',\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(InferenceStore).inferenceServers = readable([\n    { container: { containerId: 'dummyContainerId' } } as InferenceServer,\n  ]);\n  vi.mocked(ModelsInfoStore).modelsInfo = readable([DUMMY_DOWNLOADED_MODEL]);\n  vi.mocked(TaskStore).tasks = readable([]);\n  vi.mocked(ConnectionStore).containerProviderConnections = readable([containerProviderConnection]);\n\n  
vi.mocked(studioClient.checkContainerConnectionStatusAndResources).mockResolvedValue(runningMachineConnectionInfo);\n  vi.mocked(studioClient.requestCreateInferenceServer).mockResolvedValue('dummyTrackingId');\n  vi.mocked(studioClient.getHostFreePort).mockResolvedValue(8888);\n  vi.mocked(studioClient.getExtensionConfiguration).mockResolvedValue({\n    experimentalGPU: false,\n    apiPort: 0,\n    inferenceRuntime: 'llama-cpp',\n    experimentalTuning: false,\n    modelsPath: '',\n    modelUploadDisabled: false,\n    showGPUPromotion: false,\n    appearance: 'dark',\n  });\n\n  window.HTMLElement.prototype.scrollIntoView = vi.fn();\n});\n\ntest('create button should be disabled when no model id provided', async () => {\n  vi.mocked(ModelsInfoStore).modelsInfo = readable([]);\n  render(CreateService);\n\n  await vi.waitFor(() => {\n    const createBtn = screen.getByTitle('Create service');\n    expect(createBtn).toBeDefined();\n    expect(createBtn.attributes.getNamedItem('disabled')).toBeTruthy();\n  });\n});\n\ntest('expect error message to be displayed when no model locally', async () => {\n  // mock an empty store to simulate no models\n  vi.mocked(ModelsInfoStore).modelsInfo = readable([]);\n  render(CreateService);\n\n  await vi.waitFor(() => {\n    const alert = screen.getByRole('alert');\n    expect(alert).toBeDefined();\n  });\n});\n\ntest('expect error message to be hidden when models locally', () => {\n  render(CreateService);\n\n  const alert = screen.queryByRole('alert');\n  expect(alert).toBeNull();\n});\n\ntest('button click should call createInferenceServer', async () => {\n  render(CreateService);\n\n  let createBtn: HTMLElement | undefined = undefined;\n  await vi.waitFor(() => {\n    createBtn = screen.getByTitle('Create service');\n    expect(createBtn).toBeDefined();\n    expect(createBtn).toBeEnabled();\n  });\n\n  if (createBtn === undefined) throw new Error('createBtn undefined');\n\n  await fireEvent.click(createBtn);\n  
expect(vi.mocked(studioClient.requestCreateInferenceServer)).toHaveBeenCalledWith({\n    modelsInfo: [DUMMY_DOWNLOADED_MODEL],\n    port: 8888,\n    connection: containerProviderConnection,\n  });\n});\n\ntest('no containerProviderConnections should have no running container error', async () => {\n  // mock an empty store\n  vi.mocked(ConnectionStore).containerProviderConnections = readable([]);\n\n  const { getByTitle, getByRole } = render(CreateService);\n\n  const createBtn: HTMLElement = await vi.waitFor(() => {\n    const element = getByTitle('Create service');\n    expect(element).toBeDefined();\n    return element;\n  });\n  expect(createBtn).toBeDisabled();\n\n  const alert = getByRole('alert');\n  expect(alert).toHaveTextContent('No running container engine found');\n});\n\ntest('no container error should disappear if one get available', async () => {\n  // mock an empty store\n  const store = writable<ContainerProviderConnectionInfo[]>([]);\n  vi.mocked(ConnectionStore).containerProviderConnections = store;\n\n  const { getByRole, queryByRole } = render(CreateService);\n\n  // First we should have the error\n  await vi.waitFor(() => {\n    const alert = getByRole('alert');\n    expect(alert).toHaveTextContent('No running container engine found');\n  });\n\n  // let's fill the store\n  store.set([containerProviderConnection]);\n\n  // wait for error to be removed\n  await vi.waitFor(() => {\n    const alert = queryByRole('alert');\n    expect(alert).toBeNull();\n  });\n});\n\ntest('tasks progress should not be visible by default', async () => {\n  render(CreateService);\n\n  const status = screen.queryByRole('status');\n  expect(status).toBeNull();\n});\n\ntest('tasks should be displayed after requestCreateInferenceServer', async () => {\n  const store = writable<Task[]>([]);\n  vi.mocked(TaskStore).tasks = store;\n\n  render(CreateService, {\n    trackingId: 'dummyTrackingId',\n  });\n\n  const createBtn: HTMLElement = await vi.waitFor(() => {\n    const 
element = screen.getByTitle('Create service');\n    expect(element).toBeDefined();\n    return element;\n  });\n\n  await fireEvent.click(createBtn);\n\n  await vi.waitFor(() => {\n    expect(studioClient.requestCreateInferenceServer).toHaveBeenCalled();\n  });\n\n  store.set([\n    {\n      id: 'dummyTaskId',\n      labels: {\n        trackingId: 'dummyTrackingId',\n      },\n      name: 'Dummy Task name',\n      state: 'loading',\n    },\n  ]);\n\n  await vi.waitFor(() => {\n    const status = screen.getByRole('status');\n    expect(status).toBeDefined();\n  });\n});\n\ntest('port input should update on user input', async () => {\n  render(CreateService);\n\n  const portInput: HTMLInputElement = screen.getByRole('textbox', { name: 'Port input' });\n  expect(portInput).toBeDefined();\n\n  await fireEvent.input(portInput, '8888');\n\n  await vi.waitFor(() => {\n    expect(portInput.value).toBe('8888');\n  });\n});\n\ntest('form should be disabled when loading', async () => {\n  const store = writable<Task[]>([]);\n  vi.mocked(TaskStore).tasks = store;\n\n  render(CreateService, {\n    trackingId: 'dummyTrackingId',\n  });\n\n  const createBtn: HTMLElement = await vi.waitFor(() => {\n    const element = screen.getByTitle('Create service');\n    expect(element).toBeDefined();\n    return element;\n  });\n\n  await fireEvent.click(createBtn);\n\n  await vi.waitFor(() => {\n    expect(studioClient.requestCreateInferenceServer).toHaveBeenCalled();\n  });\n\n  store.set([\n    {\n      id: 'dummyTaskId',\n      labels: {\n        trackingId: 'dummyTrackingId',\n      },\n      name: 'Dummy Task name',\n      state: 'loading',\n    },\n  ]);\n\n  await vi.waitFor(() => {\n    const input = screen.getByRole('textbox', { name: 'Port input' });\n    expect(input).toBeDisabled();\n  });\n});\n\ntest('should display error message if createService fails', async () => {\n  vi.mocked(studioClient.requestCreateInferenceServer).mockRejectedValue('error creating service');\n  
render(CreateService);\n\n  const createBtn: HTMLElement = await vi.waitFor(() => {\n    const element = screen.getByTitle('Create service');\n    expect(element).toBeDefined();\n    return element;\n  });\n\n  const errorMessage = screen.queryByLabelText('Error Message Content');\n  expect(errorMessage).not.toBeInTheDocument();\n\n  await userEvent.click(createBtn);\n\n  const errorMessageAfterSubmit = screen.getByLabelText('Error Message Content');\n  expect(errorMessageAfterSubmit).toBeInTheDocument();\n  expect(errorMessageAfterSubmit?.textContent).equal('error creating service');\n});\n\ntest('should display connectionInfo message if there is no running connection', async () => {\n  vi.mocked(studioClient.checkContainerConnectionStatusAndResources).mockResolvedValue(noMachineConnectionInfo);\n  render(CreateService);\n\n  await vi.waitFor(() => {\n    const banner = screen.getByLabelText('Container connection info banner');\n    expect(banner).toBeInTheDocument();\n  });\n});\n\ntest('should display connectionInfo message if there is a podman connection with low resources', async () => {\n  vi.mocked(studioClient.checkContainerConnectionStatusAndResources).mockResolvedValue(\n    lowResourceMachineConnectionInfo,\n  );\n  const modelsInfoList = writable<ModelInfo[]>([\n    {\n      id: 'id',\n      file: {\n        file: 'file',\n        path: path.resolve(os.tmpdir(), 'path'),\n      },\n      memory: 10,\n    } as unknown as ModelInfo,\n  ]);\n  vi.mocked(ModelsInfoStore).modelsInfo = modelsInfoList;\n  render(CreateService);\n\n  await vi.waitFor(() => {\n    const banner = screen.getByLabelText('Container connection info banner');\n    expect(banner).toBeInTheDocument();\n  });\n});\n\ntest('there should be NO banner if there is a running podman connection having enough resources', async () => {\n  const modelsInfoList = writable<ModelInfo[]>([\n    {\n      id: 'id',\n      file: {\n        file: 'file',\n        path: path.resolve(os.tmpdir(), 'path'),\n 
     },\n      memory: 10,\n    } as unknown as ModelInfo,\n  ]);\n  vi.mocked(ModelsInfoStore).modelsInfo = modelsInfoList;\n  render(CreateService);\n\n  await vi.waitFor(() => {\n    const banner = screen.queryByLabelText('Container connection info banner');\n    expect(banner).not.toBeInTheDocument();\n  });\n});\n\ntest('model-id query should be used to select default model', async () => {\n  const modelsInfoList = writable<ModelInfo[]>([\n    {\n      id: 'model-id-1',\n      file: {\n        file: 'file',\n        path: '/path',\n      },\n    } as unknown as ModelInfo,\n    {\n      id: 'model-id-2',\n      file: {\n        file: 'file',\n        path: '/path',\n      },\n    } as unknown as ModelInfo,\n  ]);\n  vi.mocked(ModelsInfoStore).modelsInfo = modelsInfoList;\n  router.location.query.set('model-id', 'model-id-2');\n\n  render(CreateService);\n  const createBtn = screen.getByTitle('Create service');\n\n  await vi.waitFor(() => {\n    expect(createBtn).toBeEnabled();\n  });\n\n  await fireEvent.click(createBtn);\n\n  await vi.waitFor(() => {\n    expect(studioClient.requestCreateInferenceServer).toHaveBeenCalledWith({\n      modelsInfo: [expect.objectContaining({ id: 'model-id-2' })],\n      port: 8888,\n      connection: containerProviderConnection,\n    });\n  });\n});\n\ntest('models with backend \"none\" should be filtered out', async () => {\n  const modelsInfoList = writable<ModelInfo[]>([\n    {\n      id: 'model-valid',\n      name: 'Valid Model',\n      description: 'A model with a valid backend',\n      backend: 'llama-cpp',\n      file: {\n        file: 'file',\n        path: '/valid-path',\n      },\n    } as unknown as ModelInfo,\n    {\n      id: 'model-none',\n      name: 'None Backend Model',\n      description: 'A model with backend none',\n      backend: 'none',\n      file: {\n        file: 'file',\n        path: '/none-path',\n      },\n    } as unknown as ModelInfo,\n  ]);\n\n  vi.mocked(ModelsInfoStore).modelsInfo = 
modelsInfoList;\n  router.location.query.set('model-id', 'model-valid');\n\n  render(CreateService);\n  expect(screen.queryByText('None Backend Model')).toBeNull();\n  const createBtn = screen.getByTitle('Create service');\n\n  await vi.waitFor(() => {\n    expect(createBtn).toBeEnabled();\n  });\n\n  await fireEvent.click(createBtn);\n\n  expect(vi.mocked(studioClient.requestCreateInferenceServer)).toHaveBeenCalledWith(\n    expect.objectContaining({\n      modelsInfo: [expect.objectContaining({ id: 'model-valid' })],\n    }),\n  );\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/CreateService.svelte",
    "content": "<script lang=\"ts\">\nimport { faExclamationCircle, faLocationArrow, faPlus, faPlusCircle } from '@fortawesome/free-solid-svg-icons';\nimport { modelsInfo } from '/@/stores/modelsInfo';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport Fa from 'svelte-fa';\nimport { router } from 'tinro';\nimport { onMount } from 'svelte';\nimport { studioClient } from '/@/utils/client';\nimport { tasks } from '/@/stores/tasks';\nimport type { Task } from '@shared/models/ITask';\nimport { inferenceServers } from '/@/stores/inferenceServers';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { Button, ErrorMessage, FormPage, Input } from '@podman-desktop/ui-svelte';\nimport ModelSelect from '../lib/select/ModelSelect.svelte';\nimport { containerProviderConnections } from '/@/stores/containerProviderConnections';\nimport ContainerProviderConnectionSelect from '/@/lib/select/ContainerProviderConnectionSelect.svelte';\nimport ContainerConnectionWrapper from '/@/lib/notification/ContainerConnectionWrapper.svelte';\nimport TrackedTasks from '/@/lib/progress/TrackedTasks.svelte';\n\ninterface Props {\n  // The tracking id is a unique identifier provided by the\n  // backend when calling requestCreateInferenceServer\n  trackingId?: string;\n}\n\nlet { trackingId }: Props = $props();\n\n// List of the models available locally exclude models with none backend\nlet localModels: ModelInfo[] = $derived($modelsInfo.filter(model => model.file && model.backend !== 'none'));\n\n// The container provider connection to use\nlet containerProviderConnection: ContainerProviderConnectionInfo | undefined = $state(undefined);\n\n// Filtered connections (started)\nlet startedContainerProviderConnectionInfo: ContainerProviderConnectionInfo[] = $derived(\n  $containerProviderConnections.filter(connection => connection.status === 'started'),\n);\n\n// The containerPort is the bind value to form input\nlet containerPort: 
number | undefined = $state(undefined);\n// The model is the bind value to ModelSelect form\nlet model: ModelInfo | undefined = $state(undefined);\n// If the creation of a new inference service fail\nlet errorMsg: string | undefined = $state(undefined);\n// The containerId will be included in the tasks when the creation\n// process will be completed\nlet containerId: string | undefined = $state(undefined);\n// available means the server is started\nlet available: boolean = $derived(!!containerId && $inferenceServers.some(server => server.container.containerId));\n// loading state\nlet loading = $derived(trackingId !== undefined && !errorMsg);\n\n$effect(() => {\n  // Select default model\n  if (!model && localModels.length > 0) {\n    model = localModels[0];\n  }\n\n  // Select default connection\n  if (!containerProviderConnection && startedContainerProviderConnectionInfo.length > 0) {\n    containerProviderConnection = startedContainerProviderConnectionInfo[0];\n  }\n});\n\nconst onContainerPortInput = (event: Event): void => {\n  const raw = (event.target as HTMLInputElement).value;\n  try {\n    containerPort = parseInt(raw);\n  } catch (e: unknown) {\n    console.warn('invalid value for container port', e);\n    containerPort = 8888;\n  }\n};\n\n// Submit method when the form is valid\nconst submit = async (): Promise<void> => {\n  errorMsg = undefined;\n  if (model === undefined) throw new Error('model id not valid.');\n  if (containerPort === undefined) throw new Error('invalid container port');\n\n  try {\n    const trackingId = await studioClient.requestCreateInferenceServer({\n      modelsInfo: [$state.snapshot(model)],\n      port: $state.snapshot(containerPort),\n      connection: $state.snapshot(containerProviderConnection),\n    });\n    router.location.query.set('trackingId', trackingId);\n  } catch (err: unknown) {\n    console.error('Something wrong while trying to create the inference server.', err);\n    errorMsg = String(err);\n  }\n};\n\n// 
Navigate to the list of models\nconst openModelsPage = (): void => {\n  router.goto(`/models`);\n};\n\n// Navigate to the new created service\nconst openServiceDetails = (): void => {\n  router.goto(`/service/${containerId}`);\n};\n\n// Utility method to filter the tasks properly based on the tracking Id\nconst processTasks = (trackedTasks: Task[]): void => {\n  // Check for errors\n  // hint: we do not need to display them as the TasksProgress component will\n  errorMsg = trackedTasks.find(task => task.error)?.error;\n\n  const task: Task | undefined = trackedTasks.find(task => 'containerId' in (task.labels ?? {}));\n  if (task === undefined) return;\n\n  containerId = task.labels?.['containerId'];\n\n  // if we re-open the page, we might need to restore the model selected\n  populateModelFromTasks(trackedTasks);\n};\n\n// This method uses the trackedTasks to restore the selected value of model\n// It is useful when the page has been restored\nfunction populateModelFromTasks(trackedTasks: Task[]): void {\n  const task = trackedTasks.find(\n    task => task.labels && 'model-id' in task.labels && typeof task.labels['model-id'] === 'string',\n  );\n  const modelId = task?.labels?.['model-id'];\n  if (!modelId) return;\n\n  const mModel = localModels.find(model => model.id === modelId);\n  if (!mModel) return;\n\n  model = mModel;\n}\n\nonMount(() => {\n  studioClient\n    .getHostFreePort()\n    .then(port => {\n      containerPort = port;\n    })\n    .catch((err: unknown) => {\n      console.error(err);\n    });\n\n  // we might have a query parameter, then we should use it\n  const queryModelId = router.location.query.get('model-id');\n  if (queryModelId !== undefined && typeof queryModelId === 'string') {\n    model = localModels.find(mModel => mModel.id === queryModelId);\n  }\n});\n\nexport function goToUpPage(): void {\n  router.goto('/services');\n}\n</script>\n\n<FormPage\n  title=\"Creating Model service\"\n  breadcrumbLeftPart=\"Model Services\"\n  
breadcrumbRightPart=\"Creating Model service\"\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  {#snippet icon()}\n    <div class=\"rounded-full w-8 h-8 flex items-center justify-center\">\n      <Fa size=\"1.125x\" class=\"text-[var(--pd-content-header-icon)]\" icon={faPlus} />\n    </div>\n  {/snippet}\n  {#snippet content()}\n    <div class=\"flex flex-col w-full\">\n      <!-- warning machine resources -->\n      {#if containerProviderConnection}\n        <div class=\"mx-5\">\n          <ContainerConnectionWrapper\n            model={$state.snapshot(model)}\n            containerProviderConnection={$state.snapshot(containerProviderConnection)} />\n        </div>\n      {/if}\n\n      <!-- tasks tracked -->\n      <TrackedTasks onChange={processTasks} class=\"mx-5 mt-5\" trackingId={trackingId} tasks={$tasks} />\n\n      <!-- form -->\n      <div class=\"bg-[var(--pd-content-card-bg)] m-5 space-y-6 px-8 sm:pb-6 xl:pb-8 rounded-lg h-fit\">\n        <div class=\"w-full\">\n          <!-- container provider connection input -->\n          {#if startedContainerProviderConnectionInfo.length > 1}\n            <label for=\"model\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n              >Container engine</label>\n            <ContainerProviderConnectionSelect\n              bind:value={containerProviderConnection}\n              containerProviderConnections={startedContainerProviderConnectionInfo} />\n          {/if}\n\n          <!-- model input -->\n          <label for=\"model\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\">Model</label>\n          <ModelSelect models={localModels} disabled={loading} bind:value={model} />\n          {#if localModels.length === 0}\n            <div class=\"text-red-500 p-1 flex flex-row items-center\">\n              <Fa size=\"1.1x\" class=\"cursor-pointer text-red-500\" icon={faExclamationCircle} />\n              <div role=\"alert\" aria-label=\"Error 
Message Content\" class=\"ml-2\">\n                You don't have any models downloaded. You can download them in <a\n                  href=\"javascript:void(0);\"\n                  class=\"underline\"\n                  title=\"Models page\"\n                  on:click={openModelsPage}>models page</a\n                >.\n              </div>\n            </div>\n          {/if}\n          <!-- container port input -->\n          <label for=\"containerPort\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n            >Container port</label>\n          <Input\n            value={String(containerPort ?? 0)}\n            on:input={onContainerPortInput}\n            class=\"w-full\"\n            placeholder=\"8888\"\n            name=\"containerPort\"\n            aria-label=\"Port input\"\n            disabled={loading}\n            required />\n          <!-- Removed \"type\" above, svelte 5 \"Input\" no longer uses it -->\n        </div>\n        {#if errorMsg !== undefined || !containerProviderConnection}\n          <ErrorMessage error={errorMsg ?? 'No running container engine found'} />\n        {/if}\n        <footer>\n          <div class=\"w-full flex flex-col\">\n            {#if containerId === undefined}\n              <Button\n                title=\"Create service\"\n                inProgress={loading}\n                on:click={submit}\n                disabled={!model || !containerPort || !containerProviderConnection}\n                icon={faPlusCircle}>\n                Create service\n              </Button>\n            {:else}\n              <Button\n                inProgress={!available}\n                title=\"Open service details\"\n                on:click={openServiceDetails}\n                icon={faLocationArrow}>\n                Open service details\n              </Button>\n            {/if}\n          </div>\n        </footer>\n      </div>\n    </div>\n  {/snippet}\n</FormPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/Dashboard.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { test, expect, vi } from 'vitest';\nimport { screen, render } from '@testing-library/svelte';\nimport Dashboard from '/@/pages/Dashboard.svelte';\n\nvi.mock('../utils/client', async () => {\n  return {\n    studioClient: {},\n  };\n});\n\ntest('ensure dashboard is not empty', async () => {\n  render(Dashboard);\n\n  const innerContent = screen.getByLabelText('inner-content');\n  expect(innerContent).toBeDefined();\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/Dashboard.svelte",
    "content": "<script lang=\"ts\">\nimport { router } from 'tinro';\nimport { faGithub } from '@fortawesome/free-brands-svg-icons';\nimport { studioClient } from '/@/utils/client';\nimport DashboardBanner from '/@/lib/images/DashboardBanner.svelte';\nimport { Button, NavPage } from '@podman-desktop/ui-svelte';\n\nconst openRecipesCatalog = (): void => {\n  router.goto('/recipes');\n};\n\nconst openModelsPage = (): void => {\n  router.goto('/models');\n};\n\nconst openPlaygroundsPage = (): void => {\n  router.goto('/playgrounds');\n};\n\nconst openServicesPage = (): void => {\n  router.goto('/services');\n};\n\nconst openGithub = (): void => {\n  studioClient\n    .openURL('https://github.com/containers/podman-desktop-extension-ai-lab')\n    .catch(err =>\n      console.error('Error opening URL https://github.com/containers/podman-desktop-extension-ai-lab :', err),\n    );\n};\n\nconst openIssuesPage = (): void => {\n  studioClient\n    .openURL('https://github.com/containers/podman-desktop-extension-ai-lab/issues')\n    .catch(err =>\n      console.error('Error opening URL https://github.com/containers/podman-desktop-extension-ai-lab/issues :', err),\n    );\n};\n\nconst openDiscussionsPage = (): void => {\n  studioClient\n    .openURL('https://github.com/containers/podman-desktop/discussions')\n    .catch(err =>\n      console.error(\n        'Error opening URL https://github.com/containers/podman-desktop-extension-ai-lab/discussions :',\n        err,\n      ),\n    );\n};\n</script>\n\n<NavPage title=\"Welcome to Podman AI Lab\" searchEnabled={false}>\n  {#snippet additionalActions()}\n    <Button type=\"secondary\" title=\"Open GitHub repository\" on:click={openGithub} icon={faGithub}>Github</Button>\n  {/snippet}\n  {#snippet content()}\n    <div class=\"flex flex-col min-w-full min-h-full text-[var(--pd-details-body-text)]\">\n      <div class=\"mt-4 px-5 space-y-5\" aria-label=\"inner-content\">\n        <DashboardBanner class=\"rounded-md my-2 w-full\" 
/>\n        <p>\n          Podman AI Lab is an open source extension for Podman Desktop to work with LLMs (Large Language Models) on a\n          local environment. It provides key open-source technologies to start building on AI. A curated catalog of\n          so-called recipes helps navigate the jungle of AI use cases and AI models. AI Lab further ships playgrounds:\n          environments to experiment with and test AI models, for instance, a chat bot.\n        </p>\n\n        <h1 class=\"text-lg first-letter:uppercase underline\">Recipes Catalog</h1>\n        <p>\n          Podman AI Lab ships with a so-called\n          <button class=\"underline\" title=\"Open the Recipes Catalog page\" on:click={openRecipesCatalog}\n            >Recipes Catalog</button>\n          that helps you navigate a number of core AI use cases and problem domains such as Chat Bots, Code Generators and\n          Text Summarizers. Each recipe comes with detailed explanations and sample applications that can be run with various\n          large language models (LLMs). Experimenting with multiple models allows finding the optimal one for your use case.\n        </p>\n\n        <h1 class=\"text-lg first-letter:uppercase underline\">AI Models</h1>\n        <p>\n          Podman AI Lab provides a curated list of open source\n          <button class=\"underline\" title=\"Open the Models page\" on:click={openModelsPage}>AI Models and LLMs</button>.\n          Once downloaded, the models are available to be used for AI applications, model services and playgrounds.\n        </p>\n\n        <h1 class=\"text-lg first-letter:uppercase underline\">Model Serving</h1>\n        <p>\n          Once a model has been downloaded, you can start an inference server for the model. 
This allows to test the\n          model using a playground environment or to connect applications as the inference server is exposing a well\n          known chat API.\n        </p>\n\n        <h1 class=\"text-lg first-letter:uppercase underline\">Playgrounds</h1>\n        <p>\n          The integrated\n          <button class=\"underline\" title=\"Open the Playgrounds page\" on:click={openPlaygroundsPage}\n            >Playground environments</button>\n          allow for experimenting with available models in a local environment. An intuitive user prompt helps in exploring\n          the capabilities and accuracy of various models and aids in finding the best model for the use case at hand.\n        </p>\n        <p>\n          Once started, each playground ships with a generic chat client to interact with the model service. The <button\n            class=\"underline\"\n            title=\"Open the Services page\"\n            on:click={openServicesPage}>Services</button>\n          page allows for accessing running model services and provides further details and code snippets to interact with\n          them.\n        </p>\n\n        <h1 class=\"text-lg first-letter:uppercase underline\">Feedback</h1>\n        <p>\n          If you discover an issue or want to request a new feature, you can open an issue\n          <button class=\"underline\" title=\"Open the issues page\" on:click={openIssuesPage}>here</button>.\n        </p>\n        <p>\n          If you want to get more information about this project or exchange with the community, you can start a\n          discussion\n          <button class=\"underline\" title=\"Open the discussions page\" on:click={openDiscussionsPage}>here</button>.\n        </p>\n      </div>\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/ImportModel.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/* eslint-disable @typescript-eslint/no-explicit-any */\n\nimport '@testing-library/jest-dom/vitest';\n\nimport { render, screen } from '@testing-library/svelte';\nimport userEvent from '@testing-library/user-event';\nimport { router } from 'tinro';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { studioClient } from '../utils/client';\n\nimport ImportModels from './ImportModel.svelte';\nimport type { Uri } from '@shared/uri/Uri';\n\nvi.mock('../utils/client', async () => {\n  return {\n    studioClient: {\n      openDialog: vi.fn(),\n      importModels: vi.fn(),\n      validateLocalModel: vi.fn(),\n    },\n  };\n});\n\nvi.mock('tinro', () => ({\n  router: {\n    goto: vi.fn(),\n  },\n}));\n\nbeforeEach(() => {\n  vi.clearAllMocks();\n  // mock validateLocalModel\n  vi.mocked(studioClient.validateLocalModel).mockResolvedValue(undefined);\n});\n\ntest('Expect import button to be disabled', async () => {\n  render(ImportModels);\n  const btnImportModels = screen.getByRole('button', { name: 'Import model' });\n  expect(btnImportModels).toBeInTheDocument();\n  expect(btnImportModels).toBeDisabled();\n});\n\ntest('Expect importModel button to be 
enabled when model selected', async () => {\n  vi.mocked(studioClient.openDialog).mockResolvedValue([\n    {\n      path: 'path/file.gguf',\n    } as Uri,\n  ]);\n  vi.mocked(studioClient.validateLocalModel).mockResolvedValue(undefined);\n\n  render(ImportModels);\n  const btnOpenFileExplorer = screen.getByRole('button', { name: 'model input' });\n  expect(btnOpenFileExplorer).toBeInTheDocument();\n\n  // click on open file explorer\n  await userEvent.click(btnOpenFileExplorer);\n\n  await vi.waitFor(() => {\n    const btnImportModels = screen.getByRole('button', { name: 'Import model' });\n    expect(btnImportModels).toBeInTheDocument();\n    expect(btnImportModels).not.toBeDisabled();\n  });\n});\n\ntest('Expect import submit to call studioClient.importModels', async () => {\n  vi.mocked(studioClient.openDialog).mockResolvedValue([\n    {\n      path: 'path/file.gguf',\n    } as Uri,\n  ]);\n\n  render(ImportModels);\n  const btnOpenFileExplorer = screen.getByRole('button', { name: 'model input' });\n  await userEvent.click(btnOpenFileExplorer);\n\n  const btnImportModels = screen.getByRole('button', { name: 'Import model' });\n\n  await vi.waitFor(() => {\n    expect(btnImportModels).toBeInTheDocument();\n    expect(btnImportModels).not.toBeDisabled();\n  });\n\n  // submit model\n  await userEvent.click(btnImportModels);\n\n  await vi.waitFor(() => {\n    expect(studioClient.importModels).toHaveBeenCalledWith([\n      {\n        path: 'path/file.gguf',\n        name: 'file',\n        backend: 'llama-cpp',\n      },\n    ]);\n  });\n  expect(router.goto).toHaveBeenCalledWith('/models/imported');\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/ImportModel.svelte",
    "content": "<script lang=\"ts\">\nimport { faCircleInfo, faFileImport } from '@fortawesome/free-solid-svg-icons';\nimport { studioClient } from '../utils/client';\nimport { Uri } from '@shared/uri/Uri';\nimport type { LocalModelImportInfo } from '@shared/models/ILocalModelInfo';\nimport { Button, ErrorMessage, FormPage, Input, Tooltip } from '@podman-desktop/ui-svelte';\nimport { InferenceType } from '@shared/models/IInference';\nimport Fa from 'svelte-fa';\nimport { getFilesFromDropEvent } from '/@/utils/fileUtils';\nimport { router } from 'tinro';\n\nlet localModel: LocalModelImportInfo | undefined = undefined;\nlet errorMessage: string = '';\nlet dragging: boolean = false;\nlet loading: boolean = false;\n\nfunction onBackendChange(e: Event & { currentTarget: HTMLSelectElement }): void {\n  if (!localModel) return;\n  // update the local model\n  localModel.backend = e.currentTarget.value as InferenceType;\n}\n\nasync function submit(): Promise<void> {\n  if (!localModel) return;\n\n  loading = true;\n  try {\n    // ensure the model is valid\n    await studioClient.validateLocalModel(localModel);\n\n    // import the local model\n    await studioClient.importModels([localModel]);\n    router.goto(`/models/imported`);\n  } catch (err: unknown) {\n    errorMessage = `Something went wrong while importing the model: ${String(err)}`;\n  } finally {\n    loading = false;\n  }\n}\n\nasync function requestExplorerModal(): Promise<void> {\n  dragging = false;\n  errorMessage = '';\n  try {\n    const models = await studioClient.openDialog({\n      title: 'Select model to import',\n      selectors: ['openFile'],\n      filters: [\n        {\n          name: 'GGUF files',\n          extensions: ['gguf'],\n        },\n        {\n          name: 'BIN files',\n          extensions: ['bin'],\n        },\n      ],\n    });\n    if (models?.length !== 1) {\n      return;\n    }\n\n    const mModel = Uri.revive(models[0]);\n    const modelPath = mModel.path;\n    const 
lastSlashIndex = modelPath.replace(/\\\\/g, '/').lastIndexOf('/') + 1;\n    localModel = {\n      path: mModel.path,\n      name: mModel.path.substring(lastSlashIndex).replace('.gguf', ''),\n      backend: InferenceType.LLAMA_CPP,\n    };\n  } catch (e) {\n    localModel = undefined;\n    errorMessage = `Error while adding models: ${String(e)}`;\n  }\n}\n\n/**\n * User can drag&drop a file, this\n * function is the drag event handler\n * @param event\n */\nasync function onFile(event: DragEvent): Promise<void> {\n  dragging = false;\n  const files = getFilesFromDropEvent(event);\n  if (files.length !== 1) {\n    return;\n  }\n  localModel = {\n    ...files[0],\n    backend: InferenceType.LLAMA_CPP,\n  };\n}\n\nexport function goToUpPage(): void {\n  router.goto('/models');\n}\n\nfunction handleDragOver(): void {\n  dragging = true;\n}\n\nfunction handleDragLeave(): void {\n  dragging = false;\n}\n</script>\n\n<FormPage\n  title=\"Import Model\"\n  breadcrumbLeftPart=\"Models\"\n  breadcrumbRightPart=\"Import Model\"\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  {#snippet icon()}\n    <div class=\"rounded-full w-8 h-8 flex items-center justify-center\">\n      <Fa size=\"1.125x\" class=\"text-[var(--pd-content-header-icon)]\" icon={faFileImport} />\n    </div>\n  {/snippet}\n  {#snippet content()}\n    <div class=\"flex m-5 flex-col w-full\">\n      <!-- Error banner -->\n      <div aria-label=\"importError\">\n        {#if errorMessage !== ''}\n          <ErrorMessage class=\"py-2\" error={errorMessage} />\n        {/if}\n      </div>\n\n      <!-- form -->\n      <div\n        class=\"bg-[var(--pd-content-card-bg)] space-y-6 px-8 sm:py-6 xl:py-8 rounded-lg h-fit text-[var(--pd-content-card-text)]\">\n        <div class=\"w-full\">\n          <!-- model input -->\n          {#if localModel === undefined}\n            <button\n              aria-label=\"model input\"\n              title=\"Click to open file explorer\"\n              
class:border-purple-400={dragging}\n              class:border-gray-800={!dragging}\n              on:click={requestExplorerModal}\n              on:drop|preventDefault={onFile}\n              on:dragover|preventDefault={handleDragOver}\n              on:dragleave|preventDefault={handleDragLeave}\n              class=\"w-full cursor-pointer flex-col px-4 py-8 border-2 border-dashed rounded-xs flex justify-center items-center\">\n              <Fa size=\"1.1x\" class=\"cursor-pointer text-[var(--pd-link)]\" icon={faFileImport} />\n              <span>Drag & Drop or <strong class=\"text-[var(--pd-link)]\">Choose file</strong> to import</span>\n              <span class=\"opacity-50 text-sm\">Supported formats: .gguf, .bin</span>\n            </button>\n          {:else}\n            <!-- showing path -->\n            <label for=\"path\" class=\"w-full block mb-2 font-bold text-[var(--pd-content-card-header-text)]\">Path</label>\n            <Input class=\"grow\" bind:value={localModel.path} name=\"path\" aria-label=\"model path\" readonly={true} />\n\n            <!-- Model name -->\n            <label for=\"name\" class=\"pt-4 w-full block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n              >Name</label>\n            <Input\n              bind:value={localModel.name}\n              name=\"name\"\n              aria-label=\"model importing name\"\n              placeholder=\"Model Name displayed\"\n              class=\"grow\" />\n\n            <!-- selecting backend -->\n            <div class=\"flex flex-row items-center justify-center\">\n              <label for=\"backend\" class=\"pt-4 grow block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n                >Backend</label>\n              <Tooltip left>\n                <Fa size=\"1.1x\" class=\"cursor-pointer\" icon={faCircleInfo} />\n                <svelte:fragment slot=\"tip\">\n                  <span class=\"inline-block py-2 px-4 rounded-md\"\n                    
><code>backends</code> represents the technology required to run the models.</span>\n                </svelte:fragment>\n              </Tooltip>\n            </div>\n            <select\n              on:change={onBackendChange}\n              name=\"backend\"\n              class=\"border rounded-lg w-full focus:ring-purple-500 focus:border-purple-500 block p-2.5 bg-charcoal-900 border-charcoal-900 placeholder-gray-700 text-white\">\n              {#each Object.values(InferenceType) as type (type)}\n                <option value={type}>{type}</option>\n              {/each}\n            </select>\n          {/if}\n        </div>\n\n        <!-- action buttons -->\n        <div class=\"mt-4 flex\">\n          <Button\n            class=\"grow\"\n            on:click={submit}\n            inProgress={loading}\n            icon={faFileImport}\n            disabled={localModel === undefined}\n            aria-label=\"Import model\">\n            Import Models\n          </Button>\n        </div>\n      </div>\n    </div>\n  {/snippet}\n</FormPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/InferenceServerDetails.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, describe, expect, test, vi } from 'vitest';\nimport { fireEvent, render, screen } from '@testing-library/svelte';\nimport { type InferenceServer, InferenceType } from '@shared/models/IInference';\nimport InferenceServerDetails from '/@/pages/InferenceServerDetails.svelte';\nimport type { Language } from 'postman-code-generators';\nimport { studioClient } from '/@/utils/client';\nimport { router } from 'tinro';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport MonacoEditor from '/@/lib/monaco-editor/MonacoEditor.svelte';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    getInferenceServersMock: vi.fn<() => InferenceServer[]>(),\n    getSnippetLanguagesMock: vi.fn(),\n  };\n});\n\nvi.mock('/@/lib/monaco-editor/MonacoEditor.svelte', () => ({\n  default: vi.fn(),\n}));\n\nvi.mock('../stores/inferenceServers', () => ({\n  inferenceServers: {\n    subscribe: (f: (msg: InferenceServer[]) => void) => {\n      f(mocks.getInferenceServersMock());\n      return (): void => {};\n    },\n  },\n}));\n\nvi.mock('../stores/snippetLanguages', () => ({\n  snippetLanguages: {\n    subscribe: (f: 
(msg: unknown) => void) => {\n      f(mocks.getSnippetLanguagesMock());\n      return (): void => {};\n    },\n  },\n}));\n\nvi.mock('../utils/client', () => {\n  return {\n    studioClient: {\n      openURL: vi.fn(),\n      createSnippet: vi.fn(),\n      copyToClipboard: vi.fn(),\n      telemetryLogUsage: vi.fn(),\n    },\n  };\n});\n\nconst inferenceServerMock: InferenceServer = {\n  health: {\n    Status: 'healthy',\n    Log: [],\n    FailingStreak: 0,\n  },\n  models: [\n    {\n      id: 'dummyModelId',\n      name: 'Dummy model id',\n    } as unknown as ModelInfo,\n  ],\n  connection: { port: 9999 },\n  status: 'running',\n  container: {\n    containerId: 'dummyContainerId',\n    engineId: 'dummyEngineId',\n  },\n  type: InferenceType.LLAMA_CPP,\n  labels: {},\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(studioClient.copyToClipboard).mockResolvedValue(undefined);\n  vi.mocked(studioClient.telemetryLogUsage).mockResolvedValue(undefined);\n  vi.mocked(studioClient.openURL).mockResolvedValue(true);\n\n  mocks.getSnippetLanguagesMock.mockReturnValue([\n    {\n      key: 'dummyLanguageKey',\n      label: 'dummyLanguageLabel',\n      syntax_mode: 'dummySynthaxMode',\n      variants: [\n        {\n          key: 'dummyLanguageVariant1',\n        },\n        {\n          key: 'dummyLanguageVariant2',\n        },\n      ],\n    },\n    {\n      key: 'curl',\n      label: 'cURL',\n      syntax_mode: '?',\n      variants: [\n        {\n          key: 'cURL',\n        },\n      ],\n    },\n  ] as Language[]);\n\n  mocks.getInferenceServersMock.mockReturnValue([inferenceServerMock]);\n});\n\ntest('ensure documentation button is displayed', async () => {\n  mocks.getInferenceServersMock.mockReturnValue([\n    {\n      ...inferenceServerMock,\n      labels: {\n        docs: 'http://localhost:9999/docs',\n      },\n    },\n  ]);\n  const { getByRole } = render(InferenceServerDetails, {\n    containerId: 'dummyContainerId',\n  });\n\n  const address = 
getByRole('link', { name: 'swagger documentation' });\n  expect(address).toBeDefined();\n\n  await fireEvent.click(address);\n\n  expect(studioClient.openURL).toHaveBeenCalledWith('http://localhost:9999/docs');\n});\n\ntest('language select must have the mocked snippet languages', async () => {\n  render(InferenceServerDetails, {\n    containerId: 'dummyContainerId',\n  });\n\n  const select: HTMLSelectElement = screen.getByLabelText('snippet language selection');\n  expect(select).toBeDefined();\n  expect(select.options.length).toBe(2);\n  expect(select.options[0].value).toBe('dummyLanguageKey');\n});\n\ntest('default render should show curl', async () => {\n  render(InferenceServerDetails, {\n    containerId: 'dummyContainerId',\n  });\n\n  const variantSelect: HTMLSelectElement = screen.getByLabelText('snippet language variant');\n  expect(variantSelect.value).toBe('cURL');\n});\n\ndescribe('snippets', () => {\n  test('on mount should call createSnippet', async () => {\n    render(InferenceServerDetails, {\n      containerId: 'dummyContainerId',\n    });\n\n    expect(studioClient.createSnippet).toHaveBeenCalledWith(\n      {\n        body: expect.anything(),\n        header: expect.anything(),\n        url: 'http://localhost:9999/v1/chat/completions',\n        method: 'POST',\n      },\n      'curl',\n      'cURL',\n    );\n  });\n\n  test('whisper-cpp inference server type should generate whisper snippet', async () => {\n    mocks.getInferenceServersMock.mockReturnValue([\n      {\n        ...inferenceServerMock,\n        type: InferenceType.WHISPER_CPP,\n      },\n    ]);\n\n    render(InferenceServerDetails, {\n      containerId: 'dummyContainerId',\n    });\n\n    expect(studioClient.createSnippet).toHaveBeenCalledWith(\n      {\n        body: {\n          mode: 'formdata',\n          formdata: [\n            {\n              key: 'file',\n              value: './local.mp3',\n              type: 'file',\n            },\n          ],\n        },\n        
header: [\n          {\n            key: 'Accept',\n            value: 'application/json',\n          },\n        ],\n        url: 'http://localhost:9999/inference',\n        method: 'POST',\n      },\n      'curl',\n      'cURL',\n    );\n  });\n\n  test('generated snippet should be sent to the monaco component', async () => {\n    const DUMMY_SNIPPET = 'dummy generated snippet';\n    vi.mocked(studioClient.createSnippet).mockResolvedValue(DUMMY_SNIPPET);\n    render(InferenceServerDetails, {\n      containerId: 'dummyContainerId',\n    });\n\n    await vi.waitFor(() => {\n      expect(MonacoEditor).toHaveBeenCalledWith(\n        expect.anything(),\n        expect.objectContaining({ content: DUMMY_SNIPPET, language: 'curl', readOnly: true, noMinimap: true }),\n      );\n    });\n  });\n\n  test('copy snippet should call copyToClipboard', async () => {\n    vi.mocked(studioClient.createSnippet).mockResolvedValue('dummy generated snippet');\n    render(InferenceServerDetails, {\n      containerId: 'dummyContainerId',\n    });\n\n    await vi.waitFor(async () => {\n      const copyBtn = screen.getByTitle('Copy');\n      expect(copyBtn).toBeDefined();\n      await fireEvent.click(copyBtn);\n    });\n\n    await vi.waitFor(() => {\n      expect(studioClient.copyToClipboard).toHaveBeenCalledWith('dummy generated snippet');\n    });\n  });\n});\n\ntest('invalid container id should redirect to services page', async () => {\n  const gotoSpy = vi.spyOn(router, 'goto');\n  render(InferenceServerDetails, {\n    containerId: 'fakeContainerId',\n  });\n\n  expect(gotoSpy).toHaveBeenCalledWith('/services');\n});\n\ntest('ensure dummyContainerId is visible', async () => {\n  render(InferenceServerDetails, {\n    containerId: 'dummyContainerId',\n  });\n\n  const span = screen.getByText('dummyContainerId');\n  expect(span).toBeDefined();\n});\n\ntest('ensure models to be clickable', async () => {\n  render(InferenceServerDetails, {\n    containerId: 'dummyContainerId',\n  });\n\n  
const a = screen.getByText('Dummy model id');\n  expect(a).toBeDefined();\n  expect(a.getAttribute('href')).toBe('/model/dummyModelId');\n});\n\ntest('runtime label is displayed', async () => {\n  render(InferenceServerDetails, {\n    containerId: 'dummyContainerId',\n  });\n\n  const span = screen.getByText('llamacpp');\n  expect(span).toBeDefined();\n});\n\ndescribe('labels', () => {\n  test('GPU label should display GPU Inference', async () => {\n    mocks.getInferenceServersMock.mockReturnValue([\n      {\n        ...inferenceServerMock,\n        labels: {\n          gpu: 'NVIDIA',\n        },\n      },\n    ]);\n    render(InferenceServerDetails, {\n      containerId: 'dummyContainerId',\n    });\n\n    const span = screen.getByText('GPU Inference');\n    expect(span).toBeDefined();\n  });\n\n  test('no label should display CPU Inference', async () => {\n    render(InferenceServerDetails, {\n      containerId: 'dummyContainerId',\n    });\n\n    const span = screen.getByText('CPU Inference');\n    expect(span).toBeDefined();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/InferenceServerDetails.svelte",
    "content": "<script lang=\"ts\">\nimport { inferenceServers } from '/@/stores/inferenceServers';\nimport ServiceStatus from '/@/lib/table/service/ServiceStatus.svelte';\nimport ServiceAction from '/@/lib/table/service/ServiceAction.svelte';\nimport Fa from 'svelte-fa';\nimport {\n  faArrowUpRightFromSquare,\n  faBuildingColumns,\n  faCheck,\n  faCopy,\n  faFan,\n  faMicrochip,\n  faScaleBalanced,\n} from '@fortawesome/free-solid-svg-icons';\nimport { type InferenceServer, InferenceType, inferenceTypeLabel } from '@shared/models/IInference';\nimport { snippetLanguages } from '/@/stores/snippetLanguages';\nimport type { LanguageVariant } from 'postman-code-generators';\nimport { studioClient } from '/@/utils/client';\nimport { onMount } from 'svelte';\nimport { router } from 'tinro';\nimport { Button, DetailsPage, Tooltip, Link } from '@podman-desktop/ui-svelte';\nimport CopyButton from '/@/lib/button/CopyButton.svelte';\nimport type { RequestOptions } from '@shared/models/RequestOptions';\nimport { filesize } from 'filesize';\nimport MonacoEditor from '/@/lib/monaco-editor/MonacoEditor.svelte';\n\ninterface Props {\n  containerId?: string;\n}\n\nlet { containerId }: Props = $props();\n\nlet service: InferenceServer | undefined = $state();\nlet selectedLanguage: string = $state('curl');\n\nlet variants: LanguageVariant[] = $derived(\n  $snippetLanguages.find(language => language.key === selectedLanguage)?.variants ?? 
[],\n);\n\nlet selectedVariant: string = $state('cURL');\n\nconst onLanguageChange = (): void => {\n  if (variants.length > 0) {\n    selectedVariant = variants[0].key;\n    generate(selectedLanguage, selectedVariant).catch(err =>\n      console.error(`Error generating snippet for language ${selectedLanguage} variant ${selectedVariant}:`, err),\n    );\n  }\n};\n\nlet snippet: string | undefined = $state();\n\nconst generate = async (language: string, variant: string): Promise<void> => {\n  copied = false;\n\n  let options: RequestOptions | undefined;\n  switch (service?.type) {\n    case InferenceType.LLAMA_CPP:\n      options = {\n        url: `http://localhost:${service?.connection.port || '??'}/v1/chat/completions`,\n        method: 'POST',\n        header: [\n          {\n            key: 'Content-Type',\n            value: 'application/json',\n          },\n        ],\n        body: {\n          mode: 'raw',\n          raw: `{\n  \"messages\": [\n    {\n      \"content\": \"You are a helpful assistant.\",\n      \"role\": \"system\"\n    },\n    {\n      \"content\": \"What is the capital of France?\",\n      \"role\": \"user\"\n    }\n  ]\n}`,\n        },\n      };\n      break;\n    case InferenceType.OPENVINO:\n      options = {\n        url: `http://localhost:${service?.connection.port || '??'}/v3/chat/completions`,\n        method: 'POST',\n        header: [\n          {\n            key: 'Content-Type',\n            value: 'application/json',\n          },\n        ],\n        body: {\n          mode: 'raw',\n          raw: `{\n  \"messages\": [\n    {\n      \"content\": \"You are a helpful assistant.\",\n      \"role\": \"system\"\n    },\n    {\n      \"content\": \"What is the capital of France?\",\n      \"role\": \"user\"\n    }\n  ]\n}`,\n        },\n      };\n      break;\n    case InferenceType.WHISPER_CPP:\n      options = {\n        url: `http://localhost:${service?.connection.port || '??'}/inference`,\n        method: 'POST',\n        
header: [\n          {\n            key: 'Accept',\n            value: 'application/json',\n          },\n        ],\n        body: {\n          mode: 'formdata',\n          formdata: [\n            {\n              key: 'file',\n              value: './local.mp3',\n              type: 'file',\n            },\n          ],\n        },\n      };\n      break;\n  }\n\n  if (!options) return;\n\n  try {\n    snippet = await studioClient.createSnippet(options, language, variant);\n  } catch (err: unknown) {\n    snippet = `${String(err)}`;\n  }\n};\n\n$effect(() => {\n  if (!snippet && service) {\n    generate('curl', 'cURL').catch((err: unknown) =>\n      console.error(`Error generating snippet for language curl variant cURL:`, err),\n    );\n  }\n});\n\nlet copied: boolean = $state(false);\nfunction copySnippet(): void {\n  if (!snippet) return;\n\n  studioClient\n    .copyToClipboard(snippet)\n    .then(() => {\n      copied = true;\n      studioClient\n        .telemetryLogUsage('snippet.copy', {\n          cpyButton: true,\n          language: selectedLanguage,\n          variant: selectedVariant,\n        })\n        .catch(err => console.error(`Error reporting telemetry:`, err));\n    })\n    .catch((err: unknown) => {\n      console.error('Something went wrong while trying to copy language snippet.', err);\n    });\n}\n\nonMount(() => {\n  return inferenceServers.subscribe(servers => {\n    service = servers.find(server => server.container.containerId === containerId);\n    if (!service) {\n      router.goto('/services');\n    }\n  });\n});\n\nexport function goToUpPage(): void {\n  router.goto('/services');\n}\n\nfunction openLink(url: string): void {\n  studioClient.openURL(url).catch(err => console.error(`Error opening URL: ${url}`, err));\n}\n\nfunction handleOnChange(): void {\n  generate(selectedLanguage, selectedVariant).catch(err =>\n    console.log(`Error generating language=${selectedLanguage} variant=${selectedVariant}`, err),\n  
);\n}\n</script>\n\n<DetailsPage\n  title=\"Service details\"\n  breadcrumbLeftPart=\"Model Services\"\n  breadcrumbRightPart=\"Service details\"\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  <!-- Removed breadcrumbTitle above, no longer needed for svelte 5 detailspage -->\n  {#snippet iconSnippet()}\n    <div class=\"mr-3\">\n      {#if service !== undefined}\n        <ServiceStatus object={service} />\n      {/if}\n    </div>\n  {/snippet}\n  {#snippet subtitleSnippet()}\n    <div class=\"flex gap-x-2 items-center text-[var(--pd-content-sub-header)]\">\n      {#if service}\n        <span class=\"text-xs\">{service.container.containerId}</span>\n      {/if}\n    </div>\n  {/snippet}\n  {#snippet actionsSnippet()}\n    {#if service !== undefined}\n      <ServiceAction detailed object={service} />\n    {/if}\n  {/snippet}\n  {#snippet contentSnippet()}\n    <div class=\"h-full overflow-y-auto bg-[var(--pd-content-bg)]\">\n      <div class=\"flex flex-col min-w-full min-h-full\">\n        <div class=\"min-w-full min-h-full flex-1\">\n          <div class=\"mt-4 px-5 space-y-5\">\n            {#if service !== undefined}\n              <!-- Inference Server -->\n              <div>\n                <!-- title -->\n                <div class=\"flex flex-row\">\n                  <span class=\"text-base grow text-[var(--pd-content-card-text)]\">Inference Server</span>\n                </div>\n\n                <!-- inference server details content -->\n                <div class=\"bg-[var(--pd-content-card-bg)] rounded-md w-full px-4 pt-3 pb-4 mt-2 flex flex-col gap-y-4\">\n                  <!-- endpoint URL -->\n                  {#if service.status === 'running'}\n                    <div class=\"flex flex-col gap-y-2\">\n                      <span class=\"text-sm text-[var(--pd-content-card-text)]\">Inference Endpoint URL</span>\n                      <div class=\"flex items-center gap-x-4\" aria-label=\"Endpoint URL\">\n                        <!-- 
API URL -->\n                        {#if 'api' in service.labels}\n                          <CopyButton\n                            top\n                            class=\"bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] rounded-md p-2 flex flex-row w-min h-min text-xs text-nowrap items-center\"\n                            content={service.labels['api']}>\n                            {service.labels['api']}\n                            <Fa class=\"ml-2\" icon={faCopy} />\n                          </CopyButton>\n                        {/if}\n\n                        <!-- Documentation URL -->\n                        <div class=\"grow text-[var(--pd-label-text)]\">\n                          {#if 'docs' in service.labels}\n                            Access\n                            <Tooltip tip=\"Open swagger documentation\">\n                              <Link\n                                aria-label=\"swagger documentation\"\n                                on:click={openLink.bind(undefined, service.labels['docs'])}>\n                                swagger documentation\n                              </Link>\n                            </Tooltip>\n                          {/if}\n                        </div>\n\n                        <div\n                          class=\"bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] rounded-md p-2 flex flex-row w-min h-min text-xs text-nowrap items-center\"\n                          aria-label=\"Service type\">\n                          {inferenceTypeLabel(service.type)}\n                        </div>\n                        {#if 'gpu' in service.labels}\n                          <Tooltip left tip={service.labels['gpu']}>\n                            <div\n                              class=\"bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] rounded-md p-2 flex flex-row w-min h-min text-xs text-nowrap items-center\"\n                              aria-label=\"Inference Type\">\n              
                GPU Inference\n                              <Fa spin={service.status === 'running'} class=\"ml-2\" icon={faFan} />\n                            </div>\n                          </Tooltip>\n                        {:else}\n                          <div\n                            class=\"bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] rounded-md p-2 flex flex-row w-min h-min text-xs text-nowrap items-center\"\n                            aria-label=\"Inference Type\">\n                            CPU Inference\n                            <Fa class=\"ml-2\" icon={faMicrochip} />\n                          </div>\n                        {/if}\n                      </div>\n                    </div>\n                  {/if}\n\n                  <!-- models -->\n                  <div class=\"flex flex-col gap-y-2\">\n                    <span class=\"text-sm text-[var(--pd-content-card-text)]\">\n                      {service.models.length > 1 ? 'Models' : 'Model'}\n                    </span>\n                    <div>\n                      {#each service.models as model (model.id)}\n                        <div>\n                          <div\n                            class=\"w-full bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] rounded-md px-2 py-1 flex flex-col gap-y-4\">\n                            <div class=\"flex flex-row gap-2 items-center\">\n                              <div class=\"grow text-sm\" aria-label=\"Model name\">\n                                <a href=\"/model/{encodeURIComponent(model.id)}\" class=\"flex items-center\">\n                                  {model.name}\n                                  <Fa class=\"ml-2\" icon={faArrowUpRightFromSquare} />\n                                </a>\n                              </div>\n                              <div>\n                                <div\n                                  class=\"bg-[var(--pd-content-card-bg)] rounded-md px-2 py-1 flex 
flex-row w-min h-min text-xs text-charcoal-100 text-nowrap items-center\">\n                                  <Fa class=\"mr-2\" icon={faScaleBalanced} />\n                                  {model.license}\n                                </div>\n                              </div>\n                              <div>\n                                <div\n                                  class=\"bg-[var(--pd-content-card-bg)] rounded-md px-2 py-1 flex flex-row w-min h-min text-xs text-charcoal-100 text-nowrap items-center\">\n                                  <Fa class=\"mr-2\" icon={faBuildingColumns} />\n                                  {model.registry}\n                                </div>\n                              </div>\n                            </div>\n                          </div>\n\n                          <table class=\"w-full text-[var(--pd-label-text)] ml-4 mt-2\">\n                            <tbody>\n                              {#if model.file?.size}\n                                <tr>\n                                  <td>Size</td>\n                                  <td>{filesize(model.file.size)}</td>\n                                </tr>\n                              {/if}\n                              {#if model.file}\n                                <tr>\n                                  <td>File path</td>\n                                  <td>{model.file.path}</td>\n                                </tr>\n                              {/if}\n                            </tbody>\n                          </table>\n                        </div>\n                      {/each}\n                    </div>\n                  </div>\n                </div>\n              </div>\n\n              <!-- code client -->\n              <div>\n                <div class=\"flex flex-row items-center\">\n                  <span class=\"text-base grow text-[var(--pd-content-card-text)]\">Client code</span>\n\n                  <!-- 
language choice -->\n                  <select\n                    required\n                    aria-label=\"snippet language selection\"\n                    bind:value={selectedLanguage}\n                    onchange={onLanguageChange}\n                    id=\"languages\"\n                    class=\"border ml-1 text-sm rounded-lg bg-[var(--pd-action-button-details-bg)] block p-1 border-[var(--pd-action-button-details-bg)] placeholder-gray-700 text-[var(--pd-action-button-details-text)]\"\n                    name=\"languages\">\n                    {#each $snippetLanguages as language (language.key)}\n                      <option class=\"my-1\" value={language.key}>{language.label}</option>\n                    {/each}\n                  </select>\n                  {#if selectedVariant !== undefined}\n                    <select\n                      required\n                      aria-label=\"snippet language variant\"\n                      id=\"variants\"\n                      bind:value={selectedVariant}\n                      onchange={handleOnChange}\n                      disabled={variants.length === 1}\n                      class=\"border ml-1 text-sm rounded-lg bg-[var(--pd-action-button-details-bg)] block p-1 border-[var(--pd-action-button-details-bg)] placeholder-gray-700 text-[var(--pd-action-button-details-text)]\"\n                      name=\"variants\">\n                      {#each variants as variant (variant.key)}\n                        <option class=\"my-1\" value={variant.key}>{variant.key}</option>\n                      {/each}\n                    </select>\n                  {/if}\n                </div>\n\n                {#if snippet !== undefined}\n                  <div\n                    class=\"bg-[var(--pd-details-empty-cmdline-bg)] text-[var(--pd-details-empty-cmdline-text)] rounded-md w-full p-4 mt-2 relative h-[400px]\"\n                    aria-label=\"Code Snippet\">\n                    {#key snippet}\n         
             <MonacoEditor class=\"h-full\" readOnly content={snippet} language={selectedLanguage} noMinimap />\n                    {/key}\n                    <div class=\"absolute right-4 top-4 z-10\">\n                      <Button icon={copied ? faCheck : faCopy} type=\"secondary\" title=\"Copy\" on:click={copySnippet} />\n                    </div>\n                  </div>\n                {/if}\n              </div>\n            {/if}\n          </div>\n        </div>\n      </div>\n    </div>\n  {/snippet}\n</DetailsPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/InferenceServers.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { vi, test, expect, beforeEach } from 'vitest';\nimport { screen, render, fireEvent } from '@testing-library/svelte';\nimport InferenceServers from '/@/pages/InferenceServers.svelte';\nimport { type InferenceServer, InferenceType } from '@shared/models/IInference';\nimport { studioClient } from '/@/utils/client';\nimport { router } from 'tinro';\n\nconst mocks = vi.hoisted(() => ({\n  inferenceServersSubscribeMock: vi.fn(),\n  inferenceServersMock: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f(mocks.inferenceServersSubscribeMock());\n      return (): void => {};\n    },\n  },\n}));\nvi.mock('../stores/inferenceServers', async () => {\n  return {\n    inferenceServers: mocks.inferenceServersMock,\n  };\n});\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {\n    getInferenceServers: vi.fn(),\n    requestDeleteInferenceServer: vi.fn(),\n  },\n}));\n\nbeforeEach(() => {\n  vi.clearAllMocks();\n  mocks.inferenceServersSubscribeMock.mockReturnValue([]);\n  vi.mocked(studioClient.requestDeleteInferenceServer).mockResolvedValue(undefined);\n});\n\ntest('no inference servers should display a 
status message', async () => {\n  render(InferenceServers);\n  const title = screen.getByText('No model service running');\n  expect(title).toBeInTheDocument();\n  const table = screen.queryByRole('table');\n  expect(table).toBeNull();\n});\n\ntest('store with inference server should display the table', async () => {\n  mocks.inferenceServersSubscribeMock.mockReturnValue([\n    {\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n      type: InferenceType.NONE,\n      labels: {},\n    },\n  ] as InferenceServer[]);\n  render(InferenceServers);\n\n  const table = screen.getByRole('table');\n  expect(table).toBeInTheDocument();\n});\n\ntest('create service button should redirect to create page', async () => {\n  const gotoSpy = vi.spyOn(router, 'goto');\n  render(InferenceServers);\n  const createBtn = screen.getByTitle('Create a new model service');\n  expect(createBtn).toBeDefined();\n\n  await fireEvent.click(createBtn);\n  await vi.waitFor(() => {\n    expect(gotoSpy).toHaveBeenCalledWith('/service/create');\n  });\n});\n\ntest('table should have checkbox', async () => {\n  mocks.inferenceServersSubscribeMock.mockReturnValue([\n    {\n      health: undefined,\n      models: [],\n      connection: { port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n      type: InferenceType.NONE,\n      labels: {},\n    },\n  ] as InferenceServer[]);\n  render(InferenceServers);\n\n  const checkbox = screen.getByTitle('Toggle service');\n  expect(checkbox).toBeInTheDocument();\n\n  const deleteBtn = screen.queryByTitle('Delete selected items');\n  expect(deleteBtn).toBeNull();\n});\n\ntest('delete button should delete selected item', async () => {\n  mocks.inferenceServersSubscribeMock.mockReturnValue([\n    {\n      health: undefined,\n      models: [],\n      connection: 
{ port: 8888 },\n      status: 'running',\n      container: { containerId: 'dummyContainerId', engineId: 'dummyEngineId' },\n      type: InferenceType.NONE,\n      labels: {},\n    },\n  ] as InferenceServer[]);\n  render(InferenceServers);\n\n  const checkbox = screen.getByTitle('Toggle service');\n  await fireEvent.click(checkbox);\n\n  const deleteBtn = screen.getByTitle('Delete selected items');\n  expect(deleteBtn).toBeInTheDocument();\n\n  await fireEvent.click(deleteBtn);\n  expect(studioClient.requestDeleteInferenceServer).toHaveBeenCalledWith('dummyContainerId');\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/InferenceServers.svelte",
    "content": "<script lang=\"ts\">\nimport type { InferenceServer } from '@shared/models/IInference';\nimport ServiceColumnName from '/@/lib/table/service/ServiceColumnName.svelte';\nimport { inferenceServers } from '/@/stores/inferenceServers';\nimport ServiceStatus from '/@/lib/table/service/ServiceStatus.svelte';\nimport ServiceAction from '/@/lib/table/service/ServiceAction.svelte';\nimport ServiceColumnModelName from '/@/lib/table/service/ServiceColumnModelName.svelte';\nimport { faRocket, faPlusCircle, faTrash } from '@fortawesome/free-solid-svg-icons';\nimport { studioClient } from '/@/utils/client';\nimport { router } from 'tinro';\nimport { onMount } from 'svelte';\nimport { Button, Table, TableColumn, TableRow, NavPage, EmptyScreen } from '@podman-desktop/ui-svelte';\nimport ServiceColumnRuntime from '/@/lib/table/service/ServiceColumnRuntime.svelte';\n\nconst columns: TableColumn<InferenceServer>[] = [\n  new TableColumn<InferenceServer>('Status', { width: '70px', renderer: ServiceStatus, align: 'center' }),\n  new TableColumn<InferenceServer>('Name', { width: '1fr', renderer: ServiceColumnName, align: 'left' }),\n  new TableColumn<InferenceServer>('Model', { renderer: ServiceColumnModelName, align: 'left' }),\n  new TableColumn<InferenceServer>('Runtime', { width: '90px', renderer: ServiceColumnRuntime, align: 'left' }),\n  new TableColumn<InferenceServer>('Actions', { width: '80px', renderer: ServiceAction, align: 'right' }),\n];\nconst row = new TableRow<InferenceServer>({ selectable: (_service): boolean => true });\n\nlet data: (InferenceServer & { selected?: boolean })[];\n\nonMount(() => {\n  return inferenceServers.subscribe(items => {\n    data = items;\n  });\n});\n\nlet selectedItemsNumber: number;\n\nconst deleteSelected = (): void => {\n  studioClient\n    .requestDeleteInferenceServer(\n      ...data.filter(service => service.selected).map(service => service.container.containerId),\n    )\n    .catch((err: unknown) => {\n      
console.error('Something went wrong while trying to delete inference server', err);\n    });\n};\n\nfunction createNewService(): void {\n  router.goto('/service/create');\n}\n</script>\n\n<NavPage title=\"Model Services\" searchEnabled={false}>\n  {#snippet additionalActions()}\n    {#if selectedItemsNumber > 0}\n      <Button title=\"Delete selected items\" on:click={deleteSelected} icon={faTrash}\n        >Delete {selectedItemsNumber} selected items</Button>\n    {/if}\n    <Button icon={faPlusCircle} title=\"Create a new model service\" on:click={createNewService}\n      >New Model Service</Button>\n  {/snippet}\n  {#snippet content()}\n    <div class=\"flex min-w-full min-h-full\">\n      {#if data?.length > 0}\n        <Table kind=\"service\" data={data} columns={columns} row={row} bind:selectedItemsNumber={selectedItemsNumber} />\n      {:else}\n        <EmptyScreen\n          icon={faRocket}\n          title=\"No model service running\"\n          message=\"A model service offers a configurable endpoint via an OpenAI-compatible web server, facilitating a seamless integration of AI capabilities into existing applications. Upon initialization, effortlessly access detailed service information and generate code snippets in multiple programming languages to ease application integration.\">\n          <div class=\"flex gap-2 justify-center\">\n            <Button type=\"link\" on:click={createNewService}>Create service</Button>\n          </div>\n        </EmptyScreen>\n      {/if}\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/Model.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { vi, test, expect, beforeEach } from 'vitest';\nimport { screen, render } from '@testing-library/svelte';\nimport Model from './Model.svelte';\nimport { studioClient } from '../utils/client';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport * as inferenceStore from '/@/stores/inferenceServers';\nimport { readable } from 'svelte/store';\nimport type { InferenceServer } from '@shared/models/IInference';\n\nvi.mock('../utils/client', async () => {\n  return {\n    studioClient: {\n      getCatalog: vi.fn(),\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  };\n});\n\nvi.mock('/@/stores/inferenceServers', () => ({\n  inferenceServers: vi.fn(),\n}));\n\nconst model: ModelInfo = {\n  id: 'model1',\n  name: 'Model 1',\n  properties: {},\n  description: '',\n};\n\nbeforeEach(() => {\n  (inferenceStore.inferenceServers as unknown) = readable<InferenceServer[]>([]);\n});\n\ntest('model status should be visible', async () => {\n  vi.mocked(studioClient.getCatalog).mockResolvedValue({\n    models: [model],\n    categories: [],\n    recipes: [],\n  
  version: 'v1',\n  });\n\n  const { getByRole } = render(Model, {\n    modelId: model.id,\n  });\n\n  await vi.waitFor(() => {\n    const role = getByRole('status');\n    expect(role).toBeDefined();\n    expect(role.title).toBe('NONE');\n  });\n});\n\ntest('should display model information', async () => {\n  vi.mocked(studioClient.getCatalog).mockResolvedValue({\n    models: [model],\n    categories: [],\n    recipes: [],\n    version: 'v1',\n  });\n\n  render(Model, {\n    modelId: 'model1',\n  });\n\n  await vi.waitFor(() => {\n    const elements = screen.getAllByText(model.name);\n    expect(elements.length).toBeGreaterThan(0);\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/Model.svelte",
    "content": "<script lang=\"ts\">\nimport MarkdownRenderer from '/@/lib/markdown/MarkdownRenderer.svelte';\nimport { catalog } from '/@/stores/catalog';\nimport { DetailsPage } from '@podman-desktop/ui-svelte';\nimport { router } from 'tinro';\nimport ModelStatusIcon from '/@/lib/icons/ModelStatusIcon.svelte';\n\nexport let modelId: string;\n\n$: model = $catalog.models.find(m => m.id === modelId);\n\nexport function goToUpPage(): void {\n  router.goto('/models');\n}\n</script>\n\n<DetailsPage\n  title={model?.name ?? modelId}\n  breadcrumbLeftPart=\"Models\"\n  breadcrumbRightPart={model?.name ?? ''}\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  {#snippet iconSnippet()}\n    {#if model}\n      <div class=\"mr-3\">\n        <ModelStatusIcon object={model} />\n      </div>\n    {/if}\n  {/snippet}\n  {#snippet contentSnippet()}\n    <div class=\"flex flex-row w-full h-full bg-[var(--pd-content-bg)] overflow-y-auto\">\n      <div class=\"grow p-5\">\n        <MarkdownRenderer source={model?.description} />\n      </div>\n    </div>\n  {/snippet}\n</DetailsPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/Models.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { vi, test, expect, describe } from 'vitest';\nimport { screen, render, waitFor, within } from '@testing-library/svelte';\nimport Models from './Models.svelte';\nimport { router } from 'tinro';\nimport userEvent from '@testing-library/user-event';\nimport type { InferenceServer } from '@shared/models/IInference';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    getCatalogMock: vi.fn(),\n    getPullingStatusesMock: vi.fn().mockResolvedValue(new Map()),\n    modelsInfoSubscribeMock: vi.fn(),\n    tasksSubscribeMock: vi.fn(),\n    modelsInfoQueriesMock: {\n      subscribe: (f: (msg: unknown) => void) => {\n        f(mocks.modelsInfoSubscribeMock());\n        return (): void => {};\n      },\n    },\n    tasksQueriesMock: {\n      subscribe: (f: (msg: unknown) => void) => {\n        f(mocks.tasksSubscribeMock());\n        return (): void => {};\n      },\n    },\n    getModelsInfoMock: vi.fn().mockResolvedValue([]),\n    getTasks: vi.fn().mockResolvedValue([]),\n  };\n});\n\nvi.mock('../stores/inferenceServers', () => ({\n  inferenceServers: {\n    subscribe: (f: (msg: InferenceServer[]) => void) => {\n      f([]);\n      return (): void => {};\n    },\n  
},\n}));\n\nvi.mock('/@/utils/client', async () => {\n  return {\n    studioClient: {\n      getModelsInfo: mocks.getModelsInfoMock,\n      getPullingStatuses: mocks.getPullingStatusesMock,\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  };\n});\n\nvi.mock('../stores/modelsInfo', async () => {\n  return {\n    modelsInfo: mocks.modelsInfoQueriesMock,\n  };\n});\n\nvi.mock('../stores/tasks', async () => {\n  return {\n    tasks: mocks.tasksQueriesMock,\n  };\n});\n\ntest('should display There is no model yet', async () => {\n  mocks.modelsInfoSubscribeMock.mockReturnValue([]);\n  mocks.tasksSubscribeMock.mockReturnValue([]);\n\n  render(Models);\n\n  const status = screen.getByLabelText('status');\n  expect(status).toBeDefined();\n});\n\ntest('should display There is no model yet and have a task running', async () => {\n  mocks.modelsInfoSubscribeMock.mockReturnValue([]);\n  mocks.tasksSubscribeMock.mockReturnValue([\n    {\n      id: 'random',\n      name: 'random',\n      state: 'loading',\n      labels: {\n        'model-pulling': 'random-models-id',\n      },\n    },\n  ]);\n  render(Models);\n\n  const status = screen.getByLabelText('status');\n  expect(status).toBeDefined();\n\n  await waitFor(() => {\n    const title = screen.getByText('Downloading models');\n    expect(title).toBeDefined();\n  });\n});\n\ntest('should not display any tasks running', async () => {\n  mocks.modelsInfoSubscribeMock.mockReturnValue([]);\n  mocks.tasksSubscribeMock.mockReturnValue([\n    {\n      id: 'random',\n      name: 'random',\n      state: 'loading',\n    },\n  ]);\n  mocks.getPullingStatusesMock.mockResolvedValue([]);\n\n  render(Models);\n\n  const notification = screen.queryByText('Downloading models');\n  expect(notification).toBeNull();\n});\n\ntest('should display one model', async () => {\n  mocks.modelsInfoSubscribeMock.mockReturnValue([\n    {\n      id: 
'dummy-id',\n      name: 'dummy-name',\n      memory: 1024,\n    },\n  ]);\n  mocks.tasksSubscribeMock.mockReturnValue([]);\n\n  render(Models);\n\n  const table = screen.getByRole('table');\n  expect(table).toBeDefined();\n\n  const cells = screen.queryAllByRole('cell');\n  expect(cells.length > 2).toBeTruthy();\n\n  const name = await within(cells[2]).findByText('dummy-name');\n  expect(name).not.toBeNull();\n});\n\ntest('should display downloaded model first', async () => {\n  mocks.modelsInfoSubscribeMock.mockReturnValue([\n    {\n      id: 'dummy-local-id',\n      name: 'dummy-local-name',\n      memory: 1024,\n      file: {\n        path: 'random',\n      },\n    },\n    {\n      id: 'dummy-id',\n      name: 'dummy-name',\n      memory: 1024,\n    },\n  ]);\n  mocks.tasksSubscribeMock.mockReturnValue([]);\n\n  const { container } = render(Models);\n\n  const table = within(container).getByRole('table');\n  expect(table).toBeDefined();\n\n  const rows = within(table).queryAllByRole('row');\n  expect(rows.length).toBe(3);\n\n  // First row should be the headers\n  const headers = within(rows[0]).queryAllByRole('columnheader');\n  expect(headers.length > 0).toBeTruthy();\n\n  // second row should be the downloaded model\n  const deleteBtn = within(rows[1]).getByTitle('Delete Model');\n  expect(deleteBtn).toBeDefined();\n\n  // last row should be the remote model\n  const downloadBtn = within(rows[2]).getByTitle('Download Model');\n  expect(downloadBtn).toBeDefined();\n});\n\ndescribe('downloaded models', () => {\n  test('should display no model in downloaded tab', async () => {\n    mocks.modelsInfoSubscribeMock.mockReturnValue([\n      {\n        id: 'dummy-id',\n        name: 'dummy-name',\n        memory: 1024,\n      },\n    ]);\n    mocks.tasksSubscribeMock.mockReturnValue([]);\n\n    render(Models);\n\n    router.goto('downloaded');\n\n    await waitFor(() => {\n      const status = screen.getByLabelText('status');\n      expect(status).toBeDefined();\n    
});\n  });\n\n  test('should display a model in downloaded tab', async () => {\n    mocks.modelsInfoSubscribeMock.mockReturnValue([\n      {\n        id: 'dummy-id',\n        name: 'dummy-name',\n        file: {\n          file: 'dummy',\n          path: 'dummy',\n        },\n        memory: 1024,\n        // eslint-disable-next-line sonarjs/no-clear-text-protocols\n        url: 'http://url',\n      },\n    ]);\n    mocks.tasksSubscribeMock.mockReturnValue([]);\n\n    render(Models);\n\n    router.goto('downloaded');\n\n    await waitFor(() => {\n      const table = screen.getByRole('table');\n      expect(table).toBeDefined();\n    });\n  });\n\n  test('should display only downloaded models', async () => {\n    mocks.modelsInfoSubscribeMock.mockReturnValue([\n      {\n        id: 'dummy-id-downloaded',\n        name: 'dummy-downloaded-1',\n        file: {\n          file: 'dummy',\n          path: 'dummy',\n        },\n        memory: 1024,\n        // eslint-disable-next-line sonarjs/no-clear-text-protocols\n        url: 'http://url',\n      },\n      {\n        id: 'dummy-id-downloaded-2',\n        name: 'dummy-downloaded-2',\n        file: {\n          file: 'dummy',\n          path: 'dummy',\n        },\n        memory: 1024,\n        // eslint-disable-next-line sonarjs/no-clear-text-protocols\n        url: 'http://url',\n      },\n      {\n        id: 'dummy-id-imported',\n        name: 'dummy-imported',\n        file: {\n          file: 'dummy',\n          path: 'dummy',\n        },\n        memory: 1024,\n      },\n    ]);\n    mocks.tasksSubscribeMock.mockReturnValue([]);\n\n    render(Models);\n\n    router.goto('downloaded');\n\n    await waitFor(() => expect(screen.getByRole('table')).toBeDefined());\n\n    const rows = screen.getAllByRole('cell', { name: 'Model Name' });\n    expect(rows.length).toBe(2);\n    expect(await within(rows[0]).findByTitle('dummy-downloaded-1')).toBeDefined();\n    expect(await 
within(rows[1]).findByTitle('dummy-downloaded-2')).toBeDefined();\n  });\n});\n\ndescribe('imported models', () => {\n  test('should display no model in imported tab', async () => {\n    mocks.modelsInfoSubscribeMock.mockReturnValue([]);\n    mocks.tasksSubscribeMock.mockReturnValue([]);\n\n    render(Models);\n\n    router.goto('imported');\n\n    await waitFor(() => {\n      const status = screen.getByLabelText('status');\n      expect(status).toBeDefined();\n    });\n  });\n\n  test('should display a model in imported tab', async () => {\n    mocks.modelsInfoSubscribeMock.mockReturnValue([\n      {\n        id: 'dummy-id',\n        name: 'dummy-name',\n        file: {\n          file: 'dummy',\n          path: 'dummy',\n        },\n        memory: 1024,\n      },\n    ]);\n    mocks.tasksSubscribeMock.mockReturnValue([]);\n\n    render(Models);\n\n    router.goto('imported');\n\n    await waitFor(() => {\n      const table = screen.getByRole('table');\n      expect(table).toBeDefined();\n    });\n  });\n});\n\ndescribe('available models', () => {\n  test('should display a model in available tab', async () => {\n    mocks.modelsInfoSubscribeMock.mockReturnValue([\n      {\n        id: 'dummy-id',\n        name: 'dummy-name',\n        memory: 1024,\n      },\n    ]);\n    mocks.tasksSubscribeMock.mockReturnValue([]);\n\n    render(Models);\n\n    router.goto('available');\n\n    await waitFor(() => {\n      const table = screen.getByRole('table');\n      expect(table).toBeDefined();\n    });\n  });\n\n  test('should display no model in available tab', async () => {\n    mocks.modelsInfoSubscribeMock.mockReturnValue([\n      {\n        id: 'dummy-id',\n        name: 'dummy-name',\n        file: {\n          file: 'dummy',\n          path: 'dummy',\n        },\n        memory: 1024,\n      },\n    ]);\n    mocks.tasksSubscribeMock.mockReturnValue([]);\n\n    render(Models);\n\n    router.goto('available');\n\n    await waitFor(() => {\n      const status = 
screen.getByLabelText('status');\n      expect(status).toBeDefined();\n    });\n  });\n});\n\ntest('Import button should redirect to import page', async () => {\n  const routerMock = vi.spyOn(router, 'goto');\n\n  render(Models);\n\n  const importButton = screen.getByRole('button', { name: 'Import Models' });\n  await userEvent.click(importButton);\n\n  expect(routerMock).toBeCalledWith('/models/import');\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/Models.svelte",
    "content": "<script lang=\"ts\">\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { modelsInfo } from '../stores/modelsInfo';\nimport ModelColumnName from '../lib/table/model/ModelColumnName.svelte';\nimport ModelColumnLabels from '../lib/table/model/ModelColumnLabels.svelte';\nimport type { Task } from '@shared/models/ITask';\nimport TasksProgress from '/@/lib/progress/TasksProgress.svelte';\nimport Card from '/@/lib/Card.svelte';\nimport { onMount } from 'svelte';\nimport ModelColumnSize from '../lib/table/model/ModelColumnSize.svelte';\nimport ModelColumnAge from '../lib/table/model/ModelColumnAge.svelte';\nimport ModelColumnActions from '../lib/table/model/ModelColumnActions.svelte';\nimport { EmptyScreen, Tab, Button, Table, TableColumn, TableRow, NavPage } from '@podman-desktop/ui-svelte';\nimport Route from '/@/Route.svelte';\nimport { tasks } from '/@/stores/tasks';\nimport ModelStatusIcon from '../lib/icons/ModelStatusIcon.svelte';\nimport { router } from 'tinro';\nimport { faBookOpen, faFileImport } from '@fortawesome/free-solid-svg-icons';\nimport { SvelteSet } from 'svelte/reactivity';\n\nconst columns = [\n  new TableColumn<ModelInfo>('Status', {\n    width: '60px',\n    renderer: ModelStatusIcon,\n    comparator: (a, b): number => (a.file ? 0 : 1) - (b.file ? 0 : 1),\n  }),\n  new TableColumn<ModelInfo>('Name', {\n    width: 'minmax(100px,1fr)',\n    renderer: ModelColumnName,\n    comparator: (a, b): number => b.name.localeCompare(a.name),\n  }),\n  new TableColumn<ModelInfo>('Size', {\n    width: 'minmax(10px,50px)',\n    renderer: ModelColumnSize,\n    comparator: (a, b): number => (a.file?.size ?? 0) - (b.file?.size ?? 0),\n  }),\n  new TableColumn<ModelInfo>('Age', {\n    width: 'minmax(10px,70px)',\n    renderer: ModelColumnAge,\n    comparator: (a, b): number => (a.file?.creation?.getTime() ?? 0) - (b.file?.creation?.getTime() ?? 
0),\n  }),\n  new TableColumn<ModelInfo>('', { width: 'minmax(50px,175px)', align: 'right', renderer: ModelColumnLabels }),\n  new TableColumn<ModelInfo>('Actions', { align: 'right', width: '120px', renderer: ModelColumnActions }),\n];\nconst row = new TableRow<ModelInfo>({});\n\nlet loading: boolean = true;\n\nlet pullingTasks: Task[] = [];\nlet models: ModelInfo[] = [];\n\n// filtered means we remove the models that are being downloaded\nlet filteredModels: ModelInfo[] = [];\n\n$: localModels = filteredModels.filter(model => model.file && model.url);\n$: remoteModels = filteredModels.filter(model => !model.file);\n$: importedModels = filteredModels.filter(model => !model.url);\n\nfunction filterModels(): void {\n  // Let's collect the models we do not want to show (loading).\n  const modelsId: string[] = pullingTasks.reduce((previousValue, currentValue) => {\n    if (currentValue.labels !== undefined && currentValue.state !== 'error') {\n      previousValue.push(currentValue.labels['model-pulling']);\n    }\n    return previousValue;\n  }, [] as string[]);\n  filteredModels = models.filter(model => !modelsId.includes(model.id));\n}\n\nonMount(() => {\n  // Subscribe to the tasks store\n  const tasksUnsubscribe = tasks.subscribe(value => {\n    // Filter out duplicates\n    const modelIds = new SvelteSet<string>();\n    pullingTasks = value.reduce((filtered: Task[], task: Task) => {\n      if (\n        (task.state === 'loading' || task.state === 'error') &&\n        task.labels !== undefined &&\n        'model-pulling' in task.labels &&\n        !modelIds.has(task.labels['model-pulling'])\n      ) {\n        modelIds.add(task.labels['model-pulling']);\n        filtered.push(task);\n      }\n      return filtered;\n    }, []);\n\n    loading = false;\n    filterModels();\n  });\n\n  // Subscribe to the models store\n  const localModelsUnsubscribe = modelsInfo.subscribe(value => {\n    models = value;\n    filterModels();\n  });\n\n  return (): void => {\n    
tasksUnsubscribe();\n    localModelsUnsubscribe();\n  };\n});\n\nasync function importModel(): Promise<void> {\n  router.goto('/models/import');\n}\n</script>\n\n<NavPage title=\"Models\" searchEnabled={false}>\n  {#snippet tabs()}\n    <Tab title=\"All\" url=\"/models\" selected={$router.path === '/models'} />\n    <Tab title=\"Downloaded\" url=\"/models/downloaded\" selected={$router.path === '/models/downloaded'} />\n    <Tab title=\"Imported\" url=\"/models/imported\" selected={$router.path === '/models/imported'} />\n    <Tab title=\"Available\" url=\"/models/available\" selected={$router.path === '/models/available'} />\n  {/snippet}\n  {#snippet additionalActions()}\n    <Button on:click={importModel} icon={faFileImport} aria-label=\"Import Models\">Import</Button>\n  {/snippet}\n  {#snippet content()}\n    <div class=\"flex flex-col min-w-full min-h-full space-y-5\">\n      {#if !loading}\n        {#if pullingTasks.length > 0}\n          <div class=\"w-full px-5\">\n            <Card classes=\"bg-[var(--pd-content-card-bg)] mt-4\">\n              <div slot=\"content\" class=\"font-normal p-2 w-full\">\n                <div class=\"text-[var(--pd-content-card-title)] mb-2\">Downloading models</div>\n                <TasksProgress tasks={pullingTasks} />\n              </div>\n            </Card>\n          </div>\n        {/if}\n\n        <div class=\"flex min-w-full min-h-full\">\n          <!-- All models -->\n          <Route path=\"/\">\n            {#if filteredModels.length > 0}\n              <Table defaultSortColumn=\"Status\" kind=\"model\" data={filteredModels} columns={columns} row={row}></Table>\n            {:else}\n              <EmptyScreen aria-label=\"status\" icon={faBookOpen} title=\"No models\" message=\"No models available\" />\n            {/if}\n          </Route>\n\n          <!-- Downloaded models -->\n          <Route path=\"/downloaded\">\n            {#if localModels.length > 0}\n              <Table kind=\"model\" 
data={localModels} columns={columns} row={row}></Table>\n            {:else}\n              <EmptyScreen\n                aria-label=\"status\"\n                icon={faBookOpen}\n                title=\"No models\"\n                message=\"No model has been downloaded yet\" />\n            {/if}\n          </Route>\n\n          <!-- Imported models -->\n          <Route path=\"/imported\">\n            {#if importedModels.length > 0}\n              <Table kind=\"model\" data={importedModels} columns={columns} row={row}></Table>\n            {:else}\n              <EmptyScreen\n                aria-label=\"status\"\n                icon={faBookOpen}\n                title=\"No models\"\n                message=\"No model has been imported yet\" />\n            {/if}\n          </Route>\n\n          <!-- Available models (from catalogs)-->\n          <Route path=\"/available\">\n            {#if remoteModels.length > 0}\n              <Table kind=\"model\" data={remoteModels} columns={columns} row={row}></Table>\n            {:else}\n              <EmptyScreen aria-label=\"status\" icon={faBookOpen} title=\"No models\" message=\"No model is available\" />\n            {/if}\n          </Route>\n        </div>\n      {/if}\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/NewInstructLabSession.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { vi, test, expect, beforeEach, describe } from 'vitest';\nimport { fireEvent, render } from '@testing-library/svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport NewInstructLabSession from '/@/pages/NewInstructLabSession.svelte';\nimport { writable, type Writable } from 'svelte/store';\nimport { modelsInfo } from '/@/stores/modelsInfo';\nimport { studioClient } from '/@/utils/client';\nimport type { Uri } from '@shared/uri/Uri';\nimport type { RenderResult } from '@testing-library/svelte';\nimport { router } from 'tinro';\n\nvi.mock('../stores/modelsInfo', async () => ({\n  modelsInfo: {\n    subscribe: vi.fn(),\n    unsubscribe: vi.fn(),\n  },\n}));\n\nvi.mock('tinro', () => ({\n  router: {\n    goto: vi.fn(),\n  },\n}));\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {\n    openURL: vi.fn(),\n    openDialog: vi.fn(),\n    getExtensionConfiguration: vi.fn(),\n  },\n  rpcBrowser: {\n    subscribe: (): unknown => {\n      return {\n        unsubscribe: (): void => {},\n      };\n    },\n  },\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  const infos: 
Writable<ModelInfo[]> = writable([]);\n  vi.mocked(modelsInfo).subscribe.mockImplementation(run => infos.subscribe(run));\n  vi.mocked(studioClient.getExtensionConfiguration).mockResolvedValue({\n    experimentalGPU: false,\n    apiPort: 0,\n    experimentalTuning: false,\n    modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    modelUploadDisabled: false,\n    showGPUPromotion: false,\n    appearance: 'dark',\n  });\n});\n\ntest('empty form should have submit disabled', async () => {\n  const { getByTitle } = render(NewInstructLabSession);\n\n  const submit = getByTitle('Start session');\n  expect(submit).toBeDefined();\n  expect(submit).toBeDisabled();\n});\n\ntest('breadcrumb click should goto sessions list', async () => {\n  const { getByRole } = render(NewInstructLabSession);\n\n  const back = getByRole('link', { name: 'Back' });\n  expect(back).toBeDefined();\n\n  await fireEvent.click(back);\n\n  expect(router.goto).toHaveBeenCalledWith('/tune');\n});\n\ndescribe('radio selection', () => {\n  test('expect knowledge radio to be selected by default', async () => {\n    const { getByTitle } = render(NewInstructLabSession);\n\n    const selectKnowledgeFile = getByTitle('Select knowledge file');\n    expect(selectKnowledgeFile).toBeDefined();\n    expect(selectKnowledgeFile).toBeEnabled();\n\n    const selectSkillFile = getByTitle('Select skills file');\n    expect(selectSkillFile).toBeDefined();\n    expect(selectSkillFile).toBeDisabled();\n  });\n\n  test('expect knowledge to be disabled if user select skills', async () => {\n    const { getByTitle } = render(NewInstructLabSession);\n\n    const useSkills = getByTitle('Use Skills');\n    expect(useSkills).toBeDefined();\n    await fireEvent.click(useSkills);\n\n    const selectKnowledgeFile = getByTitle('Select knowledge file');\n    expect(selectKnowledgeFile).toBeDefined();\n    expect(selectKnowledgeFile).toBeDisabled();\n  });\n});\n\n/**\n * The file selection is the same for knowledge and skills so 
using each\n */\ndescribe.each(['knowledge', 'skills'])('file selection %s', (type: string) => {\n  /**\n   * This function renders the NewInstructLabSession with the expected radio selected (either skills or knowledge).\n   */\n  // eslint-disable-next-line @typescript-eslint/no-explicit-any\n  async function renderForm(): Promise<RenderResult<any>> {\n    const renderResult = render(NewInstructLabSession);\n\n    if (type === 'skills') {\n      const useSkills = renderResult.getByTitle('Use Skills');\n      expect(useSkills).toBeDefined();\n      await fireEvent.click(useSkills);\n    }\n\n    return renderResult;\n  }\n\n  test(`click on select ${type} should open dialog`, async () => {\n    vi.mocked(studioClient.openDialog).mockResolvedValue([]);\n\n    const { getByTitle } = await renderForm();\n\n    const selectKnowledgeFile = getByTitle(`Select ${type} file`);\n    expect(selectKnowledgeFile).toBeDefined();\n    expect(selectKnowledgeFile).toBeEnabled();\n\n    await fireEvent.click(selectKnowledgeFile);\n\n    expect(studioClient.openDialog).toHaveBeenCalledWith({\n      title: `Select ${type}`,\n      selectors: ['openFile'],\n      filters: [\n        {\n          name: 'YAML files',\n          extensions: ['yaml', 'YAML', 'yml'],\n        },\n      ],\n    });\n  });\n\n  test(`expect ${type} to be added on selection`, async () => {\n    const file = '/random/folder/resource.yaml';\n    vi.mocked(studioClient.openDialog).mockResolvedValue([\n      {\n        path: file,\n      },\n    ] as Uri[]);\n    const { getByTitle, getByText } = await renderForm();\n\n    const selectKnowledgeFile = getByTitle(`Select ${type} file`);\n    await fireEvent.click(selectKnowledgeFile);\n\n    expect(studioClient.openDialog).toHaveBeenCalled();\n\n    const span = getByText(file);\n    expect(span).toBeEnabled();\n  });\n\n  test(`expect multiple ${type} to be added on multi selection`, async () => {\n    const files = ['/random/folder/resource1.yaml', 
'/random/folder/resource2.yaml', '/random/folder/resource3.yaml'];\n    vi.mocked(studioClient.openDialog).mockResolvedValue(\n      files.map(file => ({\n        path: file,\n      })) as Uri[],\n    );\n    const { getByTitle, getByText } = await renderForm();\n\n    const selectKnowledgeFile = getByTitle(`Select ${type} file`);\n    await fireEvent.click(selectKnowledgeFile);\n\n    expect(studioClient.openDialog).toHaveBeenCalled();\n\n    for (const file of files) {\n      const span = getByText(file);\n      expect(span).toBeDefined();\n    }\n  });\n\n  test('remove file button should remove a given file', async () => {\n    const files = ['/random/folder/resource1.yaml', '/random/folder/resource2.yaml'];\n    vi.mocked(studioClient.openDialog).mockResolvedValue(\n      files.map(file => ({\n        path: file,\n      })) as Uri[],\n    );\n    const { getByTitle, queryByText } = await renderForm();\n\n    const selectKnowledgeFile = getByTitle(`Select ${type} file`);\n    await fireEvent.click(selectKnowledgeFile);\n\n    expect(studioClient.openDialog).toHaveBeenCalled();\n\n    const removeBtn = getByTitle(`Remove ${files[1]}`);\n    expect(removeBtn).toBeEnabled();\n    if (!removeBtn) throw new Error('undefined remove btn');\n\n    await fireEvent.click(removeBtn);\n\n    await vi.waitFor(() => {\n      expect(queryByText(files[1])).toBeNull();\n    });\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/NewInstructLabSession.svelte",
    "content": "<script lang=\"ts\">\nimport { router } from 'tinro';\nimport { FormPage, Input, Button, Link } from '@podman-desktop/ui-svelte';\nimport { faFile, faPlus, faPlusCircle, faMinusCircle, faCircleCheck } from '@fortawesome/free-solid-svg-icons';\nimport Fa from 'svelte-fa';\nimport ModelSelect from '/@/lib/select/ModelSelect.svelte';\nimport { modelsInfo } from '/@/stores/modelsInfo';\nimport { studioClient } from '/@/utils/client';\nimport { Uri } from '@shared/uri/Uri';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\n\nlet skillsFiles: string[] = $state([]);\nlet knowledgeFiles: string[] = $state([]);\n\nlet model: ModelInfo | undefined = $state(undefined);\nlet sessionName: string = $state('');\nlet valid: boolean = $derived(\n  (skillsFiles.length > 0 || knowledgeFiles.length > 0) && !!model && sessionName.length > 0,\n);\n\nlet trainingType: 'knowledge' | 'skills' = $state('knowledge');\n\nfunction goToUpPage(): void {\n  router.goto('/tune');\n}\n\nasync function requestExplorerModal(title: string): Promise<Uri[]> {\n  const results = await studioClient.openDialog({\n    title: title,\n    selectors: ['openFile'],\n    filters: [\n      {\n        name: 'YAML files',\n        extensions: ['yaml', 'YAML', 'yml'],\n      },\n    ],\n  });\n  if (!results) {\n    return [];\n  }\n\n  return results.map(result => Uri.revive(result));\n}\n\nasync function addSkills(): Promise<void> {\n  const files = await requestExplorerModal('Select skills');\n  skillsFiles.push(...files.map(file => file.path));\n}\n\nfunction removeKnowledge(file: string): void {\n  knowledgeFiles = knowledgeFiles.toSpliced(knowledgeFiles.indexOf(file), 1);\n}\nfunction removeSkills(file: string): void {\n  skillsFiles = skillsFiles.toSpliced(skillsFiles.indexOf(file), 1);\n}\n\nasync function addKnowledge(): Promise<void> {\n  const files = await requestExplorerModal('Select knowledge');\n  knowledgeFiles.push(...files.map(file => file.path));\n}\n\nfunction 
setTrainingType(type: 'knowledge' | 'skills'): void {\n  switch (trainingType) {\n    case 'knowledge':\n      skillsFiles = [];\n      break;\n    case 'skills':\n      knowledgeFiles = [];\n      break;\n  }\n  trainingType = type;\n}\n\nfunction openInstructLabDocumentation(): void {\n  studioClient\n    .openURL(\n      'https://github.com/instructlab/instructlab?tab=readme-ov-file#-creating-new-knowledge-or-skills-and-training-the-model',\n    )\n    .catch((err: unknown) => {\n      console.error(err);\n    });\n}\n</script>\n\n<FormPage\n  title=\"New InstructLab Session\"\n  breadcrumbLeftPart=\"InstructLab Sessions\"\n  breadcrumbRightPart=\"New session\"\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  <!-- Removed breadcrumbTitle above, no longer needed for svelte 5 formpage -->\n  {#snippet icon()}\n    <div class=\"rounded-full w-8 h-8 flex items-center justify-center\">\n      <Fa size=\"1.125x\" class=\"text-[var(--pd-content-header-icon)]\" icon={faPlus} />\n    </div>\n  {/snippet}\n  {#snippet content()}\n    <div class=\"flex flex-col w-full\">\n      <!-- form -->\n      <div class=\"bg-[var(--pd-content-card-bg)] m-5 space-y-6 px-8 sm:pb-6 xl:pb-8 rounded-lg h-fit\">\n        <div class=\"w-full flex flex-col gap-y-4\">\n          <!-- model input -->\n          <div>\n            <label for=\"model\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n              >Model to Fine Tune</label>\n            <ModelSelect models={$modelsInfo} disabled={false} bind:value={model} />\n          </div>\n\n          <!-- session name -->\n          <div>\n            <label for=\"session-name\" class=\"block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n              >Session name</label>\n            <Input bind:value={sessionName} class=\"grow\" name=\"session-name\" aria-label=\"session name\" />\n            <span class=\"text-[var(--pd-table-body-text)]\"\n              >Name of the session to 
be able to easily find it in your list of sessions.</span>\n          </div>\n\n          <!-- file(s) input -->\n          <div class=\"flex flex-col gap-y-1\">\n            <!-- knowledge -->\n            <div\n              class:border-2={true}\n              class=\"rounded-md p-5 cursor-pointer bg-[var(--pd-content-card-inset-bg)] flex flex-col gap-y-2\"\n              aria-label=\"Knowledge\"\n              aria-pressed={trainingType === 'knowledge' ? 'true' : 'false'}\n              class:border-[var(--pd-content-card-border-selected)]={trainingType === 'knowledge'}\n              class:border-[var(--pd-content-card-border)]={trainingType !== 'knowledge'}>\n              <button\n                title=\"Use Knowledge\"\n                class=\"flex flex-row align-middle items-center\"\n                onclick={setTrainingType.bind(undefined, 'knowledge')}>\n                <div\n                  class=\"text-2xl\"\n                  class:text-[var(--pd-content-card-border-selected)]={trainingType === 'knowledge'}\n                  class:text-[var(--pd-content-card-border)]={trainingType !== 'knowledge'}>\n                  <Fa icon={faCircleCheck} />\n                </div>\n                <div\n                  class=\"pl-2\"\n                  class:text-[var(--pd-content-card-text)]={trainingType === 'knowledge'}\n                  class:text-[var(--pd-input-field-disabled-text)]={trainingType !== 'knowledge'}>\n                  Add knowledge\n                </div>\n              </button>\n\n              <!-- files list -->\n              <div class=\"flex flex-col\">\n                {#each knowledgeFiles as file (file)}\n                  <div\n                    class=\"bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] max-w-full rounded-md px-2 py-1 mb-2 flex flex-row w-min h-min text-sm text-nowrap items-center\">\n                    <Fa class=\"mr-2\" icon={faFile} />\n                    <span class=\"overflow-x-hidden text-ellipsis 
max-w-full\">\n                      {file}\n                    </span>\n                    <Button\n                      title=\"Remove {file}\"\n                      on:click={removeKnowledge.bind(undefined, file)}\n                      icon={faMinusCircle}\n                      type=\"link\" />\n                  </div>\n                {/each}\n                <Button\n                  title=\"Select knowledge file\"\n                  type=\"link\"\n                  disabled={trainingType !== 'knowledge'}\n                  class={trainingType !== 'knowledge'\n                    ? 'text-[var(--pd-input-field-disabled-text)] hover:bg-transparent w-min'\n                    : 'w-min'}\n                  on:click={addKnowledge}\n                  icon={faPlusCircle}>Add knowledge to use</Button>\n                <span\n                  class:text-[var(--pd-input-field-disabled-text)]={trainingType !== 'knowledge'}\n                  class:text-[var(--pd-table-body-text)]={trainingType === 'knowledge'}>\n                  Add a YAML file downloaded from your Knowledge contribution on InstructLab. This will create a\n                  synthetic dataset based on the YAML file and retrain the model using this data.\n                  <Link on:click={openInstructLabDocumentation}>Learn more about knowledge contributions</Link>\n                </span>\n              </div>\n            </div>\n\n            <!-- skills -->\n            <div\n              class:border-2={true}\n              class=\"rounded-md p-5 cursor-pointer bg-[var(--pd-content-card-inset-bg)] flex flex-col gap-y-2\"\n              aria-label=\"Skills\"\n              aria-pressed={trainingType === 'skills' ? 
'true' : 'false'}\n              class:border-[var(--pd-content-card-border-selected)]={trainingType === 'skills'}\n              class:border-[var(--pd-content-card-border)]={trainingType !== 'skills'}>\n              <button\n                title=\"Use Skills\"\n                class=\"flex flex-row align-middle items-center\"\n                onclick={setTrainingType.bind(undefined, 'skills')}>\n                <div\n                  class=\"text-2xl\"\n                  class:text-[var(--pd-content-card-border-selected)]={trainingType === 'skills'}\n                  class:text-[var(--pd-content-card-border)]={trainingType !== 'skills'}>\n                  <Fa icon={faCircleCheck} />\n                </div>\n                <div\n                  class=\"pl-2\"\n                  class:text-[var(--pd-content-card-text)]={trainingType === 'skills'}\n                  class:text-[var(--pd-input-field-disabled-text)]={trainingType !== 'skills'}>\n                  Add skills\n                </div>\n              </button>\n              <!-- files list -->\n              <div class=\"flex flex-col\">\n                {#each skillsFiles as file (file)}\n                  <div\n                    class=\"bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] max-w-full rounded-md px-2 py-1 mb-2 flex flex-row w-min h-min text-sm text-nowrap items-center\">\n                    <Fa class=\"mr-2\" icon={faFile} />\n                    <span class=\"overflow-x-hidden text-ellipsis max-w-full\">\n                      {file}\n                    </span>\n                    <Button\n                      title=\"Remove {file}\"\n                      on:click={removeSkills.bind(undefined, file)}\n                      icon={faMinusCircle}\n                      type=\"link\" />\n                  </div>\n                {/each}\n                <Button\n                  title=\"Select skills file\"\n                  on:click={addSkills}\n                  
disabled={trainingType !== 'skills'}\n                  icon={faPlusCircle}\n                  type=\"link\"\n                  class={trainingType !== 'skills'\n                    ? 'text-[var(--pd-input-field-disabled-text)] hover:bg-transparent w-min'\n                    : 'w-min'}>Add skill to use</Button>\n                <span\n                  class:text-[var(--pd-input-field-disabled-text)]={trainingType !== 'skills'}\n                  class:text-[var(--pd-table-body-text)]={trainingType === 'skills'}>\n                  Add a YAML file downloaded from your skills contribution on InstructLab. This will create a synthetic\n                  dataset based on the YAML file and retrain the model using this data.\n                  <Link on:click={openInstructLabDocumentation}>Learn more about skills contributions</Link>\n                </span>\n              </div>\n            </div>\n          </div>\n        </div>\n        <footer>\n          <div class=\"w-full flex flex-col\">\n            <Button title=\"Start session\" inProgress={false} disabled={!valid} icon={faPlusCircle}>\n              Start session\n            </Button>\n          </div>\n        </footer>\n      </div>\n    </div>\n  {/snippet}\n</FormPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/Playground.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { render, screen, waitFor, within } from '@testing-library/svelte';\nimport { beforeEach, expect, test, vi, describe } from 'vitest';\nimport Playground from './Playground.svelte';\nimport { studioClient } from '../utils/client';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { fireEvent } from '@testing-library/dom';\nimport type { AssistantChat, ModelUsage, PendingChat, UserChat } from '@shared/models/IPlaygroundMessage';\nimport * as conversationsStore from '/@/stores/conversations';\nimport * as inferenceServersStore from '/@/stores/inferenceServers';\nimport { readable, writable } from 'svelte/store';\nimport userEvent from '@testing-library/user-event';\nimport { InferenceType, type InferenceServer } from '@shared/models/IInference';\n\nvi.mock('../utils/client', async () => {\n  return {\n    studioClient: {\n      getCatalog: vi.fn(),\n      submitPlaygroundMessage: vi.fn(),\n      requestCancelToken: vi.fn(),\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  
};\n});\n\nvi.mock('/@/stores/conversations', async () => {\n  return {\n    conversations: vi.fn(),\n  };\n});\n\nvi.mock('/@/stores/inferenceServers', async () => {\n  return {\n    inferenceServers: vi.fn(),\n  };\n});\n\nconst customConversations = writable<conversationsStore.ConversationWithBackend[]>([\n  {\n    id: 'playground-1',\n    name: 'Playground 1',\n    modelId: 'model-1',\n    messages: [],\n    usage: {} as ModelUsage,\n    backend: InferenceType.LLAMA_CPP,\n  },\n]);\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  vi.mocked(inferenceServersStore).inferenceServers = readable([]);\n\n  // mock catalog\n  vi.mocked(studioClient.getCatalog).mockResolvedValue({\n    models: [\n      {\n        id: 'model-1',\n        name: 'Model 1',\n      },\n    ] as ModelInfo[],\n    recipes: [],\n    categories: [],\n  });\n  vi.mocked(studioClient.requestCancelToken).mockResolvedValue(undefined);\n\n  // mock conversation\n  vi.mocked(conversationsStore).conversations = customConversations;\n\n  // mock inference server\n  vi.mocked(inferenceServersStore).inferenceServers = readable([\n    {\n      models: [{ id: 'model-1' }],\n      status: 'running',\n    } as unknown as InferenceServer,\n  ]);\n});\n\ntest('should display playground and model names in header', async () => {\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  await waitFor(() => {\n    // TODO: restrict to header when https://github.com/containers/podman-desktop/issues/7740 is fixed\n    const title = screen.getAllByText('Playground 1')[0];\n    expect(title).toBeInTheDocument();\n    const subtitle = screen.getByLabelText('Model name');\n    expect(subtitle).toBeInTheDocument();\n    expect(subtitle.textContent).equals('Model 1');\n  });\n});\n\ntest('send prompt should not be enabled initially', async () => {\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  await waitFor(() => {\n    const send = screen.getByRole('button', { name: 'Send prompt' });\n  
  expect(send).toBeDisabled();\n  });\n});\n\ntest('send prompt should be disabled initially if model server is not healthy', async () => {\n  vi.mocked(inferenceServersStore).inferenceServers = readable([\n    {\n      models: [{ id: 'model-1' }],\n      status: 'running',\n      health: {\n        Status: 'starting',\n      },\n    } as unknown as InferenceServer,\n  ]);\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  await waitFor(() => {\n    const send = screen.getByRole('button', { name: 'Send prompt' });\n    expect(send).toBeDisabled();\n  });\n});\n\ntest('send prompt should be disabled initially if model server is not running', async () => {\n  vi.mocked(inferenceServersStore).inferenceServers = readable([\n    {\n      models: [{ id: 'model-1' }],\n      status: 'stopped',\n      health: {\n        Status: '',\n      },\n    } as unknown as InferenceServer,\n  ]);\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  await waitFor(() => {\n    const send = screen.getByRole('button', { name: 'Send prompt' });\n    expect(send).toBeDisabled();\n  });\n});\n\ntest('sending prompt should disable the send button and the input element', async () => {\n  vi.mocked(studioClient.submitPlaygroundMessage).mockResolvedValue(0);\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  let send: HTMLElement;\n  await waitFor(() => {\n    send = screen.getByRole('button', { name: 'Send prompt' });\n    expect(send).toBeInTheDocument();\n  });\n  fireEvent.click(send!);\n\n  await waitFor(() => {\n    send = screen.getByRole('button', { name: 'Send prompt' });\n    expect(send).toBeDisabled();\n    const input = screen.getByRole('textbox', { name: 'prompt' });\n    expect(input).toBeDisabled();\n  });\n});\n\ntest('sending prompt not using button should disable the send button and the input element', async () => {\n  vi.mocked(studioClient.submitPlaygroundMessage).mockResolvedValue(0);\n  render(Playground, {\n    
playgroundId: 'playground-1',\n  });\n\n  let prompt: HTMLElement;\n  await waitFor(() => {\n    prompt = screen.getByLabelText('prompt');\n    expect(prompt).toBeInTheDocument();\n  });\n  fireEvent.change(prompt!, { target: { value: 'prompt' } });\n  fireEvent.keyDown(prompt!, { key: 'Enter' });\n\n  await waitFor(() => {\n    prompt = screen.getByRole('button', { name: 'Send prompt' });\n    expect(prompt).toBeDisabled();\n    const input = screen.getByRole('textbox', { name: 'prompt' });\n    expect(input).toBeDisabled();\n  });\n});\n\ntest('receiving complete message should enable the input element', async () => {\n  vi.mocked(studioClient.submitPlaygroundMessage).mockResolvedValue(0);\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  let send: HTMLElement;\n  await waitFor(() => {\n    send = screen.getByRole('button', { name: 'Send prompt' });\n    expect(send).toBeInTheDocument();\n  });\n  fireEvent.click(send!);\n\n  await waitFor(() => {\n    send = screen.getByRole('button', { name: 'Send prompt' });\n    expect(send).toBeDisabled();\n    const input = screen.getByRole('textbox', { name: 'prompt' });\n    expect(input).toBeDisabled();\n  });\n\n  customConversations.set([\n    {\n      id: 'playground-1',\n      name: 'Playground 1',\n      modelId: 'model-1',\n      messages: [\n        {\n          role: 'user',\n          id: 'message-1',\n          content: 'a prompt',\n        } as UserChat,\n        {\n          role: 'assistant',\n          id: 'message-2',\n          content: 'a response',\n          completed: Date.now(),\n        } as AssistantChat,\n      ],\n      usage: {} as ModelUsage,\n      backend: InferenceType.LLAMA_CPP,\n    },\n  ]);\n\n  await waitFor(() => {\n    const input = screen.getByRole('textbox', { name: 'prompt' });\n    expect(input).toBeEnabled();\n  });\n});\n\ntest('sending prompt should display the prompt and the response', async () => {\n  
vi.mocked(studioClient.submitPlaygroundMessage).mockResolvedValue(0);\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  let send: HTMLElement;\n  await waitFor(() => {\n    send = screen.getByRole('button', { name: 'Send prompt' });\n    expect(send).toBeInTheDocument();\n  });\n  const textarea = screen.getByLabelText('prompt');\n  expect(textarea).toBeInTheDocument();\n  await userEvent.type(textarea, 'a question for the assistant');\n\n  fireEvent.click(send!);\n\n  customConversations.set([\n    {\n      id: 'playground-1',\n      name: 'Playground 1',\n      modelId: 'model-1',\n      messages: [\n        {\n          role: 'user',\n          id: 'message-1',\n          content: 'a question for the assistant',\n        } as UserChat,\n        {\n          role: 'assistant',\n          id: 'message-2',\n          content: 'a response from the ',\n          completed: false,\n        } as unknown as PendingChat,\n      ],\n      usage: {} as ModelUsage,\n      backend: InferenceType.LLAMA_CPP,\n    },\n  ]);\n\n  await waitFor(() => {\n    const conversation = screen.getByLabelText('conversation');\n    within(conversation).getByText('a question for the assistant');\n    within(conversation).getByText('a response from the');\n  });\n\n  customConversations.set([\n    {\n      id: 'playground-1',\n      name: 'Playground 1',\n      modelId: 'model-1',\n      messages: [\n        {\n          role: 'user',\n          id: 'message-1',\n          content: 'a question for the assistant',\n        } as UserChat,\n        {\n          role: 'assistant',\n          id: 'message-2',\n          content: 'a response from the assistant\\neat, sleep, code, repeat\\neat, sleep, code, repeat',\n          completed: Date.now(),\n        } as AssistantChat,\n      ],\n      usage: {} as ModelUsage,\n      backend: InferenceType.LLAMA_CPP,\n    },\n  ]);\n\n  await waitFor(() => {\n    const conversation = screen.getByLabelText('conversation');\n    
within(conversation).getByText('a question for the assistant');\n    within(conversation).getByText('a response from the assistant');\n  });\n});\n\ntest('user should be able to stop prompt', async () => {\n  vi.mocked(studioClient.submitPlaygroundMessage).mockResolvedValue(55);\n  render(Playground, {\n    playgroundId: 'playground-1',\n  });\n\n  let prompt: HTMLElement;\n  await waitFor(() => {\n    prompt = screen.getByLabelText('prompt');\n    expect(prompt).toBeInTheDocument();\n  });\n  fireEvent.change(prompt!, { target: { value: 'prompt' } });\n  fireEvent.keyDown(prompt!, { key: 'Enter' });\n\n  await waitFor(() => {\n    const stopBtn = screen.getByTitle('Stop');\n    expect(stopBtn).toBeDefined();\n\n    fireEvent.click(stopBtn);\n  });\n\n  await vi.waitFor(() => {\n    expect(studioClient.requestCancelToken).toHaveBeenCalledWith(55);\n  });\n});\n\ndescribe('error message', () => {\n  test('submitPlaygroundMessage reject should be displayed', async () => {\n    // mock reject\n    vi.mocked(studioClient.submitPlaygroundMessage).mockRejectedValue(new Error('dummy'));\n\n    const { getByRole } = render(Playground, {\n      playgroundId: 'playground-1',\n    });\n\n    // Get the input\n    const prompt: HTMLElement = await waitFor<HTMLElement>(() => {\n      const element = getByRole('textbox', { name: 'prompt' });\n      expect(element).toBeInTheDocument();\n      return element;\n    });\n\n    fireEvent.change(prompt, { target: { value: 'prompt' } });\n    fireEvent.keyDown(prompt, { key: 'Enter' });\n\n    // Get the error div\n    const error: HTMLElement = await waitFor<HTMLElement>(() => {\n      const element = getByRole('alert', { name: 'error' });\n      expect(element).toBeInTheDocument();\n      return element;\n    });\n\n    expect(error).toHaveTextContent('Error: dummy');\n  });\n\n  test('error message should display the content', async () => {\n    // mock conversation\n    vi.mocked(conversationsStore).conversations = writable([\n     
 {\n        id: 'playground-1',\n        name: 'Playground 1',\n        modelId: 'model-1',\n        messages: [\n          {\n            id: 'error-message-id',\n            error: 'something went wrong with the server',\n            timestamp: 55,\n          },\n        ],\n        backend: InferenceType.LLAMA_CPP,\n      },\n    ]);\n\n    const { getByRole } = render(Playground, {\n      playgroundId: 'playground-1',\n    });\n\n    // Get the error div\n    const error: HTMLElement = await waitFor<HTMLElement>(() => {\n      const element = getByRole('alert', { name: 'error' });\n      expect(element).toBeInTheDocument();\n      return element;\n    });\n\n    expect(error).toHaveTextContent('something went wrong with the server');\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/Playground.svelte",
    "content": "<script lang=\"ts\">\nimport { conversations } from '../stores/conversations';\nimport { studioClient } from '/@/utils/client';\nimport {\n  isAssistantChat,\n  isPendingChat,\n  isUserChat,\n  isSystemPrompt,\n  isChatMessage,\n  isErrorMessage,\n  isAssistantToolCall,\n  type Message,\n} from '@shared/models/IPlaygroundMessage';\nimport { catalog } from '../stores/catalog';\nimport ContentDetailsLayout from '../lib/ContentDetailsLayout.svelte';\nimport RangeInput from '../lib/RangeInput.svelte';\nimport Fa from 'svelte-fa';\n\nimport ChatMessage from '../lib/conversation/ChatMessage.svelte';\nimport SystemPromptBanner from '/@/lib/conversation/SystemPromptBanner.svelte';\nimport { inferenceServers } from '/@/stores/inferenceServers';\nimport { faCircleInfo, faPaperPlane, faStop } from '@fortawesome/free-solid-svg-icons';\nimport { Button, Tooltip, DetailsPage, StatusIcon } from '@podman-desktop/ui-svelte';\nimport { router } from 'tinro';\nimport ConversationActions from '../lib/conversation/ConversationActions.svelte';\nimport { ContainerIcon } from '@podman-desktop/ui-svelte/icons';\nimport ToolCallMessage from '/@/lib/conversation/ToolCallMessage.svelte';\nimport type { InferenceServer } from '@shared/models/IInference';\nimport type { ModelOptions } from '@shared/models/IModelOptions';\n\ninterface Props {\n  playgroundId: string;\n}\n\nlet { playgroundId }: Props = $props();\n\nlet prompt: string = $state('');\nlet scrollable: Element | undefined = $state();\nlet errorMsg = $state('');\nlet cancellationTokenId: number | undefined = $state(undefined);\n\n// settings\nlet temperature = $state(0.8);\nlet max_tokens = $state(-1);\nlet top_p = $state(0.5);\n\nlet conversation = $derived($conversations.find(conversation => conversation.id === playgroundId));\nlet messages = $derived(\n  conversation?.messages.filter(message => isChatMessage(message)).filter(message => !isSystemPrompt(message)) ?? 
[],\n);\nlet model = $derived($catalog.models.find(model => model.id === conversation?.modelId));\nlet completion_tokens = $derived(conversation?.usage?.completion_tokens ?? 0);\nlet prompt_tokens = $derived(conversation?.usage?.prompt_tokens ?? 0);\n\n// Find latest message of the conversation\nlet latest: Message | undefined = $derived(conversation?.messages[conversation.messages.length - 1]);\n\nlet inProgress = $state(false);\nlet sendEnabled = $derived.by(() => {\n  if (inProgress) {\n    return false;\n  }\n  if (latest) {\n    if (isSystemPrompt(latest) || (isAssistantChat(latest) && !isPendingChat(latest))) {\n      return true;\n    }\n    if (isErrorMessage(latest)) {\n      return true;\n    }\n  } else {\n    return true;\n  }\n  return false;\n});\n\n$effect(() => {\n  if (latest && isErrorMessage(latest)) {\n    errorMsg = latest.error;\n  }\n});\n\nlet server: InferenceServer | undefined = $derived(\n  $inferenceServers.find(is => !!conversation && is.models.map(mi => mi.id).includes(conversation?.modelId)),\n);\n\nfunction askPlayground(): void {\n  errorMsg = '';\n  inProgress = true;\n  const options: ModelOptions = {\n    temperature,\n    top_p,\n    stream_options: { include_usage: true },\n  };\n  if (max_tokens > 0) {\n    options.max_tokens = max_tokens;\n  }\n  studioClient\n    .submitPlaygroundMessage(playgroundId, prompt, options)\n    .then(token => {\n      cancellationTokenId = token;\n    })\n    .catch((err: unknown) => {\n      errorMsg = String(err);\n    })\n    .finally(() => {\n      inProgress = false;\n    });\n  prompt = '';\n}\n\n$effect(() => {\n  if (!conversation) {\n    router.goto('/playgrounds');\n    return;\n  }\n  if (!latest) {\n    return;\n  }\n  if (isUserChat(latest) || (isAssistantChat(latest) && isPendingChat(latest))) {\n    if (scrollable) scrollToBottom(scrollable).catch(err => console.error(`Error scrolling to bottom:`, err));\n  }\n});\n\nfunction requestFocus(element: HTMLElement): void {\n  
element.focus();\n}\n\nfunction handleKeydown(e: KeyboardEvent): void {\n  if (e.key === 'Enter') {\n    askPlayground();\n    e.preventDefault();\n  }\n}\n\nasync function scrollToBottom(element: Element): Promise<void> {\n  element.scroll?.({ top: element.scrollHeight, behavior: 'smooth' });\n}\n\nfunction isHealthy(status?: string, health?: string): boolean {\n  return status === 'running' && (!health || health === 'healthy');\n}\n\nfunction getStatusForIcon(status?: string, health?: string): string {\n  switch (status) {\n    case 'running':\n      switch (health) {\n        case 'healthy':\n          return 'RUNNING';\n        case 'starting':\n          return 'STARTING';\n        default:\n          return 'NOT-RUNNING';\n      }\n    default:\n      return 'NOT-RUNNING';\n  }\n}\n\nfunction getStatusText(status?: string, health?: string): string {\n  switch (status) {\n    case 'running':\n      switch (health) {\n        case 'healthy':\n          return 'Model Service running';\n        case 'starting':\n          return 'Model Service starting';\n        default:\n          return 'Model Service not running';\n      }\n    default:\n      return 'Model Service not running';\n  }\n}\n\nfunction getSendPromptTitle(sendEnabled: boolean, status?: string, health?: string): string | undefined {\n  if (!isHealthy(status, health)) {\n    return getStatusText(status, health);\n  } else if (!sendEnabled) {\n    return 'Please wait, assistant is replying';\n  }\n  return undefined;\n}\n\nexport function goToUpPage(): void {\n  router.goto('/playgrounds');\n}\n\nfunction handleOnClick(): void {\n  if (cancellationTokenId) {\n    studioClient\n      .requestCancelToken(cancellationTokenId)\n      .catch(err => console.error(`Error request cancel token ${cancellationTokenId}`, err));\n  }\n}\n</script>\n\n{#if conversation}\n  <div class=\"overflow-auto h-full\">\n    <DetailsPage\n      title={conversation?.name}\n      breadcrumbLeftPart=\"Playgrounds\"\n      
breadcrumbRightPart={conversation?.name}\n      onclose={goToUpPage}\n      onbreadcrumbClick={goToUpPage}>\n      {#snippet iconSnippet()}\n        <div class=\"mr-3\">\n          <StatusIcon\n            icon={ContainerIcon}\n            size={24}\n            status={getStatusForIcon(server?.status, server?.health?.Status)} />\n        </div>\n      {/snippet}\n      {#snippet subtitleSnippet()}\n        <div class=\"flex gap-x-2 items-center text-[var(--pd-content-sub-header)]\">\n          {#if model}\n            <div class=\"text-sm\" aria-label=\"Model name\">\n              <a href=\"/model/{model.id}\">{model.name}</a>\n            </div>\n          {/if}\n        </div>\n      {/snippet}\n      {#snippet actionsSnippet()}\n        <ConversationActions detailed conversation={conversation} />\n      {/snippet}\n      {#snippet contentSnippet()}\n        <div class=\"flex flex-col w-full h-full bg-[var(--pd-content-bg)]\">\n          <div class=\"h-full overflow-auto\" bind:this={scrollable}>\n            <ContentDetailsLayout\n              detailsTitle=\"Settings\"\n              detailsLabel=\"settings\"\n              detailsSummary=\"Playground Settings: edit model parameters and view metrics\">\n              <svelte:fragment slot=\"content\">\n                <div class=\"flex flex-col w-full h-full grow overflow-auto\">\n                  <div aria-label=\"conversation\" class=\"w-full h-full\">\n                    {#if conversation}\n                      <!-- Show a banner for the system prompt -->\n                      {#key conversation.messages.length}\n                        <SystemPromptBanner conversation={conversation} />\n                      {/key}\n                      <!-- show all message except the system prompt -->\n                      <ul>\n                        {#each messages as message (message.id)}\n                          <li>\n                            {#if isAssistantToolCall(message)}\n                           
   <ToolCallMessage message={message} />\n                            {:else}\n                              <ChatMessage message={message} />\n                            {/if}\n                          </li>\n                        {/each}\n                      </ul>\n                    {/if}\n                  </div>\n                </div>\n              </svelte:fragment>\n              <svelte:fragment slot=\"details\">\n                <div class=\"text-[var(--pd-content-card-text)]\">Next prompt will use these settings</div>\n                <div\n                  class=\"bg-[var(--pd-content-card-inset-bg)] text-[var(--pd-content-card-text)] w-full rounded-md p-4\">\n                  <div class=\"mb-4 flex flex-col\">Model Parameters</div>\n                  <div class=\"flex flex-col space-y-4\" aria-label=\"parameters\">\n                    <div class=\"flex flex-row\">\n                      <div class=\"w-full\">\n                        <RangeInput name=\"temperature\" min=\"0\" max=\"2\" step=\"0.1\" bind:value={temperature} />\n                      </div>\n                      <Tooltip left>\n                        <Fa class=\"text-[var(--pd-content-card-icon)]\" icon={faCircleInfo} />\n                        <svelte:fragment slot=\"tip\">\n                          <div class=\"inline-block py-2 px-4 rounded-md\" aria-label=\"tooltip\">\n                            What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the\n                            output more random, while lower values like 0.2 will make it more focused and deterministic.\n                          </div>\n                        </svelte:fragment>\n                      </Tooltip>\n                    </div>\n                    <div class=\"flex flex-row\">\n                      <div class=\"w-full\">\n                        <RangeInput name=\"max tokens\" min=\"-1\" max=\"32768\" step=\"1\" bind:value={max_tokens} />\n                      </div>\n                      <Tooltip left>\n                        <Fa class=\"text-[var(--pd-content-card-icon)]\" icon={faCircleInfo} />\n                        <svelte:fragment slot=\"tip\">\n                          <div class=\"inline-block py-2 px-4 rounded-md\" aria-label=\"tooltip\">\n                            The maximum number of tokens that can be generated in the chat completion.\n                          </div>\n                        </svelte:fragment>\n                      </Tooltip>\n                    </div>\n                    <div class=\"flex flex-row\">\n                      <div class=\"w-full\">\n                        <RangeInput name=\"top-p\" min=\"0\" max=\"1\" step=\"0.1\" bind:value={top_p} />\n                      </div>\n                      <Tooltip left>\n                        <Fa class=\"text-[var(--pd-content-card-icon)]\" icon={faCircleInfo} />\n                        <svelte:fragment slot=\"tip\">\n                          <div class=\"inline-block py-2 px-4 rounded-md\" aria-label=\"tooltip\">\n                            An alternative to sampling with temperature, where the model considers the results of the\n                            tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10%\n                            probability mass are considered.\n                          </div>\n                        </svelte:fragment>\n                      </Tooltip>\n                    </div>\n                  </div>\n                </div>\n                <div class=\"text-[var(--pd-content-card-text)]\">Model metrics</div>\n                <div\n                  class=\"bg-[var(--pd-content-card-inset-bg)] text-[var(--pd-content-card-text)] w-full rounded-md p-4\">\n                  <div class=\"flex flex-col space-y-4\" aria-label=\"metrics\">\n                    <div class=\"flex flex-row\">\n                      <div class=\"w-full\">\n                        PROMPT TOKENS\n                        <div class=\"flex flex-row\">\n                          {prompt_tokens}\n                        </div>\n                      </div>\n                      <Tooltip left>\n                        <Fa class=\"text-[var(--pd-content-card-icon)]\" icon={faCircleInfo} />\n                        <svelte:fragment slot=\"tip\">\n                          <div class=\"inline-block py-2 px-4 rounded-md\" aria-label=\"tooltip\">\n                            The number of tokens in the prompt is used as input to the model.\n                          </div>\n                        </svelte:fragment>\n                      </Tooltip>\n                    </div>\n                    <div class=\"flex flex-row\">\n                      <div class=\"w-full\">\n                        COMPLETION TOKENS\n                        <div class=\"flex flex-row\">\n                          {completion_tokens}\n                        </div>\n                      </div>\n                      <Tooltip left>\n                        <Fa class=\"text-[var(--pd-content-card-icon)]\" icon={faCircleInfo} />\n                        <svelte:fragment slot=\"tip\">\n                          <div class=\"inline-block py-2 px-4 
rounded-md\" aria-label=\"tooltip\">\n                            The number of tokens in the model's output to the prompt that has been used as an input to\n                            the model.\n                          </div>\n                        </svelte:fragment>\n                      </Tooltip>\n                    </div>\n                  </div>\n                </div>\n              </svelte:fragment>\n            </ContentDetailsLayout>\n          </div>\n          {#if errorMsg}\n            <div class=\"text-[var(--pd-input-field-error-text)] p-2\" aria-label=\"error\" role=\"alert\">{errorMsg}</div>\n          {/if}\n          <div class=\"flex flex-row flex-none w-full px-4 py-2 bg-[var(--pd-content-card-bg)]\">\n            <textarea\n              aria-label=\"prompt\"\n              bind:value={prompt}\n              use:requestFocus\n              onkeydown={handleKeydown}\n              rows=\"2\"\n              class=\"w-full p-2 outline-hidden rounded-xs bg-[var(--pd-content-card-inset-bg)] text-[var(--pd-content-card-text)] placeholder-[var(--pd-content-card-text)]\"\n              placeholder=\"Type your prompt here\"\n              disabled={!sendEnabled}></textarea>\n\n            <div class=\"flex-none text-right m-4\">\n              {#if !sendEnabled && cancellationTokenId !== undefined}\n                <Button title=\"Stop\" icon={faStop} type=\"secondary\" on:click={handleOnClick} />\n              {:else}\n                <Button\n                  inProgress={!sendEnabled}\n                  disabled={!isHealthy(server?.status, server?.health?.Status) || !prompt?.length}\n                  on:click={askPlayground}\n                  icon={faPaperPlane}\n                  type=\"secondary\"\n                  title={getSendPromptTitle(sendEnabled, server?.status, server?.health?.Status)}\n                  aria-label=\"Send prompt\"></Button>\n              {/if}\n            </div>\n          </div>\n        </div>\n      
{/snippet}\n    </DetailsPage>\n  </div>\n{/if}\n"
  },
  {
    "path": "packages/frontend/src/pages/PlaygroundCreate.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { render, within } from '@testing-library/svelte';\nimport { expect, test, vi, beforeEach } from 'vitest';\nimport { studioClient } from '../utils/client';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { writable } from 'svelte/store';\nimport userEvent from '@testing-library/user-event';\nimport * as tasksStore from '/@/stores/tasks';\nimport * as modelsInfoStore from '/@/stores/modelsInfo';\nimport type { Task } from '@shared/models/ITask';\nimport PlaygroundCreate from './PlaygroundCreate.svelte';\nimport { InferenceType } from '@shared/models/IInference';\nimport * as path from 'node:path';\nimport * as os from 'node:os';\n\nconst dummyLlamaCppModel: ModelInfo = {\n  id: 'llama-cpp-model-id',\n  name: 'Dummy LlamaCpp model',\n  file: {\n    file: 'file',\n    path: path.resolve(os.tmpdir(), 'path'),\n  },\n  properties: {},\n  description: '',\n  backend: InferenceType.LLAMA_CPP,\n};\n\nconst dummyWhisperCppModel: ModelInfo = {\n  id: 'whisper-cpp-model-id',\n  name: 'Dummy Whisper model',\n  file: {\n    file: 'file',\n    path: path.resolve(os.tmpdir(), 'path'),\n  },\n  properties: 
{},\n  description: '',\n  backend: InferenceType.WHISPER_CPP,\n};\n\nconst dummyOpenVinoModel: ModelInfo = {\n  id: 'openvino-model-id',\n  name: 'Dummy Openvino model',\n  file: {\n    file: 'file',\n    path: path.resolve(os.tmpdir(), 'path'),\n  },\n  properties: {},\n  description: '',\n  backend: InferenceType.OPENVINO,\n};\n\nvi.mock('../utils/client', async () => {\n  return {\n    studioClient: {\n      requestCreatePlayground: vi.fn(),\n      getExtensionConfiguration: vi.fn().mockResolvedValue({}),\n      getRegisteredProviders: vi.fn().mockResolvedValue([]),\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  };\n});\n\nvi.mock('/@/stores/tasks', async () => {\n  return {\n    tasks: vi.fn(),\n  };\n});\n\nvi.mock('/@/stores/modelsInfo', async () => {\n  return {\n    modelsInfo: vi.fn(),\n  };\n});\n\nbeforeEach(() => {\n  window.HTMLElement.prototype.scrollIntoView = vi.fn();\n\n  const tasksList = writable<Task[]>([]);\n  vi.mocked(tasksStore).tasks = tasksList;\n  vi.mocked(studioClient.getRegisteredProviders).mockResolvedValue([\n    InferenceType.LLAMA_CPP,\n    InferenceType.WHISPER_CPP,\n    InferenceType.OPENVINO,\n  ]);\n});\n\ntest('model should be selected by default when runtime is set', async () => {\n  const modelsInfoList = writable<ModelInfo[]>([dummyLlamaCppModel]);\n  vi.mocked(modelsInfoStore).modelsInfo = modelsInfoList;\n\n  vi.mocked(studioClient.requestCreatePlayground).mockRejectedValue('error creating playground');\n\n  const { container } = render(PlaygroundCreate, { props: { exclude: [InferenceType.NONE] } });\n\n  // Select our runtime\n  const dropdown = within(container).getByLabelText('Select Inference Runtime');\n  await userEvent.click(dropdown);\n\n  const llamacppOption = within(container).getByText(InferenceType.LLAMA_CPP);\n  await userEvent.click(llamacppOption);\n\n  const model = 
within(container).getByText(dummyLlamaCppModel.name);\n  expect(model).toBeInTheDocument();\n});\n\ntest('selecting a runtime filters the displayed models', async () => {\n  const modelsInfoList = writable<ModelInfo[]>([dummyLlamaCppModel, dummyWhisperCppModel, dummyOpenVinoModel]);\n  vi.mocked(modelsInfoStore).modelsInfo = modelsInfoList;\n\n  const { container } = render(PlaygroundCreate, { props: { exclude: [InferenceType.NONE] } });\n\n  // Select our runtime\n  const dropdown = within(container).getByLabelText('Select Inference Runtime');\n  await userEvent.click(dropdown);\n\n  const openvinoOption = within(container).getByText(InferenceType.OPENVINO);\n  await userEvent.click(openvinoOption);\n\n  expect(within(container).queryByText(dummyOpenVinoModel.name)).toBeInTheDocument();\n  expect(within(container).queryByText(dummyLlamaCppModel.name)).toBeNull();\n  expect(within(container).queryByText(dummyWhisperCppModel.name)).toBeNull();\n});\n\ntest('should show warning when no local models are available', () => {\n  const modelsInfoList = writable<ModelInfo[]>([]);\n  vi.mocked(modelsInfoStore).modelsInfo = modelsInfoList;\n\n  const { container } = render(PlaygroundCreate);\n\n  const warning = within(container).getByText(/You don't have any models downloaded/);\n  expect(warning).toBeInTheDocument();\n});\n\ntest('should display error message if createPlayground fails', async () => {\n  const modelsInfoList = writable<ModelInfo[]>([dummyLlamaCppModel]);\n  vi.mocked(modelsInfoStore).modelsInfo = modelsInfoList;\n\n  vi.mocked(studioClient.requestCreatePlayground).mockRejectedValue('error creating playground');\n\n  const { container } = render(PlaygroundCreate);\n\n  const errorMessage = within(container).queryByLabelText('Error Message Content');\n  expect(errorMessage).not.toBeInTheDocument();\n\n  // Select the runtime first\n  const runtimeDropdown = within(container).getByLabelText('Select Inference Runtime');\n  await 
userEvent.click(runtimeDropdown);\n\n  const runtimeOption = within(container).getByText(InferenceType.LLAMA_CPP);\n  await userEvent.click(runtimeOption);\n\n  const createButton = within(container).getByTitle('Create playground');\n  await userEvent.click(createButton);\n\n  const errorMessageAfterSubmit = within(container).queryByLabelText('Error Message Content');\n  expect(errorMessageAfterSubmit).toBeInTheDocument();\n  expect(errorMessageAfterSubmit?.textContent).equal('error creating playground');\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/PlaygroundCreate.svelte",
    "content": "<script lang=\"ts\">\nimport { faExclamationCircle, faInfoCircle, faPlus, faPlusCircle } from '@fortawesome/free-solid-svg-icons';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { modelsInfo } from '/@/stores/modelsInfo';\nimport Fa from 'svelte-fa';\nimport { studioClient } from '../utils/client';\nimport { router } from 'tinro';\nimport { onDestroy, onMount } from 'svelte';\nimport type { Task } from '@shared/models/ITask';\nimport TasksProgress from '../lib/progress/TasksProgress.svelte';\nimport { tasks } from '../stores/tasks';\nimport { filterByLabel } from '../utils/taskUtils';\nimport type { Unsubscriber } from 'svelte/store';\nimport { Button, ErrorMessage, FormPage, Input } from '@podman-desktop/ui-svelte';\nimport ModelSelect from '/@/lib/select/ModelSelect.svelte';\nimport { InferenceType } from '@shared/models/IInference';\nimport InferenceRuntimeSelect from '/@/lib/select/InferenceRuntimeSelect.svelte';\nimport { configuration } from '../stores/extensionConfiguration';\n\n// Get recommended runtime\nlet runtime: InferenceType | undefined = undefined;\n\n// Exclude certain runtimes from selection\nexport let exclude: InferenceType[] = [InferenceType.NONE, InferenceType.WHISPER_CPP];\n\n// Get registered list of providers\nlet providers: InferenceType[] = [];\n\nonMount(async () => {\n  providers = await studioClient.getRegisteredProviders();\n\n  const inferenceRuntime = $configuration?.inferenceRuntime;\n  if (\n    Object.values(InferenceType).includes(inferenceRuntime as InferenceType) &&\n    !exclude.includes(inferenceRuntime as InferenceType)\n  ) {\n    runtime = inferenceRuntime as InferenceType;\n  }\n});\n\nlet localModels: ModelInfo[];\n$: localModels = $modelsInfo.filter(\n  model => model.file && (!runtime || model.backend === runtime) && !exclude.includes(model.backend as InferenceType),\n);\n$: availModels = $modelsInfo.filter(model => !model.file);\nlet model: ModelInfo | undefined = undefined;\nlet 
submitted: boolean = false;\nlet playgroundName: string;\nlet errorMsg: string | undefined = undefined;\n\n// The tracking id is a unique identifier provided by the\n// backend when calling requestCreateInferenceServer\nlet trackingId: string | undefined = undefined;\n\n// The trackedTasks are the tasks linked to the trackingId\nlet trackedTasks: Task[] = [];\n\n// Preset model selection depending on runtime\n$: if (localModels.length > 0) {\n  model = localModels[0];\n} else {\n  model = undefined;\n}\n\nfunction openModelsPage(): void {\n  router.goto(`/models`);\n}\n\n// Navigate to the new created playground environment\nconst openPlaygroundPage = (playgroundId: string): void => {\n  router.goto(`/playground/${playgroundId}`);\n};\n\nfunction onNameInput(event: Event): void {\n  playgroundName = (event.target as HTMLInputElement).value || '';\n}\n\nasync function submit(): Promise<void> {\n  errorMsg = undefined;\n  if (model === undefined) throw new Error('model id not valid.');\n  // disable submit button\n  submitted = true;\n  try {\n    // Using || and not && as we want to have the empty string systemPrompt passed as undefined\n    trackingId = await studioClient.requestCreatePlayground(playgroundName, model);\n  } catch (err: unknown) {\n    trackingId = undefined;\n    console.error('Something wrong while trying to create the playground.', err);\n    errorMsg = String(err);\n    submitted = false;\n  }\n}\n\n// Utility method to filter the tasks properly based on the tracking Id\nconst processTasks = (tasks: Task[]): void => {\n  if (!trackingId) {\n    trackedTasks = [];\n    return;\n  }\n\n  trackedTasks = filterByLabel(tasks, {\n    trackingId: trackingId,\n  });\n\n  // Check for errors\n  // hint: we do not need to display them as the TasksProgress component will\n  const error = trackedTasks.find(task => task.error)?.error !== undefined;\n  if (error) {\n    submitted = false;\n  }\n\n  const task: Task | undefined = trackedTasks.find(task => 
'playgroundId' in (task.labels ?? {}));\n  if (task === undefined) return;\n\n  const playgroundId = task.labels?.['playgroundId'];\n  if (playgroundId) {\n    openPlaygroundPage(playgroundId);\n  }\n};\n\nlet unsubscribeTasks: Unsubscriber;\nonMount(() => {\n  unsubscribeTasks = tasks.subscribe(tasks => {\n    processTasks(tasks);\n  });\n});\n\nonDestroy(() => {\n  unsubscribeTasks?.();\n});\n\nexport function goToUpPage(): void {\n  router.goto('/playgrounds');\n}\n</script>\n\n<FormPage\n  title=\"New Playground environment\"\n  breadcrumbLeftPart=\"Playgrounds\"\n  breadcrumbRightPart=\"New Playground environment\"\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  <!-- Removed breadcrumbTitle above, no longer needed for svelte 5 formpage -->\n  {#snippet icon()}\n    <div class=\"rounded-full w-8 h-8 flex items-center justify-center\">\n      <Fa size=\"1.125x\" class=\"text-[var(--pd-content-header-icon)]\" icon={faPlus} />\n    </div>\n  {/snippet}\n\n  {#snippet content()}\n    <div class=\"flex flex-col w-full\">\n      <!-- tasks tracked -->\n      {#if trackedTasks.length > 0}\n        <div class=\"mx-5 mt-5\" role=\"status\">\n          <TasksProgress tasks={trackedTasks} />\n        </div>\n      {/if}\n\n      <!-- form -->\n      <div class=\"bg-[var(--pd-content-card-bg)] m-5 pt-5 space-y-6 px-8 sm:pb-6 xl:pb-8 rounded-lg h-fit\">\n        <div class=\"w-full\">\n          <!-- playground name input -->\n          <label for=\"playgroundName\" class=\"block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n            >Playground name</label>\n          <Input\n            disabled={submitted}\n            id=\"playgroundName\"\n            class=\"w-full\"\n            name=\"playgroundName\"\n            on:input={onNameInput}\n            placeholder=\"Leave blank to generate a name\"\n            aria-label=\"playgroundName\" />\n\n          <!-- inference runtime -->\n          <label for=\"inference-runtime\" class=\"pt-4 
block mb-2 font-bold text-[var(--pd-content-card-header-text)]\">\n            Inference Runtime\n          </label>\n          <InferenceRuntimeSelect bind:value={runtime} providers={providers} exclude={exclude} />\n\n          <!-- model input -->\n          <label for=\"model\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\">Model</label>\n          <ModelSelect models={localModels} disabled={submitted} bind:value={model} />\n          {#if localModels.length === 0}\n            <div class=\"text-red-500 p-1 flex flex-row items-center\">\n              <Fa size=\"1.1x\" class=\"cursor-pointer text-red-500\" icon={faExclamationCircle} />\n              <div role=\"alert\" aria-label=\"Error Message Content\" class=\"ml-2\">\n                You don't have any models downloaded. You can download them in <a\n                  href=\"javascript:void(0);\"\n                  class=\"underline\"\n                  title=\"Models page\"\n                  on:click={openModelsPage}>models page</a\n                >.\n              </div>\n            </div>\n          {:else if availModels.length > 0}\n            <div class=\"text-sm p-1 flex flex-row items-center text-[var(--pd-content-card-text)]\">\n              <Fa size=\"1.1x\" class=\"cursor-pointer\" icon={faInfoCircle} />\n              <div role=\"alert\" aria-label=\"Info Message Content\" class=\"ml-2\">\n                Other models are available, but must be downloaded from the <a\n                  href=\"javascript:void(0);\"\n                  class=\"underline\"\n                  title=\"Models page\"\n                  on:click={openModelsPage}>models page</a\n                >.\n              </div>\n            </div>\n          {/if}\n        </div>\n        {#if errorMsg !== undefined}\n          <ErrorMessage error={errorMsg} />\n        {/if}\n        <footer>\n          <div class=\"w-full flex flex-col\">\n            <Button\n              title=\"Create 
playground\"\n              inProgress={submitted}\n              on:click={submit}\n              disabled={!model}\n              icon={faPlusCircle}>\n              Create playground\n            </Button>\n          </div>\n        </footer>\n      </div>\n    </div>\n  {/snippet}\n</FormPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/Playgrounds.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { vi, test, expect } from 'vitest';\nimport { screen, render, within } from '@testing-library/svelte';\nimport Playgrounds from '/@/pages/Playgrounds.svelte';\nimport * as catalogStore from '/@/stores/catalog';\nimport { readable } from 'svelte/store';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    conversationSubscribeMock: vi.fn(),\n    conversationsQueriesMock: {\n      subscribe: (f: (msg: unknown) => void) => {\n        f(mocks.conversationSubscribeMock());\n        return (): void => {};\n      },\n    },\n  };\n});\n\nvi.mock('/@/utils/client', async () => {\n  return {\n    studioClient: {\n      getCatalog: vi.fn(),\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  };\n});\n\nvi.mock('../stores/conversations', async () => {\n  return {\n    conversations: mocks.conversationsQueriesMock,\n  };\n});\n\nvi.mock('/@/stores/catalog', async () => {\n  return {\n    catalog: vi.fn(),\n  };\n});\n\nconst initialCatalog: ApplicationCatalog = {\n  categories: [],\n  models: 
[\n    {\n      id: 'model1',\n      name: 'Model 1',\n      description: 'Readme for model 1',\n      registry: 'Hugging Face',\n      license: '?',\n      url: 'https://model1.example.com',\n      memory: 1000,\n    },\n  ],\n  recipes: [],\n};\n\ntest('should display There is no playground yet', async () => {\n  mocks.conversationSubscribeMock.mockResolvedValue([]);\n  render(Playgrounds);\n\n  const title = screen.getByText('No Playground Environment');\n  expect(title).toBeDefined();\n});\n\ntest('should display one model', async () => {\n  mocks.conversationSubscribeMock.mockReturnValue([\n    {\n      id: 'playground-0',\n      name: 'Playground 0',\n      modelId: 'model1',\n      messages: [],\n    },\n  ]);\n  vi.mocked(catalogStore).catalog = readable(initialCatalog);\n\n  render(Playgrounds);\n\n  const table = screen.getByRole('table');\n  expect(table).toBeDefined();\n  screen.debug(table, 4096);\n\n  const rows = screen.queryAllByRole('row');\n  expect(rows.length > 0).toBeTruthy();\n\n  const icon = await within(rows[1]).findByRole('img');\n  expect(icon).toBeDefined();\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/Playgrounds.svelte",
    "content": "<script lang=\"ts\">\nimport { router } from 'tinro';\nimport PlaygroundColumnModel from '../lib/table/playground/PlaygroundColumnModel.svelte';\nimport PlaygroundColumnName from '../lib/table/playground/PlaygroundColumnName.svelte';\nimport ConversationColumnAction from '/@/lib/table/playground/ConversationColumnAction.svelte';\nimport { conversations, type ConversationWithBackend } from '/@/stores/conversations';\nimport PlaygroundColumnIcon from '/@/lib/table/playground/PlaygroundColumnIcon.svelte';\nimport { Button, EmptyScreen, Table, TableColumn, TableRow, NavPage } from '@podman-desktop/ui-svelte';\nimport { faMessage, faPlusCircle } from '@fortawesome/free-solid-svg-icons';\nimport PlaygroundColumnRuntime from '../lib/table/playground/PlaygroundColumnRuntime.svelte';\n\nconst columns = [\n  new TableColumn<unknown>('', { width: '40px', renderer: PlaygroundColumnIcon }),\n  new TableColumn<ConversationWithBackend>('Name', { width: '1fr', renderer: PlaygroundColumnName }),\n  new TableColumn<ConversationWithBackend>('Model', { width: '1fr', renderer: PlaygroundColumnModel }),\n  new TableColumn<ConversationWithBackend>('Runtime', { width: '90px', renderer: PlaygroundColumnRuntime }),\n  new TableColumn<ConversationWithBackend>('Actions', {\n    width: '80px',\n    renderer: ConversationColumnAction,\n    align: 'right',\n  }),\n];\nconst row = new TableRow<ConversationWithBackend>({});\n\nfunction createNewPlayground(): void {\n  router.goto('/playground/create');\n}\n</script>\n\n<NavPage title=\"Playground Environments\" searchEnabled={false}>\n  {#snippet additionalActions()}\n    <Button icon={faPlusCircle} on:click={createNewPlayground}>New Playground</Button>\n  {/snippet}\n\n  {#snippet content()}\n    <div class=\"flex min-w-full\">\n      {#if $conversations.length > 0}\n        <Table kind=\"playground\" data={$conversations} columns={columns} row={row}></Table>\n      {:else}\n        <EmptyScreen\n          icon={faMessage}\n       
   title=\"No Playground Environment\"\n          message=\"Playground environments allow for experimenting with available models in a local environment. An intuitive user prompt helps in exploring the capabilities and accuracy of various models and aids in finding the best model for the use case at hand.\">\n          <div class=\"flex gap-2 justify-center\">\n            <Button type=\"link\" on:click={createNewPlayground}>Create playground</Button>\n          </div>\n        </EmptyScreen>\n      {/if}\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/Preferences.svelte",
    "content": "<script lang=\"ts\">\n</script>\n\n<div>Preferences</div>\n"
  },
  {
    "path": "packages/frontend/src/pages/Recipe.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { vi, test, expect, beforeEach } from 'vitest';\nimport { screen, render } from '@testing-library/svelte';\nimport Recipe from './Recipe.svelte';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport * as catalogStore from '/@/stores/catalog';\nimport { readable, writable } from 'svelte/store';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    getCatalogMock: vi.fn(),\n    getPullingStatusesMock: vi.fn(),\n    pullApplicationMock: vi.fn(),\n    telemetryLogUsageMock: vi.fn(),\n    getApplicationsStateMock: vi.fn(),\n    getLocalRepositoriesMock: vi.fn(),\n    getTasksMock: vi.fn(),\n    getModelsInfo: vi.fn(),\n  };\n});\n\nvi.mock('../stores/tasks', () => ({\n  tasks: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f(mocks.getTasksMock());\n      return (): void => {};\n    },\n  },\n}));\n\nvi.mock('../stores/localRepositories', () => ({\n  localRepositories: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f(mocks.getLocalRepositoriesMock());\n      return (): void => {};\n    },\n  },\n}));\n\nvi.mock('../utils/client', async () => {\n  return {\n    
studioClient: {\n      getCatalog: mocks.getCatalogMock,\n      getPullingStatuses: mocks.getPullingStatusesMock,\n      pullApplication: mocks.pullApplicationMock,\n      telemetryLogUsage: mocks.telemetryLogUsageMock,\n      getApplicationsState: mocks.getApplicationsStateMock,\n      getModelsInfo: mocks.getModelsInfo,\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  };\n});\n\nvi.mock('/@/stores/catalog', async () => {\n  return {\n    catalog: vi.fn(),\n  };\n});\n\nconst initialCatalog: ApplicationCatalog = {\n  categories: [],\n  models: [\n    {\n      id: 'model1',\n      name: 'Model 1',\n      description: 'Readme for model 1',\n      registry: 'Hugging Face',\n      license: '?',\n      url: 'https://model1.example.com',\n      memory: 1000,\n    },\n    {\n      id: 'model2',\n      name: 'Model 2',\n      description: 'Readme for model 2',\n      registry: 'Civital',\n      license: '?',\n      url: '',\n      memory: 1000,\n    },\n  ],\n  recipes: [\n    {\n      id: 'recipe 1',\n      name: 'Recipe 1',\n      readme: 'readme 1',\n      categories: [],\n      recommended: ['model1', 'model2'],\n      description: 'description 1',\n      repository: 'repo 1',\n    },\n    {\n      id: 'recipe 2',\n      name: 'Recipe 2',\n      readme: 'readme 2',\n      categories: [],\n      description: 'description 2',\n      repository: 'repo 2',\n    },\n  ],\n};\n\nconst updatedCatalog: ApplicationCatalog = {\n  categories: [],\n  models: [\n    {\n      id: 'model1',\n      name: 'Model 1',\n      description: 'Readme for model 1',\n      registry: 'Hugging Face',\n      license: '?',\n      url: 'https://model1.example.com',\n      memory: 1000,\n    },\n    {\n      id: 'model2',\n      name: 'Model 2',\n      description: 'Readme for model 2',\n      registry: 'Civital',\n      license: '?',\n      url: '',\n      memory: 1000,\n    },\n  ],\n  recipes: 
[\n    {\n      id: 'recipe 1',\n      name: 'New Recipe Name',\n      readme: 'readme 1',\n      categories: [],\n      recommended: ['model1', 'model2'],\n      description: 'description 1',\n      repository: 'repo 1',\n    },\n    {\n      id: 'recipe 2',\n      name: 'Recipe 2',\n      readme: 'readme 2',\n      categories: [],\n      description: 'description 2',\n      repository: 'repo 2',\n    },\n  ],\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n  mocks.getLocalRepositoriesMock.mockReturnValue([]);\n  mocks.getTasksMock.mockReturnValue([]);\n  mocks.telemetryLogUsageMock.mockReturnValue(Promise.resolve());\n});\n\ntest('should display recipe information', async () => {\n  vi.mocked(catalogStore).catalog = readable<ApplicationCatalog>(initialCatalog);\n  mocks.getApplicationsStateMock.mockResolvedValue([]);\n  mocks.getPullingStatusesMock.mockResolvedValue([]);\n  mocks.getModelsInfo.mockResolvedValue([]);\n  render(Recipe, {\n    recipeId: 'recipe 1',\n  });\n\n  expect(screen.queryAllByText('Recipe 1').length).greaterThan(0);\n  expect(screen.queryAllByText('readme 1').length).greaterThan(0);\n});\n\ntest('should display updated recipe information', async () => {\n  mocks.getApplicationsStateMock.mockResolvedValue([]);\n  const customCatalog = writable<ApplicationCatalog>(initialCatalog);\n  vi.mocked(catalogStore).catalog = customCatalog;\n  mocks.getPullingStatusesMock.mockResolvedValue([]);\n  mocks.getModelsInfo.mockResolvedValue([]);\n  render(Recipe, {\n    recipeId: 'recipe 1',\n  });\n\n  expect(screen.queryAllByText('Recipe 1').length).greaterThan(0);\n  expect(screen.queryAllByText('readme 1').length).greaterThan(0);\n\n  customCatalog.set(updatedCatalog);\n  await new Promise(resolve => setTimeout(resolve, 10));\n  expect(screen.queryAllByText('New Recipe Name').length).greaterThan(0);\n});\n\ntest('should send telemetry data', async () => {\n  mocks.getApplicationsStateMock.mockResolvedValue([]);\n  vi.mocked(catalogStore).catalog = 
readable<ApplicationCatalog>(initialCatalog);\n  mocks.getPullingStatusesMock.mockResolvedValue([]);\n  mocks.pullApplicationMock.mockResolvedValue(undefined);\n  mocks.getModelsInfo.mockResolvedValue([]);\n  render(Recipe, {\n    recipeId: 'recipe 1',\n  });\n  await new Promise(resolve => setTimeout(resolve, 200));\n\n  expect(mocks.telemetryLogUsageMock).toHaveBeenNthCalledWith(1, 'recipe.open', {\n    'recipe.id': 'recipe 1',\n    'recipe.name': 'Recipe 1',\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/Recipe.svelte",
    "content": "<script lang=\"ts\">\nimport { studioClient } from '/@/utils/client';\nimport { DetailsPage, Tab, Button, EmptyScreen } from '@podman-desktop/ui-svelte';\nimport Card from '/@/lib/Card.svelte';\nimport MarkdownRenderer from '/@/lib/markdown/MarkdownRenderer.svelte';\nimport { getIcon } from '/@/utils/categoriesUtils';\nimport { catalog } from '/@/stores/catalog';\nimport RecipeDetails from '/@/lib/RecipeDetails.svelte';\nimport ContentDetailsLayout from '../lib/ContentDetailsLayout.svelte';\nimport { router } from 'tinro';\nimport { faRocket } from '@fortawesome/free-solid-svg-icons';\nimport Fa from 'svelte-fa';\nimport Route from '/@/Route.svelte';\nimport ApplicationTable from '/@/lib/table/application/ApplicationTable.svelte';\nimport TasksBanner from '/@/lib/progress/TasksBanner.svelte';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nexport let recipeId: string;\n\n// The recipe model provided\n$: recipe = $catalog.recipes.find(r => r.id === recipeId);\n$: categories = $catalog.categories;\n\n// Send recipe info to telemetry\nlet recipeTelemetry: string | undefined = undefined;\n$: if (recipe && recipe.id !== recipeTelemetry) {\n  recipeTelemetry = recipe.id;\n  studioClient\n    .telemetryLogUsage('recipe.open', { 'recipe.id': recipe.id, 'recipe.name': recipe.name })\n    .catch(err => console.error(`Error reporting telemetry:`, err));\n}\n\nexport function goToUpPage(): void {\n  router.goto('/recipes');\n}\n\nfunction handleOnClick(): void {\n  router.goto(`/recipe/${recipeId}/start`);\n}\n\nfunction getFilter(items: ApplicationState[]): ApplicationState[] {\n  return items.filter(item => item.recipeId === recipeId);\n}\n</script>\n\n<DetailsPage\n  title={recipe?.name ?? ''}\n  breadcrumbLeftPart=\"Recipes\"\n  breadcrumbRightPart={recipe?.name ?? 
''}\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  {#snippet iconSnippet()}\n    <div class=\"rounded-full w-8 h-8 flex items-center justify-center\">\n      <Fa size=\"1.125x\" class=\"text-[var(--pd-content-header-icon)]\" icon={getIcon(recipe?.icon)} />\n    </div>\n  {/snippet}\n\n  {#snippet tabsSnippet()}\n    <Tab title=\"Summary\" url=\"/recipe/{recipeId}\" selected={$router.path === `/recipe/${recipeId}`} />\n    <Tab title=\"Running\" url=\"/recipe/{recipeId}/running\" selected={$router.path === `/recipe/${recipeId}/running`} />\n  {/snippet}\n\n  {#snippet actionsSnippet()}\n    <Button on:click={handleOnClick} icon={faRocket} aria-label=\"Start recipe\">Start</Button>\n  {/snippet}\n\n  {#snippet contentSnippet()}\n    <div class=\"bg-[var(--pd-content-bg)] h-full overflow-y-auto\">\n      <Route path=\"/\">\n        <ContentDetailsLayout\n          detailsTitle=\"AI App Details\"\n          detailsLabel=\"application details\"\n          detailsSummary=\"AI App Details: view the app repository\">\n          <svelte:fragment slot=\"content\">\n            <MarkdownRenderer source={recipe?.readme} />\n          </svelte:fragment>\n          <svelte:fragment slot=\"details\">\n            <RecipeDetails recipeId={recipeId} />\n          </svelte:fragment>\n        </ContentDetailsLayout>\n      </Route>\n      <Route path=\"/running\">\n        <TasksBanner title=\"Pulling recipes\" labels={{ 'recipe-pulling': recipeId }} />\n        <div class=\"flex w-full h-full\">\n          <ApplicationTable filter={getFilter}>\n            <svelte:fragment slot=\"empty-screen\">\n              <EmptyScreen icon={faRocket} title=\"No application running\" message=\"There is no AI App running\" />\n            </svelte:fragment>\n          </ApplicationTable>\n        </div>\n      </Route>\n    </div>\n  {/snippet}\n  {#snippet subtitleSnippet()}\n    <div class=\"mt-2\">\n      {#each recipe?.categories ?? 
[] as categoryId (categoryId)}\n        <Card\n          title={categories.find(category => category.id === categoryId)?.name ?? '?'}\n          classes=\"bg-[var(--pd-label-bg)] p-1 text-xs w-fit\" />\n      {/each}\n    </div>\n  {/snippet}\n</DetailsPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/Recipes.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { fireEvent, render, screen } from '@testing-library/svelte';\nimport { beforeAll, beforeEach, expect, test, vi } from 'vitest';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\nimport * as catalogStore from '/@/stores/catalog';\nimport { readable } from 'svelte/store';\nimport Recipes from '/@/pages/Recipes.svelte';\nimport { studioClient } from '../utils/client';\n\nvi.mock('/@/stores/catalog', async () => {\n  return {\n    catalog: vi.fn(),\n  };\n});\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {\n    filterRecipes: vi.fn(),\n    getExtensionConfiguration: vi.fn().mockResolvedValue({}),\n  },\n  rpcBrowser: {\n    subscribe: (): unknown => {\n      return {\n        unsubscribe: (): void => {},\n      };\n    },\n  },\n}));\n\nvi.mock('../stores/localRepositories', () => ({\n  localRepositories: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f([]);\n      return (): void => {};\n    },\n  },\n}));\n\nconst recipes = [\n  {\n    id: 'recipe1',\n    name: 'Recipe 1',\n    recommended: ['model1'],\n    categories: [],\n    description: 'Recipe 1',\n    
readme: '',\n    repository: 'https://recipe-1',\n  },\n  {\n    id: 'recipe2',\n    name: 'Recipe 2',\n    recommended: ['model2'],\n    categories: ['dummy-category'],\n    description: 'Recipe 2',\n    readme: '',\n    repository: 'https://recipe-2',\n  },\n];\n\nconst catalog: ApplicationCatalog = {\n  recipes: recipes,\n  models: [],\n  categories: [\n    {\n      id: 'dummy-category',\n      name: 'Dummy category',\n    },\n  ],\n};\n\nclass ResizeObserver {\n  observe = vi.fn();\n  disconnect = vi.fn();\n  unobserve = vi.fn();\n}\n\nbeforeAll(() => {\n  Object.defineProperty(window, 'ResizeObserver', { value: ResizeObserver });\n});\n\nvi.mock('/@/lib/RecipeCardTags', () => ({\n  getBGColor: vi.fn((_: string) => 'bg-purple-200'),\n  getTextColor: vi.fn((_: string) => 'text-purple-200'),\n  FRAMEWORKS: ['langchain', 'vectordb'],\n  TOOLS: ['whisper-cpp'],\n}));\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  vi.mocked(catalogStore).catalog = readable(catalog);\n  vi.mocked(studioClient).filterRecipes.mockResolvedValue({\n    result: recipes,\n    filters: {},\n    choices: {},\n  });\n  vi.mocked(studioClient.getExtensionConfiguration).mockResolvedValue({\n    experimentalGPU: false,\n    apiPort: 0,\n    experimentalTuning: false,\n    modelsPath: '',\n    inferenceRuntime: 'llama-cpp',\n    modelUploadDisabled: false,\n    showGPUPromotion: false,\n    appearance: 'dark',\n  });\n});\n\ntest('recipe without category should be visible', async () => {\n  render(Recipes);\n\n  await vi.waitFor(() => {\n    const text = screen.getAllByText('Recipe 1');\n    expect(text.length).toBeGreaterThan(0);\n  });\n});\n\ntest('recipe with category should be visible', async () => {\n  render(Recipes);\n\n  await vi.waitFor(() => {\n    const text = screen.getAllByText('Recipe 2');\n    expect(text.length).toBeGreaterThan(0);\n  });\n});\n\ntest('filters returned in choices + all are displayed', async () => {\n  vi.mocked(studioClient).filterRecipes.mockResolvedValue({\n 
   result: recipes,\n    filters: {},\n    choices: {\n      tools: [\n        { name: 'tool1', count: 1 },\n        { name: 'tool2', count: 2 },\n      ],\n      languages: [\n        { name: 'lang1', count: 3 },\n        { name: 'lang2', count: 4 },\n      ],\n      frameworks: [\n        { name: 'fw1', count: 5 },\n        { name: 'fw2', count: 6 },\n      ],\n    },\n  });\n\n  render(Recipes);\n\n  await vi.waitFor(() => {\n    const text = screen.getAllByText('Recipe 1');\n    expect(text.length).toBeGreaterThan(0);\n  });\n\n  const tests = [\n    { category: 'Tools', choices: ['all', 'tool1 (1)', 'tool2 (2)'] },\n    { category: 'Frameworks', choices: ['all', 'fw1 (5)', 'fw2 (6)'] },\n    { category: 'Languages', choices: ['all', 'lang1 (3)', 'lang2 (4)'] },\n  ];\n\n  for (const test of tests) {\n    const dropdownLabel = screen.getByLabelText(test.category);\n    expect(dropdownLabel).toBeInTheDocument();\n    await fireEvent.click(dropdownLabel);\n\n    await vi.waitFor(() => {\n      for (const choice of test.choices) {\n        const text = screen.getAllByText(choice);\n        expect(text.length).toBeGreaterThan(0);\n      }\n    });\n  }\n});\n\ntest('filterRecipes is called with selected filters', async () => {\n  vi.mocked(studioClient).filterRecipes.mockResolvedValue({\n    result: recipes,\n    filters: {},\n    choices: {\n      tools: [\n        { name: 'tool1', count: 1 },\n        { name: 'tool2', count: 2 },\n      ],\n      languages: [\n        { name: 'lang1', count: 3 },\n        { name: 'lang2', count: 4 },\n      ],\n      frameworks: [\n        { name: 'fw1', count: 5 },\n        { name: 'fw2', count: 6 },\n      ],\n    },\n  });\n\n  render(Recipes);\n\n  await vi.waitFor(() => {\n    const text = screen.getAllByText('Recipe 1');\n    expect(text.length).toBeGreaterThan(0);\n  });\n\n  const selectedFilters = [\n    { category: 'Tools', filter: 'tool1 (1)' },\n    { category: 'Languages', filter: 'lang2 (4)' },\n  ];\n\n  for (const 
selectedFilter of selectedFilters) {\n    const dropdownLabel = screen.getByLabelText(selectedFilter.category);\n    expect(dropdownLabel).toBeInTheDocument();\n    await fireEvent.click(dropdownLabel);\n\n    await vi.waitFor(async () => {\n      const text = screen.getByText(selectedFilter.filter);\n      expect(text).toBeInTheDocument();\n      await fireEvent.click(text);\n    });\n  }\n\n  expect(studioClient.filterRecipes).toHaveBeenCalledWith({\n    tools: ['tool1'],\n    languages: ['lang2'],\n  });\n});\n\ntest('Browse Recipe Repository button is present', async () => {\n  render(Recipes);\n\n  await vi.waitFor(() => {\n    const button = screen.getByTitle('https://github.com/containers/ai-lab-recipes/blob/main/CONTRIBUTING.md');\n    expect(button).toBeInTheDocument();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/Recipes.svelte",
    "content": "<script lang=\"ts\">\nimport RecipesCard from '/@/lib/RecipesCard.svelte';\nimport { catalog } from '/@/stores/catalog';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { Category } from '@shared/models/ICategory';\nimport { Button, Dropdown, NavPage } from '@podman-desktop/ui-svelte';\nimport { Fa } from 'svelte-fa';\nimport { faGithub } from '@fortawesome/free-brands-svg-icons'; // Import the GitHub icon\nimport { studioClient } from '../utils/client';\nimport type { CatalogFilterKey, Choice, RecipeChoices, RecipeFilters } from '@shared/models/FilterRecipesResult';\nimport { onMount } from 'svelte';\nimport { configuration } from '../stores/extensionConfiguration';\nimport { SvelteMap } from 'svelte/reactivity';\n\n// filters available in the dropdowns for the user to select\nlet choices: RecipeChoices = $state({});\n\n// filters selected by the user\nlet filters = $state<RecipeFilters>({});\n\n// the filtered recipes\nlet recipes: Recipe[] = $state([]);\n\n// categoryDict is the list of categories in the catalog, derived from the catalog store\nlet categoryDict: { [k: string]: Category } = $derived(\n  Object.fromEntries($catalog.categories.map(category => [category.id, category])),\n);\n\n// call filterRecipes every time the filters change\n// and update the recipes and choices states with the result\n$effect(() => {\n  (async (): Promise<void> => {\n    if (!filters) {\n      return;\n    }\n    const snapshotFilters = $state.snapshot(filters);\n    const result = await studioClient.filterRecipes(snapshotFilters);\n    recipes = result.result;\n    choices = result.choices;\n  })().catch((err: unknown) => {\n    console.error('unable to filter recipes with filters', filters, err);\n  });\n});\n\nconst UNCLASSIFIED: Category = {\n  id: 'unclassified',\n  name: 'Unclassified',\n};\n\n// compute the recipes by category\n// - when categoryDict is initially set\n// - every time `recipes` state changes\nlet groups: Map<Category, 
Recipe[]> = $derived.by(() => {\n  if (!Object.keys(categoryDict).length) {\n    return new Map();\n  }\n  const output: Map<Category, Recipe[]> = new SvelteMap();\n  for (const recipe of recipes) {\n    if (recipe.categories.length === 0) {\n      output.set(UNCLASSIFIED, [...(output.get(UNCLASSIFIED) ?? []), recipe]);\n      continue;\n    }\n    // iterate over all categories\n    for (const categoryId of recipe.categories) {\n      let key: Category;\n      if (categoryId in categoryDict) {\n        key = categoryDict[categoryId];\n      } else {\n        key = UNCLASSIFIED;\n      }\n\n      output.set(key, [...(output.get(key) ?? []), recipe]);\n    }\n  }\n  return output;\n});\n\nfunction onFilterChange(filter: CatalogFilterKey, v: unknown): void {\n  if (typeof v === 'string') {\n    if (v.length) {\n      filters[filter] = [v];\n    } else {\n      delete filters[filter];\n    }\n  }\n}\n\n// convert a list of choices provided by the backend to a list of options acceptable by the Dropdown component, adding an empty choice\nfunction choicesToOptions(choices: Choice[] | undefined): { label: string; value: string }[] {\n  return [\n    { label: 'all', value: '' },\n    ...(choices?.map(l => ({ value: l.name, label: `${l.name} (${l.count})` })) ?? []),\n  ];\n}\n\n// add more filters here when the backend supports them\nconst filtersComponents: { label: string; key: CatalogFilterKey }[] = [\n  { label: 'Tools', key: 'tools' },\n  { label: 'Frameworks', key: 'frameworks' },\n  { label: 'Languages', key: 'languages' },\n];\n\nfunction openContribution(): void {\n  studioClient.openURL('https://github.com/containers/ai-lab-recipes/blob/main/CONTRIBUTING.md').catch(console.error);\n}\n\nlet defaultRuntime: string | undefined = $state();\n\nonMount(() => {\n  const inferenceRuntime = $configuration?.inferenceRuntime;\n  if (inferenceRuntime) defaultRuntime = inferenceRuntime;\n  if (inferenceRuntime !== 'all') onFilterChange('tools', defaultRuntime ?? 
'');\n});\n</script>\n\n<NavPage title=\"Recipe Catalog\" searchEnabled={false}>\n  {#snippet content()}\n    <div class=\"flex flex-col min-w-full min-h-full\">\n      <div class=\"min-w-full min-h-full flex-1\">\n        <div class=\"px-5 space-y-5\">\n          <!-- Add the summary here -->\n          <div class=\"text-sm text-[var(--pd-modal-text)] space-y-3\">\n            <p>\n              Recipes help you explore and get started with a number of core AI use cases like chatbots, code\n              generators, text summarizers, agents, and more. Each recipe comes with detailed explanations and runnable\n              source code compatible with various large language models (LLMs).\n            </p>\n            <p>\n              Recipes are organized into categories:\n              <span class=\"text-[var(--pd-link)]\">audio, computer vision, multimodal, natural language processing</span\n              >.\n            </p>\n            <p>Want to contribute more AI applications? The catalog is open source and available on GitHub!</p>\n            <Button\n              title=\"https://github.com/containers/ai-lab-recipes/blob/main/CONTRIBUTING.md\"\n              on:click={openContribution}>\n              <div class=\"flex items-center space-x-2\">\n                <Fa icon={faGithub} />\n                <span>Browse Recipe Repository</span>\n              </div>\n            </Button>\n          </div>\n          <div class=\"flex flex-row space-x-2 text-[var(--pd-modal-text)]\">\n            {#each filtersComponents as filterComponent (filterComponent.key)}\n              <div class=\"w-full\">\n                <label for={filterComponent.key} class=\"block mb-2 text-sm font-medium\">{filterComponent.label}</label>\n                <Dropdown\n                  id={filterComponent.key}\n                  value={filterComponent.key === 'tools' ? 
defaultRuntime : ''}\n                  options={choicesToOptions(choices[filterComponent.key])}\n                  onChange={(v): void => onFilterChange(filterComponent.key, v)}></Dropdown>\n              </div>\n            {/each}\n          </div>\n          {#if groups}\n            {#each groups.entries() as [category, recipes] (category.id)}\n              <RecipesCard category={category} recipes={recipes} />\n            {/each}\n          {/if}\n        </div>\n      </div>\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/StartRecipe.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { vi, beforeEach, test, expect } from 'vitest';\nimport { studioClient } from '/@/utils/client';\nimport { render, screen, fireEvent, within } from '@testing-library/svelte';\nimport StartRecipe from '/@/pages/StartRecipe.svelte';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport { InferenceType } from '@shared/models/IInference';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { Task } from '@shared/models/ITask';\nimport { router } from 'tinro';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { VMType } from '@shared/models/IPodman';\nimport * as LocalRepositoryStore from '/@/stores/localRepositories';\nimport * as ConnectionStore from '/@/stores/containerProviderConnections';\nimport * as ModelsInfoStore from '/@/stores/modelsInfo';\nimport * as TaskStore from '/@/stores/tasks';\nimport * as CatalogStore from '/@/stores/catalog';\nimport { readable, writable } from 'svelte/store';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\n\n// Mock LocalRepository 
store\nvi.mock('/@/stores/localRepositories');\nvi.mock('/@/stores/containerProviderConnections');\nvi.mock('/@/stores/modelsInfo');\nvi.mock('/@/stores/tasks');\nvi.mock('/@/stores/catalog');\nvi.mock('/@/stores/extensionConfiguration');\n\nvi.mock('tinro', () => ({\n  router: {\n    goto: vi.fn(),\n    location: {\n      query: new Map(),\n    },\n  },\n}));\n\nvi.mock('../utils/client', async () => ({\n  studioClient: {\n    checkContainerConnectionStatusAndResources: vi.fn(),\n    requestPullApplication: vi.fn(),\n  },\n}));\n\nconst fakeRecipe: Recipe = {\n  id: 'dummy-recipe-id',\n  backend: InferenceType.LLAMA_CPP,\n  name: 'Dummy Recipe',\n  description: 'Dummy description',\n  recommended: ['dummy-model-1'],\n  categories: [],\n} as unknown as Recipe;\n\nconst fakeLlamaStackRecipe: Recipe = {\n  id: 'dummy-llama-stack-recipe-id',\n  backend: 'llama-stack',\n  name: 'Dummy Llama Stack Recipe',\n  description: 'Dummy description',\n  categories: [],\n} as unknown as Recipe;\n\nconst fakeRecommendedModel: ModelInfo = {\n  id: 'dummy-model-1',\n  backend: InferenceType.LLAMA_CPP,\n  name: 'Dummy Model 1',\n  file: {\n    file: 'dummy-model-file',\n    path: 'dummy-model-path',\n  },\n} as unknown as ModelInfo;\n\nconst fakeRemoteModel: ModelInfo = {\n  id: 'dummy-model-2',\n  backend: InferenceType.LLAMA_CPP,\n  name: 'Dummy Model 2',\n} as unknown as ModelInfo;\n\nconst containerProviderConnection: ContainerProviderConnectionInfo = {\n  name: 'Dummy connainter connection provider',\n  status: 'started',\n  type: 'podman',\n  vmType: VMType.QEMU,\n  providerId: 'podman',\n};\n\nbeforeEach(() => {\n  vi.resetAllMocks();\n\n  // reset all query between tests\n  router.location.query.clear();\n\n  vi.mocked(CatalogStore).catalog = readable<ApplicationCatalog>({\n    recipes: [fakeRecipe, fakeLlamaStackRecipe],\n    models: [],\n    categories: [],\n    version: '',\n  });\n  vi.mocked(ConnectionStore).containerProviderConnections = 
readable([containerProviderConnection]);\n  vi.mocked(ModelsInfoStore).modelsInfo = readable([fakeRecommendedModel, fakeRemoteModel]);\n  vi.mocked(LocalRepositoryStore).localRepositories = readable([]);\n  vi.mocked(TaskStore).tasks = readable([]);\n\n  vi.mocked(studioClient.requestPullApplication).mockResolvedValue('fake-tracking-id');\n\n  // mock scrollIntoView\n  window.HTMLElement.prototype.scrollIntoView = vi.fn();\n});\n\ntest('Recipe name should be visible', async () => {\n  render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  const span = screen.getByLabelText('Recipe name');\n  expect(span).toBeDefined();\n  expect(span.textContent).toBe(fakeRecipe.name);\n});\n\ntest('Recipe Local Repository should be visible when defined', async () => {\n  // mock recipe local repository\n  vi.mocked(LocalRepositoryStore).localRepositories = readable([\n    {\n      path: 'dummy-recipe-path',\n      sourcePath: 'dummy-recipe-path',\n      labels: {\n        'recipe-id': fakeRecipe.id,\n      },\n    },\n  ]);\n\n  render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  const span = screen.getByLabelText('Recipe local path');\n  expect(span).toBeDefined();\n  expect(span.textContent).toBe('dummy-recipe-path');\n});\n\ntest('Submit button should be disabled when model is required and no model is selected', async () => {\n  vi.mocked(ModelsInfoStore).modelsInfo = readable([]);\n\n  render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  const button = screen.getByTitle(`Start ${fakeRecipe.name} recipe`);\n  expect(button).toBeDefined();\n  expect(button).toBeDisabled();\n});\n\ntest('Submit button should be enabled when model is not required', async () => {\n  vi.mocked(ModelsInfoStore).modelsInfo = readable([]);\n\n  render(StartRecipe, {\n    recipeId: 'dummy-llama-stack-recipe-id',\n  });\n\n  const button = screen.getByTitle(`Start ${fakeLlamaStackRecipe.name} recipe`);\n  expect(button).toBeDefined();\n  
expect(button).toBeEnabled();\n});\n\ntest('First recommended model should be selected as default model', async () => {\n  const { container } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  await vi.waitFor(() => {\n    const option = getSelectedOption<{ value: string }>(container);\n    expect(option?.value).toBe(fakeRecommendedModel.id);\n  });\n});\n\n/**\n * Return the selected value\n * @param container\n */\nfunction getSelectedOption<T>(container: HTMLElement): T | undefined {\n  const input = container.querySelector('input[name=\"select-model\"][type=\"hidden\"]');\n  if (!input) throw new Error('input not found');\n  // eslint-disable-next-line sonarjs/different-types-comparison\n  if ((input as HTMLInputElement).value === undefined) return undefined;\n  return JSON.parse((input as HTMLInputElement).value);\n}\n\n/**\n * Utility method to select an option in the svelte-select component\n * @param container\n * @param label\n */\nasync function selectOption(container: HTMLElement, label: string): Promise<void> {\n  // first get the select input\n  const input = screen.getByLabelText('Select Model');\n  await fireEvent.pointerUp(input); // they are using the pointer up event instead of click.\n\n  // get all options available\n  const items = container.querySelectorAll('div[class~=\"list-item\"]');\n  // ensure we have two options\n  expect(items.length).toBeGreaterThan(0);\n\n  // get the option we are interested in (remote model here)\n  const remoteModelOption = Array.from(items).find(item => item.querySelector('span')?.textContent === label);\n  if (!remoteModelOption) throw new Error('missing options in select');\n\n  // click on it\n  await fireEvent.click(remoteModelOption);\n\n  return await vi.waitFor(() => {\n    const value = getSelectedOption<{ label: string }>(container);\n    expect(value?.label).toBe(label);\n  });\n}\n\ntest('Selecting model not downloaded should display a warning', async () => {\n  const { container } 
= render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  await selectOption(container, fakeRemoteModel.name);\n\n  // Ensure the selected value is the one we choose\n  await vi.waitFor(() => {\n    const span = screen.getByRole('alert');\n    expect(span).toBeDefined();\n    expect(span.textContent).toBe(\n      'The selected model will be downloaded. This action can take some time depending on your connection',\n    );\n  });\n});\n\ntest('Selecting model downloaded should not display a warning', async () => {\n  const { container } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  await selectOption(container, fakeRecommendedModel.name);\n  const span = screen.queryByRole('alert');\n  expect(span).toBeNull();\n});\n\ntest('Selecting model should enable submit button', async () => {\n  const { container } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  await selectOption(container, fakeRecommendedModel.name);\n\n  const button = screen.getByTitle(`Start ${fakeRecipe.name} recipe`);\n  expect(button).toBeDefined();\n  expect(button).not.toBeDisabled();\n});\n\ntest('Submit button should call requestPullApplication with proper arguments', async () => {\n  const { container } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  await selectOption(container, fakeRecommendedModel.name);\n\n  const button = screen.getByTitle(`Start ${fakeRecipe.name} recipe`);\n  expect(button).toBeEnabled();\n  await fireEvent.click(button);\n\n  await vi.waitFor(() => {\n    expect(studioClient.requestPullApplication).toHaveBeenCalledWith({\n      connection: containerProviderConnection,\n      recipeId: fakeRecipe.id,\n      modelId: fakeRecommendedModel.id,\n      dependencies: {\n        llamaStack: false,\n      },\n    });\n  });\n});\n\ntest('Submit button should call requestPullApplication with proper arguments for llama-stack recipe', async () => {\n  render(StartRecipe, {\n    recipeId: 
'dummy-llama-stack-recipe-id',\n  });\n\n  const button = screen.getByTitle(`Start ${fakeLlamaStackRecipe.name} recipe`);\n  expect(button).toBeEnabled();\n  await fireEvent.click(button);\n\n  await vi.waitFor(() => {\n    expect(studioClient.requestPullApplication).toHaveBeenCalledWith({\n      connection: containerProviderConnection,\n      recipeId: fakeLlamaStackRecipe.id,\n      dependencies: {\n        llamaStack: true,\n      },\n    });\n  });\n});\n\ntest('Submit button should call requestPullApplication with proper arguments', async () => {\n  // mock no container connection available\n  vi.mocked(ConnectionStore).containerProviderConnections = readable([]);\n\n  const { container, getByTitle } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  await selectOption(container, fakeRecommendedModel.name);\n\n  const button = getByTitle(`Start ${fakeRecipe.name} recipe`);\n  expect(button).toBeDisabled();\n});\n\ntest('Loading task should make the submit button disabled', async () => {\n  vi.mocked(TaskStore).tasks = readable([\n    {\n      id: 'dummy-task-id',\n      name: 'Dummy task',\n      state: 'loading',\n      labels: {\n        trackingId: 'fake-tracking-id',\n      },\n    } as Task,\n  ]);\n  const { container } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  await selectOption(container, fakeRecommendedModel.name);\n\n  const button = screen.getByTitle(`Start ${fakeRecipe.name} recipe`);\n  expect(button).not.toBeDisabled();\n  await fireEvent.click(button);\n\n  await vi.waitFor(() => {\n    expect(button).toBeDefined();\n  });\n});\n\ntest('Completed task should make the open details button visible', async () => {\n  vi.mocked(TaskStore).tasks = readable([\n    {\n      id: 'dummy-task-id',\n      name: 'Dummy task',\n      state: 'success',\n      labels: {\n        trackingId: 'fake-tracking-id',\n        recipeId: 'dummy-recipe-id',\n      },\n    } as Task,\n  ]);\n\n  
router.location.query.set('trackingId', 'fake-tracking-id');\n\n  const { container } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n    trackingId: 'fake-tracking-id',\n  });\n\n  await vi.waitFor(() => {\n    expect(within(container).getByTitle('Open details')).toBeDefined();\n  });\n});\n\ntest('trackingId in router query should use it to display related tasks', () => {\n  vi.mocked(TaskStore).tasks = readable([\n    {\n      id: 'dummy-task-id',\n      name: 'Dummy task',\n      state: 'loading',\n      labels: {\n        trackingId: 'fake-tracking-id',\n      },\n    } as Task,\n  ]);\n\n  router.location.query.set('trackingId', 'fake-tracking-id');\n\n  render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n    trackingId: 'fake-tracking-id',\n  });\n  const button = screen.getByTitle(`Start ${fakeRecipe.name} recipe`);\n  expect(button).toBeDisabled();\n});\n\ntest('restoring page should use model-id from tasks to restore the value in the select input', async () => {\n  vi.mocked(TaskStore).tasks = readable([\n    {\n      id: 'dummy-task-id',\n      name: 'Dummy task',\n      state: 'loading',\n      labels: {\n        trackingId: 'fake-tracking-id',\n        'model-id': fakeRecommendedModel.id,\n      },\n    } as Task,\n  ]);\n\n  router.location.query.set('trackingId', 'fake-tracking-id');\n\n  const { container } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  return await vi.waitFor(() => {\n    const input = container.querySelector('input[name=\"select-model\"][type=\"hidden\"]');\n    if (!input) throw new Error('input not found');\n    expect(JSON.parse((input as HTMLInputElement).value).label).toBe(fakeRecommendedModel.name);\n  });\n});\n\ntest('no containerProviderConnections should have no running container error', async () => {\n  // mock an empty store\n  vi.mocked(ConnectionStore).containerProviderConnections = readable([]);\n\n  const { getByRole } = render(StartRecipe, {\n    recipeId: 
'dummy-recipe-id',\n  });\n\n  const alert = getByRole('alert');\n  expect(alert).toHaveTextContent('No running container engine found');\n});\n\ntest('no container error should disappear if one get available', async () => {\n  // mock an empty store\n  const store = writable<ContainerProviderConnectionInfo[]>([]);\n  vi.mocked(ConnectionStore).containerProviderConnections = store;\n\n  const { getByRole, queryByRole } = render(StartRecipe, {\n    recipeId: 'dummy-recipe-id',\n  });\n\n  // First we should have the error\n  await vi.waitFor(() => {\n    const alert = getByRole('alert');\n    expect(alert).toHaveTextContent('No running container engine found');\n  });\n\n  // let's fill the store\n  store.set([containerProviderConnection]);\n\n  // wait for error to be removed\n  await vi.waitFor(() => {\n    const alert = queryByRole('alert');\n    expect(alert).toBeNull();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/StartRecipe.svelte",
    "content": "<script lang=\"ts\">\nimport { faFolder, faRocket, faUpRightFromSquare, faWarning } from '@fortawesome/free-solid-svg-icons';\nimport { catalog } from '/@/stores/catalog';\nimport Fa from 'svelte-fa';\nimport type { Recipe, RecipePullOptions, RecipePullOptionsWithModelInference } from '@shared/models/IRecipe';\nimport type { LocalRepository } from '@shared/models/ILocalRepository';\nimport { findLocalRepositoryByRecipeId } from '/@/utils/localRepositoriesUtils';\nimport { localRepositories } from '/@/stores/localRepositories';\nimport { modelsInfo } from '/@/stores/modelsInfo';\nimport { Button, ErrorMessage, FormPage } from '@podman-desktop/ui-svelte';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { InferenceType } from '@shared/models/IInference';\nimport { studioClient } from '/@/utils/client';\nimport type { Task } from '@shared/models/ITask';\nimport { tasks } from '/@/stores/tasks';\nimport { router } from 'tinro';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { containerProviderConnections } from '/@/stores/containerProviderConnections';\nimport ModelSelect from '/@/lib/select/ModelSelect.svelte';\nimport ContainerProviderConnectionSelect from '/@/lib/select/ContainerProviderConnectionSelect.svelte';\nimport ContainerConnectionWrapper from '/@/lib/notification/ContainerConnectionWrapper.svelte';\nimport TrackedTasks from '/@/lib/progress/TrackedTasks.svelte';\n\ninterface Props {\n  recipeId: string;\n  // The tracking id is a unique identifier provided by the\n  // backend when calling requestPullApplication\n  trackingId?: string;\n}\n\nlet { recipeId, trackingId }: Props = $props();\n\nlet recipe: Recipe | undefined = $derived($catalog.recipes.find(r => r.id === recipeId));\n\n// The container provider connection to use\nlet containerProviderConnection: ContainerProviderConnectionInfo | undefined = $state(undefined);\n// Filtered connections (started)\nlet 
startedContainerProviderConnectionInfo: ContainerProviderConnectionInfo[] = $derived(\n  $containerProviderConnections.filter(connection => connection.status === 'started'),\n);\n// recipe local path\nlet localPath: LocalRepository | undefined = $derived(findLocalRepositoryByRecipeId($localRepositories, recipe?.id));\n// Filter all models based on backend property\nlet models: ModelInfo[] = $derived(\n  $modelsInfo.filter(model => (model.backend ?? InferenceType.NONE) === (recipe?.backend ?? InferenceType.NONE)),\n);\n// Hold the selected model\nlet model: ModelInfo | undefined = $state(undefined);\n// loading state\nlet loading = $state(false);\n// All tasks are successful (not any in error)\nlet completed: boolean = $state(false);\n\nlet errorMsg: string | undefined = $state(undefined);\n\nlet formValid = $derived.by<boolean>((): boolean => {\n  if (!recipe) {\n    return false;\n  }\n  if (!isModelNeeded(recipe)) {\n    return true;\n  }\n  return !!model;\n});\n\n$effect(() => {\n  // Select default connection\n  if (!containerProviderConnection && startedContainerProviderConnectionInfo.length > 0) {\n    containerProviderConnection = startedContainerProviderConnectionInfo[0];\n  }\n  // Select default model\n  if (!model && recipe && models.length > 0) {\n    model = getFirstRecommended();\n  }\n});\n\nconst getFirstRecommended = (): ModelInfo | undefined => {\n  if (!recipe || !models) return undefined;\n  const recommended = recipe.recommended && recipe.recommended.length > 0 ? 
recipe.recommended[0] : undefined;\n\n  const model = models.find(model => model.id === recommended);\n  if (!model) return undefined;\n  return model;\n};\n\nconst processTasks = (trackedTasks: Task[]): void => {\n  // if one task is in loading we are still loading\n  loading = !!trackingId && trackedTasks.some(task => task.state === 'loading');\n\n  // if all task are successful we are successful\n  completed = !!trackingId && trackedTasks.every(task => task.state === 'success');\n\n  // if we re-open the page, we might need to restore the model selected\n  populateModelFromTasks(trackedTasks);\n};\n\n// This method uses the trackedTasks to restore the selected value of model\n// It is useful when the page has been restored\nfunction populateModelFromTasks(trackedTasks: Task[]): void {\n  const task = trackedTasks.find(\n    task => task.labels && 'model-id' in task.labels && typeof task.labels['model-id'] === 'string',\n  );\n  const modelId = task?.labels?.['model-id'];\n  if (!modelId) return;\n\n  const nModel = models.find(model => model.id === modelId);\n  if (!nModel) return;\n\n  model = nModel;\n}\n\nasync function submit(): Promise<void> {\n  if (!recipe || !formValid) return;\n\n  errorMsg = undefined;\n\n  try {\n    const options: RecipePullOptions = {\n      recipeId: $state.snapshot(recipe.id),\n      connection: $state.snapshot(containerProviderConnection),\n      dependencies: {\n        llamaStack: recipe.backend === 'llama-stack',\n      },\n    };\n    if (model) {\n      (options as RecipePullOptionsWithModelInference).modelId = $state.snapshot(model.id);\n    }\n    const trackingId = await studioClient.requestPullApplication(options);\n    router.location.query.set('trackingId', trackingId);\n  } catch (err: unknown) {\n    console.error('Something wrong while trying to create the inference server.', err);\n    errorMsg = String(err);\n  }\n}\n\nexport function goToUpPage(): void {\n  router.goto('/recipes');\n}\n\nfunction handleOnClick(): 
void {\n  router.goto(`/recipe/${recipeId}/running`);\n}\n\nfunction isModelNeeded(recipe: Recipe): boolean {\n  return recipe.backend !== 'llama-stack';\n}\n</script>\n\n<FormPage\n  breadcrumbLeftPart=\"Recipes\"\n  breadcrumbRightPart=\"Start recipe\"\n  title=\"Start recipe\"\n  onclose={goToUpPage}\n  onbreadcrumbClick={goToUpPage}>\n  <!-- Removed breadcrumbTitle above, no longer needed for svelte 5 formpage -->\n  {#snippet icon()}\n    <div class=\"rounded-full w-8 h-8 flex items-center justify-center\">\n      <Fa size=\"1.125x\" class=\"text-[var(--pd-content-header-icon)]\" icon={faRocket} />\n    </div>\n  {/snippet}\n\n  {#snippet content()}\n    <div class=\"flex flex-col w-full\">\n      <!-- warning machine resources -->\n      {#if containerProviderConnection}\n        <div class=\"mx-5\">\n          <ContainerConnectionWrapper\n            checkContext=\"recipe\"\n            model={$state.snapshot(model)}\n            containerProviderConnection={$state.snapshot(containerProviderConnection)} />\n        </div>\n      {/if}\n\n      <!-- tasks tracked -->\n      <TrackedTasks onChange={processTasks} class=\"mx-5 mt-5\" trackingId={trackingId} tasks={$tasks} />\n\n      {#if recipe}\n        <!-- form -->\n        <div class=\"space-y-6 bg-[var(--pd-content-card-bg)] m-5 px-8 sm:pb-6 xl:pb-8 rounded-lg h-fit\">\n          <div>\n            <!-- selected recipe -->\n            <label for=\"recipe\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n              >Recipe</label>\n\n            <div\n              class=\"py-2 px-4 rounded-lg w-full flex flex-col bg-[var(--pd-content-bg)] text-[var(--pd-content-card-text)]\">\n              <span aria-label=\"Recipe name\">{recipe.name}</span>\n              {#if localPath}\n                <div\n                  class=\"bg-[var(--pd-label-bg)] text-[var(--pd-label-text)] max-w-full rounded-md p-2 mb-2 flex flex-row w-full h-min text-sm text-nowrap items-center\">\n      
            <Fa class=\"mr-2\" icon={faFolder} />\n                  <span aria-label=\"Recipe local path\" class=\"overflow-x-hidden text-ellipsis max-w-full\">\n                    {localPath.path}\n                  </span>\n                </div>\n              {/if}\n            </div>\n\n            <!-- container provider connection input -->\n            {#if startedContainerProviderConnectionInfo.length > 1}\n              <label for=\"model\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n                >Container engine</label>\n              <ContainerProviderConnectionSelect\n                bind:value={containerProviderConnection}\n                containerProviderConnections={startedContainerProviderConnectionInfo} />\n            {/if}\n\n            {#if isModelNeeded(recipe)}\n              <!-- model form -->\n              <label for=\"select-model\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n                >Model</label>\n              <ModelSelect bind:value={model} disabled={loading} recommended={recipe.recommended} models={models} />\n              {#if model && model.file === undefined}\n                <div class=\"text-gray-800 text-sm flex items-center\">\n                  <Fa class=\"mr-2\" icon={faWarning} />\n                  <span role=\"alert\"\n                    >The selected model will be downloaded. This action can take some time depending on your connection</span>\n                </div>\n              {/if}\n            {/if}\n          </div>\n\n          {#if errorMsg !== undefined || !containerProviderConnection}\n            <ErrorMessage error={errorMsg ?? 
'No running container engine found'} />\n          {/if}\n\n          <footer>\n            <div class=\"w-full flex flex-col\">\n              {#if completed}\n                <Button icon={faUpRightFromSquare} title=\"Open details\" on:click={handleOnClick}>Open details</Button>\n              {:else}\n                <Button\n                  title=\"Start {recipe.name} recipe\"\n                  inProgress={loading}\n                  on:click={submit}\n                  disabled={!formValid || loading || !containerProviderConnection}\n                  icon={faRocket}>\n                  Start {recipe.name} recipe\n                </Button>\n              {/if}\n            </div>\n          </footer>\n        </div>\n      {/if}\n    </div>\n  {/snippet}\n</FormPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/TuneSessions.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { vi, test, expect } from 'vitest';\nimport { screen, render, waitFor, within } from '@testing-library/svelte';\nimport { router } from 'tinro';\nimport TuneSessions from './TuneSessions.svelte';\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\n\nconst mocks = vi.hoisted(() => ({\n  instructlabSessionsListMock: vi.fn(),\n  getCatalogMock: vi.fn(),\n}));\n\nvi.mock('../stores/instructlabSessions', () => ({\n  instructlabSessions: {\n    subscribe: (f: (msg: InstructlabSession[]) => void) => {\n      f(mocks.instructlabSessionsListMock());\n      return (): void => {};\n    },\n  },\n}));\n\nvi.mock('/@/utils/client', async () => {\n  return {\n    studioClient: {\n      getCatalog: mocks.getCatalogMock,\n    },\n    rpcBrowser: {\n      subscribe: (): unknown => {\n        return {\n          unsubscribe: (): void => {},\n        };\n      },\n    },\n  };\n});\n\ntest('should display empty screen', async () => {\n  const sessions: InstructlabSession[] = [];\n  mocks.instructlabSessionsListMock.mockReturnValue(sessions);\n  
render(TuneSessions);\n\n  const status = screen.getByLabelText('status');\n  expect(status).toBeDefined();\n});\n\ntest('should display sessions', async () => {\n  const time = new Date(new Date().getTime() - 6 * 24 * 60 * 60 * 1000).getTime() / 1000; // 6 days ago\n  const sessions: InstructlabSession[] = [\n    {\n      name: 'session 1',\n      modelId: 'model1',\n      targetModel: 'model1-target',\n      repository: '/repo1',\n      status: 'fine-tuned',\n      createdTime: time,\n    },\n    {\n      name: 'session 2',\n      modelId: 'model2',\n      targetModel: 'model2-target',\n      repository: '/repo2',\n      status: 'generating-instructions',\n      createdTime: time,\n    },\n  ];\n  mocks.instructlabSessionsListMock.mockReturnValue(sessions);\n  const catalog: ApplicationCatalog = {\n    recipes: [],\n    models: [],\n    categories: [],\n  };\n  mocks.getCatalogMock.mockResolvedValue(catalog);\n  render(TuneSessions);\n\n  const table = screen.getByRole('table');\n  expect(table).toBeDefined();\n\n  // Should display 2 sessions (+ header)\n  const rows = screen.queryAllByRole('row');\n  expect(rows.length).toEqual(3);\n\n  // First session is session 1\n  const cellsSession1 = within(rows[1]).queryAllByRole('cell');\n  expect(cellsSession1.length > 1).toBeTruthy();\n  const name1 = await within(cellsSession1[1]).findByText('session 1');\n  expect(name1).not.toBeNull();\n  const duration1 = await within(cellsSession1[4]).findByText('6 days');\n  expect(duration1).not.toBeNull();\n\n  // Second session is session 2\n  const cellsSession2 = within(rows[2]).queryAllByRole('cell');\n  expect(cellsSession2.length > 1).toBeTruthy();\n  const name2 = await within(cellsSession2[1]).findByText('session 2');\n  expect(name2).not.toBeNull();\n\n  // Open Running tab\n  router.goto('running');\n\n  await waitFor(async () => {\n    const rows = screen.queryAllByRole('row');\n    expect(rows.length).toEqual(2);\n\n    // First session is session 2\n    const 
cellsSession1 = within(rows[1]).queryAllByRole('cell');\n    expect(cellsSession1.length > 1).toBeTruthy();\n    const name1 = await within(cellsSession1[1]).findByText('session 2');\n    expect(name1).not.toBeNull();\n  });\n\n  // Open All tab\n  router.goto('..');\n\n  await waitFor(async () => {\n    const rows = screen.queryAllByRole('row');\n    expect(rows.length).toEqual(3);\n  });\n\n  // Open Completed tab\n  router.goto('completed');\n\n  await waitFor(async () => {\n    const rows = screen.queryAllByRole('row');\n    expect(rows.length).toEqual(2);\n\n    // First session is session 1\n    const cellsSession1 = within(rows[1]).queryAllByRole('cell');\n    expect(cellsSession1.length > 1).toBeTruthy();\n    const name1 = await within(cellsSession1[1]).findByText('session 1');\n    expect(name1).not.toBeNull();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/TuneSessions.svelte",
    "content": "<script lang=\"ts\">\nimport { faGaugeHigh, faPlusCircle } from '@fortawesome/free-solid-svg-icons';\nimport { Button, EmptyScreen, NavPage, Tab, Table, TableColumn, TableRow } from '@podman-desktop/ui-svelte';\nimport { onMount } from 'svelte';\nimport { instructlabSessions } from '../stores/instructlabSessions';\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\nimport InstructlabColumnName from '../lib/table/instructlab/InstructlabColumnName.svelte';\nimport InstructlabColumnModelName from '../lib/table/instructlab/InstructlabColumnModelName.svelte';\nimport InstructlabColumnRepository from '../lib/table/instructlab/InstructlabColumnRepository.svelte';\nimport InstructlabColumnTargetModelName from '../lib/table/instructlab/InstructlabColumnTargetModelName.svelte';\nimport InstructlabColumnAge from '../lib/table/instructlab/InstructlabColumnAge.svelte';\nimport InstructlabColumnStatus from '../lib/table/instructlab/InstructlabColumnStatus.svelte';\nimport { router } from 'tinro';\nimport Route from '../Route.svelte';\n\nfunction start(): void {\n  router.goto('/tune/start');\n}\n\nconst columns: TableColumn<InstructlabSession>[] = [\n  new TableColumn<InstructlabSession>('Name', { width: '120px', renderer: InstructlabColumnName, align: 'left' }),\n  new TableColumn<InstructlabSession>('Model', { width: '1fr', renderer: InstructlabColumnModelName, align: 'left' }),\n  new TableColumn<InstructlabSession>('Repository', {\n    width: '100px',\n    renderer: InstructlabColumnRepository,\n    align: 'left',\n  }),\n  new TableColumn<InstructlabSession>('Duration', { width: '70px', renderer: InstructlabColumnAge }),\n  new TableColumn<InstructlabSession>('Stage', { width: '80px', renderer: InstructlabColumnStatus, align: 'left' }),\n  new TableColumn<InstructlabSession>('Target model', {\n    width: '1fr',\n    renderer: InstructlabColumnTargetModelName,\n    align: 'left',\n  }),\n];\nconst row = new 
TableRow<InstructlabSession>({});\n\nlet data: InstructlabSession[];\n\n$: running = data?.filter(t => t.status !== 'fine-tuned');\n$: completed = data?.filter(t => t.status === 'fine-tuned');\n\nonMount(() => {\n  return instructlabSessions.subscribe(items => {\n    data = items;\n  });\n});\n</script>\n\n<NavPage title=\"InstructLab Sessions\" searchEnabled={false}>\n  {#snippet tabs()}\n    <Tab title=\"All\" url=\"/tune\" selected={$router.path === '/tune'} />\n    <Tab title=\"Running\" url=\"/tune/running\" selected={$router.path === '/tune/running'} />\n    <Tab title=\"Completed\" url=\"/tune/completed\" selected={$router.path === '/tune/completed'} />\n  {/snippet}\n\n  {#snippet additionalActions()}\n    <Button icon={faPlusCircle} on:click={start}>Start Fine Tuning</Button>\n  {/snippet}\n\n  {#snippet content()}\n    <div class=\"flex min-w-full\">\n      <!-- All models -->\n      <Route path=\"/\">\n        {#if data?.length > 0}\n          <Table kind=\"session\" data={data} columns={columns} row={row} />\n        {:else}\n          <EmptyScreen\n            aria-label=\"status\"\n            icon={faGaugeHigh}\n            title=\"No InstructLab Session\"\n            message=\"Create InstructLab session to improve trained models with specialized knowledge and skills tuning\">\n            <div class=\"flex gap-2 justify-center\">\n              <Button type=\"link\" on:click={start}>Create InstructLab Session</Button>\n            </div>\n          </EmptyScreen>\n        {/if}\n      </Route>\n\n      <!-- Running models -->\n      <Route path=\"/running\">\n        {#if running?.length > 0}\n          <Table kind=\"session\" data={running} columns={columns} row={row} />\n        {:else}\n          <EmptyScreen\n            aria-label=\"status\"\n            icon={faGaugeHigh}\n            title=\"No Running InstructLab Session\"\n            message=\"Create InstructLab session to improve trained models with specialized knowledge and skills 
tuning\">\n            <div class=\"flex gap-2 justify-center\">\n              <Button type=\"link\" on:click={start}>Create InstructLab Session</Button>\n            </div>\n          </EmptyScreen>\n        {/if}\n      </Route>\n\n      <!-- Completed models -->\n      <Route path=\"/completed\">\n        {#if completed?.length > 0}\n          <Table kind=\"session\" data={completed} columns={columns} row={row} />\n        {:else}\n          <EmptyScreen\n            aria-label=\"status\"\n            icon={faGaugeHigh}\n            title=\"No Completed InstructLab Session\"\n            message=\"Create InstructLab session to improve trained models with specialized knowledge and skills tuning\">\n            <div class=\"flex gap-2 justify-center\">\n              <Button type=\"link\" on:click={start}>Create InstructLab Session</Button>\n            </div>\n          </EmptyScreen>\n        {/if}\n      </Route>\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/applications.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\n/* returns the status of the AI application, to be used by <IconStatus> */\nexport function getApplicationStatus(appState: ApplicationState): string {\n  const podStatus = appState.pod.Status.toUpperCase();\n  if (['DEGRADED', 'STARTING', 'USED', 'DELETING', 'CREATED'].includes(podStatus)) {\n    return podStatus;\n  }\n  if (podStatus !== 'RUNNING') {\n    return 'UNKNOWN';\n  }\n  switch (appState.health) {\n    case 'none':\n    case 'healthy':\n      return 'RUNNING';\n    case 'starting':\n      return 'STARTING';\n    case 'unhealthy':\n      return 'DEGRADED';\n  }\n}\n\n/* returns the status of the AI application in plain text */\nexport function getApplicationStatusText(appState: ApplicationState): string {\n  if (appState.pod.Status === 'Running') {\n    if (appState.health === 'starting') {\n      return 'Starting';\n    }\n    if (appState.health === 'unhealthy') {\n      return 'Degraded';\n    }\n  }\n  return appState.pod.Status;\n}\n"
  },
  {
    "path": "packages/frontend/src/pages/instructlab/AboutInstructLab.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport { writable, type Writable } from 'svelte/store';\nimport userEvent from '@testing-library/user-event';\nimport AboutInstructLab from './AboutInstructLab.svelte';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport { configuration } from '/@/stores/extensionConfiguration';\nimport { studioClient } from '/@/utils/client';\nimport { router } from 'tinro';\n\nvi.mock('/@/stores/extensionConfiguration', () => ({\n  configuration: {\n    subscribe: vi.fn(),\n    unsubscribe: vi.fn(),\n  },\n}));\n\nvi.mock('tinro', () => ({\n  router: {\n    goto: vi.fn(),\n  },\n}));\n\nvi.mock('/@/utils/client', () => ({\n  studioClient: {\n    openURL: vi.fn(),\n  },\n}));\n\nclass ResizeObserver {\n  observe = vi.fn();\n  disconnect = vi.fn();\n  unobserve = vi.fn();\n}\n\nconst mockConfiguration: Writable<ExtensionConfiguration> = writable({\n  experimentalGPU: false,\n  modelsPath: '',\n  apiPort: -1,\n  modelUploadDisabled: false,\n  experimentalTuning: false,\n  
showGPUPromotion: false,\n  inferenceRuntime: 'llama-cpp',\n  appearance: 'dark',\n});\n\nbeforeEach(() => {\n  Object.defineProperty(window, 'ResizeObserver', { value: ResizeObserver });\n  vi.resetAllMocks();\n  vi.mocked(configuration).subscribe.mockImplementation(run => mockConfiguration.subscribe(run));\n});\n\ntest('renders Start Fine Tuning button if experimentalTuning is true', async () => {\n  mockConfiguration.set({\n    experimentalGPU: false,\n    showGPUPromotion: true,\n    modelUploadDisabled: false,\n    modelsPath: '',\n    experimentalTuning: true,\n    inferenceRuntime: 'llama-cpp',\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(AboutInstructLab);\n  expect(await screen.findByText('Start Fine Tuning')).toBeInTheDocument();\n});\n\ntest('does not render Start Fine Tuning button if experimentalTuning is false', async () => {\n  mockConfiguration.set({\n    experimentalGPU: false,\n    showGPUPromotion: true,\n    modelUploadDisabled: false,\n    modelsPath: '',\n    experimentalTuning: false,\n    inferenceRuntime: 'llama-cpp',\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(AboutInstructLab);\n  expect(screen.queryByText('Start Fine Tuning')).not.toBeInTheDocument();\n});\n\ntest('navigates to /tune/start when Start Fine Tuning is clicked', async () => {\n  mockConfiguration.set({\n    experimentalGPU: false,\n    showGPUPromotion: true,\n    modelUploadDisabled: false,\n    modelsPath: '',\n    experimentalTuning: true,\n    inferenceRuntime: 'llama-cpp',\n    apiPort: -1,\n    appearance: 'dark',\n  });\n  render(AboutInstructLab);\n  const btn = await screen.findByText('Start Fine Tuning');\n  await userEvent.click(btn);\n  expect(router.goto).toHaveBeenCalledWith('/tune/start');\n});\n\ntest('opens documentation link when clicked', async () => {\n  render(AboutInstructLab);\n  const docLink = await screen.findByText('Access InstructLab documentation');\n  await userEvent.click(docLink);\n  
expect(studioClient.openURL).toHaveBeenCalledWith('https://docs.instructlab.ai/');\n});\n\ntest('opens HuggingFace link when clicked', async () => {\n  render(AboutInstructLab);\n  const hfLink = await screen.findByText('View InstructLab on HuggingFace');\n  await userEvent.click(hfLink);\n  expect(studioClient.openURL).toHaveBeenCalledWith('https://huggingface.co/instructlab');\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/instructlab/AboutInstructLab.svelte",
    "content": "<script lang=\"ts\">\nimport { faPlusCircle } from '@fortawesome/free-solid-svg-icons';\nimport { NavPage, Button, Link, Carousel } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\nimport { router } from 'tinro';\nimport fineTunningLargeModelsWithInstructLab from '/@/lib/images/fineTunningLargeModelsWithInstructLab.png';\nimport instructLabDemocratizingAIModelsAtScale from '/@/lib/images/instructLabDemocratizingAIModelsAtScale.png';\nimport instructLabArchitectureImplementationOverview from '/@/lib/images/instructLabArchitectureImplementationOverview.png';\nimport howInstructLabsSyntheticDataGenerationEnhancesLLM from '/@/lib/images/howInstructLabsSyntheticDataGenerationEnhancesLLM.png';\nimport instructLabTitleImage from '/@/lib/images/instructLabTitleImage.png';\nimport AboutInstructLabDiscoverCard from '../../lib/instructlab/AboutInstructLabDiscoverCard.svelte';\nimport AboutInstructLabExploreCard from '../../lib/instructlab/AboutInstructLabExploreCard.svelte';\nimport { configuration } from '/@/stores/extensionConfiguration';\nimport { onMount } from 'svelte';\n\nlet experimentalTuning = false;\n\nonMount(() => {\n  configuration.subscribe(val => {\n    experimentalTuning = val?.experimentalTuning ?? 
false;\n  });\n});\n\nfunction start(): void {\n  router.goto('/tune/start');\n}\n\nconst instructLabDocumentation = 'https://docs.instructlab.ai/';\nconst instructLabHuggingFace = 'https://huggingface.co/instructlab';\nconst instructLabSamples = '';\n\nasync function openInstructLabDocumentation(): Promise<void> {\n  await studioClient.openURL(instructLabDocumentation);\n}\n\nasync function openInstructLabHuggingFace(): Promise<void> {\n  await studioClient.openURL(instructLabHuggingFace);\n}\n\nasync function openSamples(): Promise<void> {\n  await studioClient.openURL(instructLabSamples);\n}\n\ninterface AboutInstructLabExploreCardInterface {\n  title: string;\n  link: string;\n  image: string;\n  isVideo?: boolean;\n}\n\nlet cards: AboutInstructLabExploreCardInterface[] = [\n  {\n    title: 'InstructLab: Democratizing AI Models at Scale',\n    link: 'https://www.ibm.com/training/course/instructlab-democratizing-ai-models-at-scale-DL01001G',\n    image: instructLabDemocratizingAIModelsAtScale,\n    isVideo: false,\n  },\n  {\n    title: 'Fine Tuning Large Language Models with InstructLab',\n    link: 'https://www.youtube.com/watch?v=pu3-PeBG0YU',\n    image: fineTunningLargeModelsWithInstructLab,\n    isVideo: true,\n  },\n  {\n    title: 'How InstructLab’s synthetic data generation enhances LLMs',\n    link: 'https://www.redhat.com/en/blog/how-instructlabs-synthetic-data-generation-enhances-llms',\n    image: howInstructLabsSyntheticDataGenerationEnhancesLLM,\n    isVideo: false,\n  },\n  {\n    title: 'InstructLab Architecture & Implementation Overview',\n    link: 'https://blog.instructlab.ai/2024/11/instructlab-architecture-implementation-overview/',\n    image: instructLabArchitectureImplementationOverview,\n    isVideo: false,\n  },\n];\n</script>\n\n{#snippet card(cardItem: AboutInstructLabExploreCardInterface)}\n  <AboutInstructLabExploreCard\n    title={cardItem.title}\n    link={cardItem.link}\n    image={cardItem.image}\n    isVideo={cardItem.isVideo} 
/>\n{/snippet}\n\n<NavPage title=\"About InstructLab\" searchEnabled={false}>\n  {#snippet additionalActions()}\n    {#if experimentalTuning}\n      <Button icon={faPlusCircle} on:click={start}>Start Fine Tuning</Button>\n    {/if}\n  {/snippet}\n\n  {#snippet content()}\n    <div class=\"flex flex-col min-w-full min-h-full\">\n      <div class=\"min-w-full min-h-full flex-1\">\n        <div class=\"text-[var(--pd-details-body-text)] mt-4 px-5 space-y-5\" aria-label=\"inner-content\">\n          <div class=\"flex bg-[var(--pd-content-card-bg)] rounded-md p-5 gap-3 flex-row flex-nowrap items-center\">\n            <img\n              src={instructLabTitleImage}\n              class=\"max-h-[100%] w-auto max-w-[20%] object-contain rounded-md self-start\"\n              alt=\"InstructLab\" />\n            <div class=\"flex flex-col flex-1 h-100% self-start gap-4\">\n              <div\n                class=\"text-[var(--pd-content-card-text)] truncate text-ellipsis overflow-hidden whitespace-pre-line flex flex-col gap-4\">\n                <div>\n                  Here you can create an InstructLab session to improve trained models with specialized knowledge and\n                  skill tuning. 
InstructLab is a model-agnostic open source AI project that facilitates contributions to\n                  Large Language Models (LLMs).\n                </div>\n                <!-- Remove hidden once we have some samples data -->\n                <div class=\"hidden\">\n                  Start by trying one of our samples or bring your own knowledge and skills files.\n                </div>\n              </div>\n              <div class=\"flex flex-row justify-start items-center gap-3 mt-2\">\n                <Link on:click={openInstructLabDocumentation}>Access InstructLab documentation</Link>\n                <Link on:click={openInstructLabHuggingFace}>View InstructLab on HuggingFace</Link>\n              </div>\n            </div>\n          </div>\n\n          <!-- Remove hidden once we have some samples data -->\n          <div class=\"flex flex-1 flex-col hidden\">\n            <p class=\"text-xl text-[var(--pd-details-body-text)]\">\n              Discover from available <Link on:click={openSamples} class=\"font-bold\">SAMPLES</Link>\n            </p>\n            <div class=\"flex flex-col pt-5 grow\">\n              <div class=\"grid grid-cols-3 gap-x-2\">\n                <AboutInstructLabDiscoverCard title=\"asdas\" link=\"asd\" image={instructLabTitleImage} desc=\"aaaa\" />\n                <AboutInstructLabDiscoverCard title=\"asdas\" link=\"asd\" image={instructLabTitleImage} desc=\"aaaa\" />\n                <AboutInstructLabDiscoverCard title=\"asdas\" link=\"asd\" image={instructLabTitleImage} desc=\"aaaa\" />\n              </div>\n            </div>\n          </div>\n          <div class=\"flex flex-1 flex-col\">\n            <p class=\"text-xl text-[var(--pd-details-body-text)]\">Explore articles and videos</p>\n            <div class=\"rounded-md my-5 bg-[var(--pd-content-card-bg)] p-2\">\n              <Carousel cards={cards} card={card} />\n            </div>\n          </div>\n        </div>\n      </div>\n    </div>\n  
{/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/instructlab/StartInstructLabContainer.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { render, screen } from '@testing-library/svelte';\nimport StartInstructLabContainer from '/@/pages/instructlab/StartInstructLabContainer.svelte';\nimport { instructlabClient, studioClient } from '/@/utils/client';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { VMType } from '@shared/models/IPodman';\nimport userEvent from '@testing-library/user-event';\nimport * as tasks from '/@/stores/tasks';\nimport { writable } from 'svelte/store';\n\nvi.mock('../../stores/tasks', async () => {\n  return {\n    tasks: vi.fn(),\n  };\n});\n\nconst getContainerConnectionInfoMock = vi.fn();\n\nvi.mock('../../stores/containerProviderConnections', () => ({\n  containerProviderConnections: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f(getContainerConnectionInfoMock());\n      return (): void => {};\n    },\n  },\n}));\n\nvi.mock('../../utils/client', async () => ({\n  instructlabClient: {\n    getInstructlabContainerId: vi.fn(),\n    routeToInstructLabContainerTerminal: 
vi.fn().mockResolvedValue(undefined),\n  },\n  studioClient: {\n    openURL: vi.fn().mockResolvedValue(true),\n  },\n  rpcBrowser: {\n    subscribe: (): unknown => {\n      return {\n        unsubscribe: (): void => {},\n      };\n    },\n  },\n}));\n\nconst containerProviderConnection: ContainerProviderConnectionInfo = {\n  name: 'Dummy container connection provider',\n  status: 'started',\n  type: 'podman',\n  vmType: VMType.QEMU,\n  providerId: 'podman',\n};\n\nbeforeEach(() => {\n  getContainerConnectionInfoMock.mockReturnValue([containerProviderConnection]);\n  vi.mocked(tasks).tasks = writable([]);\n});\n\ntest('start button should be displayed if no InstructLab container', async () => {\n  render(StartInstructLabContainer);\n\n  const startBtn = screen.getByTitle('Start InstructLab container');\n  expect(startBtn).toBeDefined();\n});\n\ntest('start button should be displayed and enabled', async () => {\n  render(StartInstructLabContainer);\n\n  const startBtn = screen.getByTitle('Start InstructLab container');\n  expect(startBtn).toBeDefined();\n  expect(startBtn).toBeEnabled();\n});\n\ntest('open button should be displayed if no InstructLab container', async () => {\n  vi.mocked(instructlabClient.getInstructlabContainerId).mockResolvedValue('containerId');\n  render(StartInstructLabContainer);\n\n  await vi.waitFor(() => {\n    const openBtn = screen.getByTitle('Open InstructLab container');\n    expect(openBtn).toBeDefined();\n  });\n});\n\ntest('click open button should redirect to InstructLab container', async () => {\n  vi.mocked(instructlabClient.getInstructlabContainerId).mockResolvedValue('containerId');\n  render(StartInstructLabContainer);\n\n  const openBtn = await vi.waitFor(() => {\n    const openBtn = screen.getByTitle('Open InstructLab container');\n    expect(openBtn).toBeDefined();\n    return openBtn;\n  });\n\n  await userEvent.click(openBtn);\n  
expect(instructlabClient.routeToInstructLabContainerTerminal).toHaveBeenCalledWith('containerId');\n});\n\ntest('documentation button should be displayed and redirect to external link', async () => {\n  render(StartInstructLabContainer);\n\n  const docBtn = screen.getByTitle('Read documentation');\n  expect(docBtn).toBeDefined();\n\n  await userEvent.click(docBtn);\n  expect(studioClient.openURL).toHaveBeenCalledWith('https://docs.instructlab.ai');\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/instructlab/StartInstructLabContainer.svelte",
    "content": "<script lang=\"ts\">\nimport { tasks } from '/@/stores/tasks';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { Button, ErrorMessage, FormPage } from '@podman-desktop/ui-svelte';\nimport { containerProviderConnections } from '/@/stores/containerProviderConnections';\nimport ContainerProviderConnectionSelect from '/@/lib/select/ContainerProviderConnectionSelect.svelte';\nimport TrackedTasks from '/@/lib/progress/TrackedTasks.svelte';\nimport { instructlabClient, studioClient } from '/@/utils/client';\nimport type { Task } from '@shared/models/ITask';\nimport { onMount } from 'svelte';\nimport { INSTRUCTLAB_CONTAINER_TRACKINGID } from '@shared/models/instructlab/IInstructlabContainerInfo';\nimport { filterByLabel } from '/@/utils/taskUtils';\n\n// The container provider connection to use\nlet containerProviderConnection: ContainerProviderConnectionInfo | undefined = $state(undefined);\n\n// Filtered connections (started)\nlet startedContainerProviderConnectionInfo: ContainerProviderConnectionInfo[] = $derived(\n  $containerProviderConnections.filter(connection => connection.status === 'started'),\n);\n\n// If the creation of the InstructLab fail\nlet errorMsg: string | undefined = $state(undefined);\n// The containerId will be included in the tasks when the creation\n// process will be completed\nlet containerId: string | undefined = $state(undefined);\n// available means the container is started\nlet available: boolean = $derived(!!containerId);\n// loading state\nlet loading = $derived(\n  containerId === undefined &&\n    filterByLabel($tasks, { trackingId: INSTRUCTLAB_CONTAINER_TRACKINGID }).length > 0 &&\n    !errorMsg,\n);\n\nonMount(async () => {\n  containerId = await instructlabClient.getInstructlabContainerId();\n});\n\n$effect(() => {\n  // Select default connection\n  if (!containerProviderConnection && startedContainerProviderConnectionInfo.length > 0) {\n    
containerProviderConnection = startedContainerProviderConnectionInfo[0];\n  }\n});\n\nfunction processTasks(trackedTasks: Task[]): void {\n  // Check for errors\n  // hint: we do not need to display them as the TasksProgress component will\n  errorMsg = trackedTasks.find(task => task.error)?.error;\n\n  const task: Task | undefined = trackedTasks.find(task => 'containerId' in (task.labels ?? {}));\n  if (task === undefined) return;\n\n  containerId = task.labels?.['containerId'];\n}\n\n// Submit method when the form is valid\nasync function submit(): Promise<void> {\n  errorMsg = undefined;\n  try {\n    await instructlabClient.requestCreateInstructlabContainer({\n      connection: $state.snapshot(containerProviderConnection),\n    });\n  } catch (err: unknown) {\n    console.error('Something wrong while trying to create the InstructLab container.', err);\n    errorMsg = String(err);\n  }\n}\n\n// Navigate to the new created service\nfunction openInstructLabContainer(): void {\n  instructlabClient.routeToInstructLabContainerTerminal(containerId!).catch(console.error);\n}\n\nfunction openDocumentation(): void {\n  studioClient.openURL('https://docs.instructlab.ai').catch(console.error);\n}\n</script>\n\n<FormPage title=\"Run InstructLab as a container\">\n  {#snippet content()}\n    <div class=\"flex flex-col w-full\">\n      <header class=\"mx-5 mt-5\">\n        <div class=\"w-full flex flex-row\">\n          {#if available}\n            <Button inProgress={!available} title=\"Open InstructLab container\" on:click={openInstructLabContainer}>\n              Open InstructLab container\n            </Button>\n          {:else}\n            <Button title=\"Start InstructLab container\" inProgress={loading} on:click={submit}>\n              Start InstructLab container\n            </Button>\n          {/if}\n          <Button title=\"Read documentation\" type=\"link\" on:click={openDocumentation}>Read documentation</Button>\n        </div>\n      </header>\n      <!-- 
tasks tracked -->\n      <TrackedTasks\n        class=\"mx-5 mt-5\"\n        onChange={processTasks}\n        trackingId={INSTRUCTLAB_CONTAINER_TRACKINGID}\n        tasks={$tasks} />\n\n      <!-- form -->\n      <div class=\"bg-[var(--pd-content-card-bg)] m-5 space-y-6 px-8 sm:pb-6 xl:pb-8 rounded-lg h-fit\">\n        <div class=\"w-full text-[var(--pd-details-body-text)]\">\n          <!-- container provider connection input -->\n          {#if startedContainerProviderConnectionInfo.length > 1}\n            <label for=\"\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n              >Container engine</label>\n            <ContainerProviderConnectionSelect\n              bind:value={containerProviderConnection}\n              containerProviderConnections={startedContainerProviderConnectionInfo} />\n          {/if}\n\n          <h1 class=\"pt-4 mb-2 text-lg first-letter:uppercase\">Instructions</h1>\n\n          <p>\n            Once InstructLab container is started from Podman Desktop, you can start using and experimenting with it by\n            directly getting a terminal into the container. For that, go to the list of containers and search for\n            InstructLab container, click to get into the details of that container. 
Find the \"terminal\" tab, where you\n            can input the following commands.\n          </p>\n          <p></p>\n\n          <p>Please check the documentation to learn more about these commands and how InstructLab works.</p>\n\n          <h2 class=\"pt-4 mb-2\">Create InstructLab configuration</h2>\n\n          <code class=\"pt-4 mb-2 block\">ilab config init</code>\n\n          <h2 class=\"pt-4 mb-2\">Download model</h2>\n\n          <code class=\"pt-4 mb-2 block\">ilab model download</code>\n\n          <h2 class=\"pt-4 mb-2\">Serve the model</h2>\n\n          <code class=\"pt-4 mb-2 block\">ilab model serve</code>\n\n          <h2 class=\"pt-4 mb-2\">Chat with the model</h2>\n\n          <code class=\"pt-4 mb-2 block\">ilab model chat</code>\n\n          {#if errorMsg !== undefined}\n            <ErrorMessage error={errorMsg} />\n          {/if}\n        </div>\n      </div>\n    </div>\n  {/snippet}\n</FormPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/llama-stack/StartLlamaStackContainer.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\nimport { assert, beforeEach, expect, test, vi } from 'vitest';\nimport { fireEvent, render, screen } from '@testing-library/svelte';\nimport StartLlamaStackContainer from '/@/pages/llama-stack/StartLlamaStackContainer.svelte';\nimport { llamaStackClient, studioClient } from '/@/utils/client';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { LLAMA_STACK_CONTAINER_TRACKINGID } from '@shared/models/llama-stack/LlamaStackContainerInfo';\nimport { VMType } from '@shared/models/IPodman';\nimport userEvent from '@testing-library/user-event';\nimport * as tasks from '/@/stores/tasks';\nimport type { Task } from '@shared/models/ITask';\nimport { writable } from 'svelte/store';\nimport { tick } from 'svelte';\n\nvi.mock('../../stores/tasks', async () => {\n  return {\n    tasks: vi.fn(),\n  };\n});\n\nconst getContainerConnectionInfoMock = vi.fn();\n\nvi.mock('../../stores/containerProviderConnections', () => ({\n  containerProviderConnections: {\n    subscribe: (f: (msg: unknown) => void) => {\n      f(getContainerConnectionInfoMock());\n      return (): void => {};\n    
},\n  },\n}));\n\nvi.mock('../../utils/client', async () => ({\n  llamaStackClient: {\n    getLlamaStackContainersInfo: vi.fn(),\n    routeToLlamaStackContainerTerminal: vi.fn().mockResolvedValue(undefined),\n    requestcreateLlamaStackContainerss: vi.fn(),\n  },\n  studioClient: {\n    openURL: vi.fn().mockResolvedValue(true),\n  },\n  rpcBrowser: {\n    subscribe: (): unknown => {\n      return {\n        unsubscribe: (): void => {},\n      };\n    },\n  },\n}));\n\nconst containerProviderConnection: ContainerProviderConnectionInfo = {\n  name: 'Dummy container connection provider',\n  status: 'started',\n  type: 'podman',\n  vmType: VMType.QEMU,\n  providerId: 'podman',\n};\n\nbeforeEach(() => {\n  getContainerConnectionInfoMock.mockReturnValue([containerProviderConnection]);\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue(undefined);\n  vi.mocked(tasks).tasks = writable([]);\n});\n\ntest('start button should be displayed if no Llama Stack container', async () => {\n  render(StartLlamaStackContainer);\n\n  const startBtn = screen.getByTitle('Start Llama Stack container');\n  expect(startBtn).toBeDefined();\n});\n\ntest('Instructions block should not be displayed if no Llama Stack container', async () => {\n  render(StartLlamaStackContainer);\n\n  await tick();\n  const instructions = screen.queryByText('Instructions');\n  expect(instructions).not.toBeInTheDocument();\n});\n\ntest('Instructions block should be displayed if Llama Stack container is found', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 10000, state: 'running' },\n    playground: { containerId: 'pgId', port: 60000, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => screen.getByText('Instructions'));\n});\n\ntest('start button should be displayed and enabled', async () => {\n  render(StartLlamaStackContainer);\n\n  const startBtn = 
screen.getByTitle('Start Llama Stack container');\n  expect(startBtn).toBeDefined();\n  expect(startBtn).toBeEnabled();\n});\n\ntest('open button should be displayed if Llama Stack container is found', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 10000, state: 'running' },\n    playground: { containerId: 'playgroundId', port: 5000, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    const openBtn = screen.getByTitle('Open Llama Stack Server container');\n    expect(openBtn).toBeDefined();\n  });\n});\n\ntest('playground button should be disabled if playground port is not available', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 10000, state: 'running' },\n    playground: { containerId: 'playgroundId', port: 5000, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    const playgroundBtn = screen.getByTitle('Explore LLama-Stack environment');\n    expect(playgroundBtn).toBeDefined();\n    expect(playgroundBtn).toBeEnabled();\n  });\n});\n\ntest('playground button should be enabled if playground port is present', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 10000, state: 'running' },\n    playground: { containerId: 'pgId', port: 10001, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    const playgroundBtn = screen.getByTitle('Explore LLama-Stack environment');\n    expect(playgroundBtn).toBeDefined();\n    expect(playgroundBtn).toBeEnabled();\n  });\n});\n\ntest('click playground button should open url', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 
10000, state: 'running' },\n    playground: { containerId: 'pgId', port: 10001, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  const playgroundBtn = await vi.waitFor(() => {\n    const playgroundBtn = screen.getByTitle('Explore LLama-Stack environment');\n    expect(playgroundBtn).toBeDefined();\n    return playgroundBtn;\n  });\n\n  await userEvent.click(playgroundBtn);\n  expect(studioClient.openURL).toHaveBeenCalledWith('http://localhost:10001');\n});\n\ntest('click open button should redirect to Llama Stack server container', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 10000, state: 'running' },\n    playground: { containerId: 'playgroundId', port: 5000, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  const openBtn = await vi.waitFor(() => {\n    const openBtn = screen.getByTitle('Open Llama Stack Server container');\n    expect(openBtn).toBeDefined();\n    return openBtn;\n  });\n\n  await userEvent.click(openBtn);\n  expect(llamaStackClient.routeToLlamaStackContainerTerminal).toHaveBeenCalledWith('containerId');\n});\n\ntest('port should be displayed', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 10000, state: 'running' },\n    playground: { containerId: 'pgId', port: 60000, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    screen.getByText(/http:\\/\\/localhost:10000/);\n  });\n});\n\ntest('link to Swagger UI should be displayed', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'containerId', port: 10000, state: 'running' },\n    playground: { containerId: 'pgId', port: 60000, state: 'running' },\n  });\n  render(StartLlamaStackContainer);\n\n  let link: HTMLElement | undefined;\n  await vi.waitFor(() => 
{\n    link = screen.getByText('swagger documentation');\n  });\n  assert(link, 'link should be defined');\n  await fireEvent.click(link);\n  expect(studioClient.openURL).toHaveBeenCalledWith('http://localhost:10000/docs');\n});\n\ntest('click start button triggers requestcreateLlamaStackContainerss', async () => {\n  vi.mocked(llamaStackClient.requestcreateLlamaStackContainerss).mockResolvedValue(undefined);\n\n  render(StartLlamaStackContainer);\n\n  const startBtn = await screen.findByTitle('Start Llama Stack container');\n  await userEvent.click(startBtn);\n\n  expect(llamaStackClient.requestcreateLlamaStackContainerss).toHaveBeenCalledWith({\n    connection: containerProviderConnection,\n  });\n});\n\ntest('displays error if requestcreateLlamaStackContainerss throws', async () => {\n  vi.mocked(llamaStackClient.requestcreateLlamaStackContainerss).mockRejectedValue(new Error('Creation failed'));\n\n  render(StartLlamaStackContainer);\n\n  const startBtn = await screen.findByTitle('Start Llama Stack container');\n  await userEvent.click(startBtn);\n\n  await vi.waitFor(() => {\n    screen.getByText('Error: Creation failed');\n  });\n});\n\ntest('updates stack_containers from tasks', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'serverId', port: 50000, state: 'running' },\n    playground: { containerId: 'pgId', port: 60000, state: 'running' },\n  });\n  const task = {\n    labels: {\n      containerId: 'serverId',\n      port: '50000',\n      state: 'running',\n      playgroundId: 'pgId',\n      playgroundPort: '60000',\n      playgroundState: 'running',\n    },\n  } as unknown as Task;\n\n  vi.mocked(tasks).tasks = writable([task]);\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    const btn = screen.getByTitle('Open Llama Stack Server container');\n    expect(btn).toBeEnabled();\n  });\n});\n\ntest('start button switches to open buttons when server and playground 
ready', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue({\n    server: { containerId: 'serverId', port: 50000, state: 'running' },\n    playground: { containerId: 'pgId', port: 60000, state: 'running' },\n  });\n\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    const serverBtn = screen.getByTitle('Open Llama Stack Server container');\n    const playgroundBtn = screen.getByTitle('Open Llama Stack Playground container');\n    expect(serverBtn).toBeEnabled();\n    expect(playgroundBtn).toBeEnabled();\n  });\n});\n\ntest('selects first started container provider by default', async () => {\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    expect(containerProviderConnection).toEqual(containerProviderConnection);\n  });\n});\n\ntest('start button shows inProgress state when tasks are loading', async () => {\n  vi.mocked(llamaStackClient.getLlamaStackContainersInfo).mockResolvedValue(undefined);\n  const loadingTask = { state: 'loading', labels: { trackingId: LLAMA_STACK_CONTAINER_TRACKINGID } } as unknown as Task;\n  vi.mocked(tasks).tasks = writable([loadingTask]);\n  render(StartLlamaStackContainer);\n\n  await vi.waitFor(() => {\n    const startBtn = screen.getByTitle('Start Llama Stack container');\n    expect(startBtn).toBeInTheDocument();\n  });\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/llama-stack/StartLlamaStackContainer.svelte",
    "content": "<script lang=\"ts\">\nimport { tasks } from '/@/stores/tasks';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\nimport { Button, ErrorMessage, FormPage, Link, Tooltip } from '@podman-desktop/ui-svelte';\nimport { containerProviderConnections } from '/@/stores/containerProviderConnections';\nimport ContainerProviderConnectionSelect from '/@/lib/select/ContainerProviderConnectionSelect.svelte';\nimport TrackedTasks from '/@/lib/progress/TrackedTasks.svelte';\nimport { llamaStackClient, studioClient } from '/@/utils/client';\nimport type { Task } from '@shared/models/ITask';\nimport { onMount } from 'svelte';\nimport { filterByLabel } from '/@/utils/taskUtils';\nimport {\n  LLAMA_STACK_CONTAINER_TRACKINGID,\n  type LlamaStackContainers,\n} from '@shared/models/llama-stack/LlamaStackContainerInfo';\n\n// The container provider connection to use\nlet containerProviderConnection: ContainerProviderConnectionInfo | undefined = $state(undefined);\n\n// Filtered connections (started)\nlet startedContainerProviderConnectionInfo: ContainerProviderConnectionInfo[] = $derived(\n  $containerProviderConnections.filter(connection => connection.status === 'started'),\n);\n\n// If the creation of the llama stack fail\nlet errorMsg: string | undefined = $state(undefined);\n// The containerId will be included in the tasks when the creation\n// process will be completed\nlet stack_containers = $state<LlamaStackContainers | undefined>(undefined);\n// available means the container is started\nlet available: boolean = $derived(\n  stack_containers?.server?.state === 'running' && stack_containers?.playground?.state === 'running',\n);\n// flag to immediately show the spinner on submit button\nlet start_spinner: boolean = $state(false);\n\nlet loading = $derived(\n  stack_containers === undefined &&\n    filterByLabel($tasks, { trackingId: LLAMA_STACK_CONTAINER_TRACKINGID }).length > 0 &&\n    !errorMsg,\n);\n\n// Keep UI in 
\"starting\" state until tasks finish, even if containersInfo flips to ready\nlet hasActiveTasks = $derived(\n  filterByLabel($tasks, { trackingId: LLAMA_STACK_CONTAINER_TRACKINGID }).some(t => t.state === 'loading'),\n);\nlet uiReady = $derived(available && !hasActiveTasks);\n\nonMount(async () => {\n  stack_containers = await llamaStackClient.getLlamaStackContainersInfo();\n});\n\n$effect(() => {\n  // Select default connection\n  if (!containerProviderConnection && startedContainerProviderConnectionInfo.length > 0) {\n    containerProviderConnection = startedContainerProviderConnectionInfo[0];\n  }\n});\n\nfunction processTasks(trackedTasks: Task[]): void {\n  // capture first error (if any)\n  errorMsg = trackedTasks.find(task => task.error)?.error;\n\n  // find the first task with all required labels\n  const task = trackedTasks.find(\n    t =>\n      t.labels &&\n      'containerId' in t.labels &&\n      'port' in t.labels &&\n      'state' in t.labels &&\n      'playgroundId' in t.labels &&\n      'playgroundPort' in t.labels &&\n      'playgroundState' in t.labels,\n  );\n  if (!task) return;\n\n  stack_containers = {\n    server: {\n      containerId: task.labels!['containerId'],\n      port: parseInt(task.labels!['port']),\n      state: task.labels!['state'],\n    },\n    playground: {\n      containerId: task.labels!['playgroundId'],\n      port: parseInt(task.labels!['playgroundPort']),\n      state: task.labels!['playgroundState'],\n    },\n  };\n}\n\n// Submit method when the form is valid\nasync function submit(): Promise<void> {\n  start_spinner = true;\n  errorMsg = undefined;\n  try {\n    await llamaStackClient.requestcreateLlamaStackContainerss({\n      connection: $state.snapshot(containerProviderConnection),\n    });\n  } catch (err: unknown) {\n    console.error('Something wrong while trying to create the Llama Stack container.', err);\n    errorMsg = String(err);\n  } finally {\n    start_spinner = false;\n  }\n}\n\n// Navigate to the new 
created service\nfunction openLlamaStackServerContainer(): void {\n  llamaStackClient.routeToLlamaStackContainerTerminal(stack_containers!.server!.containerId!).catch(console.error);\n}\n\nfunction openLlamaStackPlaygroundContainer(): void {\n  llamaStackClient.routeToLlamaStackContainerTerminal(stack_containers!.playground!.containerId!).catch(console.error);\n}\n\nfunction openLlamaStackPlayground(): void {\n  openLink(`http://localhost:${stack_containers?.playground?.port}`);\n}\n\nfunction openLink(url: string): void {\n  studioClient.openURL(url).catch(err => console.error(`Error opening URL: ${url}`, err));\n}\n</script>\n\n<FormPage title=\"Run Llama Stack as a container\">\n  {#snippet content()}\n    <div class=\"flex flex-col w-full\">\n      <header class=\"mx-5 mt-5\">\n        <div class=\"w-full flex flex-row space-x-2\">\n          {#if uiReady}\n            <Button\n              inProgress={!uiReady}\n              title=\"Open Llama Stack Server container\"\n              on:click={openLlamaStackServerContainer}>\n              Open Llama Stack Server container\n            </Button>\n            <Button\n              inProgress={!uiReady}\n              title=\"Open Llama Stack Playground container\"\n              on:click={openLlamaStackPlaygroundContainer}>\n              Open Llama Stack Playground container\n            </Button>\n            <Button\n              disabled={!stack_containers?.playground?.port}\n              inProgress={!uiReady}\n              title=\"Explore LLama-Stack environment\"\n              on:click={openLlamaStackPlayground}>\n              Explore LLama-Stack environment\n            </Button>\n          {:else}\n            <Button\n              title=\"Start Llama Stack container\"\n              inProgress={hasActiveTasks || loading || start_spinner}\n              on:click={submit}>\n              Start Llama Stack container\n            </Button>\n          {/if}\n        </div>\n      </header>\n      
<!-- tasks tracked -->\n      <TrackedTasks\n        class=\"mx-5 mt-5\"\n        onChange={processTasks}\n        trackingId={LLAMA_STACK_CONTAINER_TRACKINGID}\n        tasks={$tasks} />\n\n      <!-- form -->\n      {#if startedContainerProviderConnectionInfo.length > 1 || available || errorMsg !== undefined}\n        <div class=\"bg-[var(--pd-content-card-bg)] m-5 space-y-6 px-8 sm:pb-6 xl:pb-8 rounded-lg h-fit\">\n          <div class=\"w-full text-[var(--pd-details-body-text)]\">\n            <!-- container provider connection input -->\n            {#if startedContainerProviderConnectionInfo.length > 1}\n              <label for=\"\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\"\n                >Container engine</label>\n              <ContainerProviderConnectionSelect\n                bind:value={containerProviderConnection}\n                containerProviderConnections={startedContainerProviderConnectionInfo} />\n            {/if}\n\n            {#if available || errorMsg !== undefined}\n              <h1 class=\"pt-4 mb-2 text-lg first-letter:uppercase\">Instructions</h1>\n\n              {#if available}\n                <p>Llama Stack Server API is accessible at http://localhost:{stack_containers?.server?.port}</p>\n                <p>\n                  Access\n                  <Tooltip tip=\"Open swagger documentation\">\n                    <Link\n                      aria-label=\"swagger documentation\"\n                      on:click={openLink.bind(undefined, `http://localhost:${stack_containers?.server?.port}/docs`)}>\n                      swagger documentation\n                    </Link>\n                  </Tooltip>\n                </p>\n              {/if}\n              {#if errorMsg !== undefined}\n                <ErrorMessage error={errorMsg} />\n              {/if}\n            {/if}\n          </div>\n        </div>\n      {/if}\n    </div>\n  {/snippet}\n</FormPage>\n"
  },
  {
    "path": "packages/frontend/src/pages/server-information/LocalServer.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport '@testing-library/jest-dom/vitest';\n\nimport { render, screen } from '@testing-library/svelte';\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport LocalServer from './LocalServer.svelte';\nimport { writable, type Writable } from 'svelte/store';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\nimport { configuration } from '/@/stores/extensionConfiguration';\nimport { userEvent } from '@testing-library/user-event';\nimport { studioClient } from '/@/utils/client';\n\nconst updateExtensionConfigurationMock = vi.fn();\n\nvi.mock('/@/stores/extensionConfiguration', () => ({\n  configuration: {\n    subscribe: vi.fn(),\n    unsubscribe: vi.fn(),\n  },\n}));\n\nvi.mock('/@/utils/client', () => ({\n  studioClient: {\n    updateExtensionConfiguration: vi.fn(),\n    telemetryLogUsage: vi.fn(),\n  },\n}));\n\nconst mockConfiguration: Writable<ExtensionConfiguration> = writable({\n  experimentalGPU: false,\n  modelsPath: '',\n  apiPort: 10434,\n  inferenceRuntime: 'llama-cpp',\n  modelUploadDisabled: false,\n  experimentalTuning: false,\n  showGPUPromotion: false,\n  appearance: 'dark',\n});\n\nbeforeEach(() => {\n  
vi.resetAllMocks();\n  vi.mocked(studioClient.updateExtensionConfiguration).mockImplementation(updateExtensionConfigurationMock);\n  vi.mocked(configuration).subscribe.mockImplementation(run => mockConfiguration.subscribe(run));\n});\n\ntest('port input should update on user input', async () => {\n  render(LocalServer);\n\n  const portInput: HTMLInputElement = screen.getByRole('textbox');\n  expect(portInput).toBeDefined();\n\n  await userEvent.clear(portInput);\n  await userEvent.type(portInput, '8888');\n\n  await vi.waitFor(() => {\n    expect(portInput.value).toBe('8888');\n  });\n});\n\ntest('should show default port', async () => {\n  render(LocalServer);\n  const portInput: HTMLInputElement = screen.getByRole('textbox');\n\n  expect(portInput).toBeDefined();\n\n  await userEvent.clear(portInput);\n  // valid port should be >= 0 and <= 65535\n  await userEvent.type(portInput, '123456789');\n  await new Promise(resolve => setTimeout(resolve, 11));\n\n  await vi.waitFor(() => expect(updateExtensionConfigurationMock).toBeCalled());\n});\n"
  },
  {
    "path": "packages/frontend/src/pages/server-information/LocalServer.svelte",
    "content": "<script lang=\"ts\">\nimport { NavPage, Input, ErrorMessage } from '@podman-desktop/ui-svelte';\nimport { studioClient } from '/@/utils/client';\nimport { configuration } from '/@/stores/extensionConfiguration';\nimport { onMount, onDestroy } from 'svelte';\n\n// The AI Lab Port is the bind value to form input\nlet aiLabPort: number | undefined = $state(undefined);\nlet invalidPort: boolean = $state(false);\nlet errorMsg: string | undefined = $state(undefined);\n\nonMount(() => {\n  const port = $configuration?.apiPort;\n  if (port) {\n    aiLabPort = port;\n  } else {\n    aiLabPort = 10434;\n  }\n});\n\nonDestroy(async () => {\n  // Set port to default one when user closes the screen if this would be done\n  // in place where is invalidPort set the user would not be able to delete\n  // the whole port and start from scratch\n  if (invalidPort) {\n    await setPortToDefaultOne();\n  }\n});\n\nasync function setPortToDefaultOne(): Promise<void> {\n  aiLabPort = 10434;\n  await studioClient.updateExtensionConfiguration({ apiPort: aiLabPort });\n}\n\nasync function onAiLabPortInput(event: Event): Promise<void> {\n  const raw = (event.target as HTMLInputElement).value;\n  try {\n    const port = parseInt(raw);\n    // 0 <= port <= 65535\n    if (0 <= port && port <= 65535) {\n      await studioClient.updateExtensionConfiguration({ apiPort: port });\n      aiLabPort = port;\n      invalidPort = false;\n      errorMsg = undefined;\n    } else {\n      errorMsg = 'An invalid port has been passed, the port must be between 0 and 65535';\n      invalidPort = true;\n    }\n  } catch (e: unknown) {\n    errorMsg = String(e);\n    console.warn('invalid value for AI Lab API port', e);\n    await setPortToDefaultOne();\n  }\n}\n</script>\n\n<NavPage title=\"Local Server\" searchEnabled={false}>\n  {#snippet content()}\n    <div class=\"flex flex-col min-w-full min-h-full\">\n      <div class=\"min-w-full min-h-full flex-1\">\n        <div 
class=\"text-[var(--pd-details-body-text)] mt-4 px-5 space-y-5\" aria-label=\"inner-content\">\n          <p>\n            Integrate Podman AI Lab directly into your development workflows by using its REST API endpoints. Compatible\n            with Ollama's endpoints, you can seamlessly access and utilize the capabilities of Podman AI Lab without\n            relying on its graphical interface.\n          </p>\n        </div>\n        <div class=\"px-5 space-y-5\">\n          <div class=\"bg-[var(--pd-content-card-bg)] m-5 space-y-6 px-8 sm:pb-6 xl:pb-8 rounded-lg h-fit\">\n            <div class=\"w-full\">\n              <label for=\"aiLabPort\" class=\"pt-4 block mb-2 font-bold text-[var(--pd-content-card-header-text)]\">\n                Port on which the API is listening (requires restart of extension):\n              </label>\n              <Input\n                value={String(aiLabPort ?? 0)}\n                on:input={onAiLabPortInput}\n                class=\"w-full ml-2\"\n                placeholder=\"10434\"\n                name=\"aiLabPort\"\n                aria-label=\"Port input\" />\n            </div>\n            {#if errorMsg}\n              <ErrorMessage error={errorMsg} />\n            {/if}\n          </div>\n        </div>\n      </div>\n    </div>\n  {/snippet}\n</NavPage>\n"
  },
  {
    "path": "packages/frontend/src/stores/application-states.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Readable } from 'svelte/store';\nimport { readable } from 'svelte/store';\nimport { MSG_APPLICATIONS_STATE_UPDATE } from '@shared/Messages';\nimport { rpcBrowser, studioClient } from '/@/utils/client';\nimport type { ApplicationState } from '@shared/models/IApplicationState';\n\nexport const applicationStates: Readable<ApplicationState[]> = readable<ApplicationState[]>([], set => {\n  const sub = rpcBrowser.subscribe(MSG_APPLICATIONS_STATE_UPDATE, msg => {\n    set(msg);\n  });\n  // Initialize the store manually\n  studioClient\n    .getApplicationsState()\n    .then(state => {\n      set(state);\n    })\n    .catch((err: unknown) => console.error(`Error getting applications state:`, err));\n  return () => {\n    sub.unsubscribe();\n  };\n});\n"
  },
  {
    "path": "packages/frontend/src/stores/catalog.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Readable } from 'svelte/store';\nimport { readable } from 'svelte/store';\nimport { MSG_NEW_CATALOG_STATE } from '@shared/Messages';\nimport { rpcBrowser, studioClient } from '/@/utils/client';\nimport type { ApplicationCatalog } from '@shared/models/IApplicationCatalog';\n\nconst emptyCatalog = {\n  categories: [],\n  models: [],\n  recipes: [],\n};\n\nexport const catalog: Readable<ApplicationCatalog> = readable<ApplicationCatalog>(emptyCatalog, set => {\n  const sub = rpcBrowser.subscribe(MSG_NEW_CATALOG_STATE, msg => {\n    set(msg);\n  });\n  // Initialize the store manually\n  studioClient\n    .getCatalog()\n    .then(state => {\n      set(state);\n    })\n    .catch((err: unknown) => console.error('Error getting catalog:', err));\n  return () => {\n    sub.unsubscribe();\n  };\n});\n"
  },
  {
    "path": "packages/frontend/src/stores/containerProviderConnections.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { MSG_PODMAN_CONNECTION_UPDATE } from '@shared/Messages';\nimport { RPCReadable } from '/@/stores/rpcReadable';\nimport { studioClient } from '/@/utils/client';\nimport type { ContainerProviderConnectionInfo } from '@shared/models/IContainerConnectionInfo';\n\nexport const containerProviderConnections = RPCReadable<ContainerProviderConnectionInfo[]>(\n  [],\n  MSG_PODMAN_CONNECTION_UPDATE,\n  studioClient.getContainerProviderConnection,\n);\n"
  },
  {
    "path": "packages/frontend/src/stores/conversations.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Readable } from 'svelte/store';\nimport { readable } from 'svelte/store';\nimport { MSG_CONVERSATIONS_UPDATE } from '@shared/Messages';\nimport { rpcBrowser, studioClient } from '/@/utils/client';\nimport type { Conversation } from '@shared/models/IPlaygroundMessage';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport type { InferenceType } from '@shared/models/IInference';\nimport { toInferenceType } from '@shared/models/IInference';\n\nexport interface ConversationWithBackend extends Conversation {\n  backend: InferenceType;\n}\n\n// RPCReadable cannot be used here, as it is doing some debouncing, and we want\n// to get the conversation as soon as the tokens arrive here, instead getting them by packets\nexport const conversations: Readable<ConversationWithBackend[]> = readable<ConversationWithBackend[]>([], set => {\n  const sub = rpcBrowser.subscribe(MSG_CONVERSATIONS_UPDATE, conversations => {\n    setWithBackend(set, conversations);\n  });\n  // Initialize the store manually\n  studioClient\n    .getPlaygroundConversations()\n    .then(state => {\n      setWithBackend(set, state);\n    })\n    .catch((err: unknown) => 
console.error(`Error getting playground conversations:`, err));\n  return () => {\n    sub.unsubscribe();\n  };\n});\n\nfunction setWithBackend(set: (value: ConversationWithBackend[]) => void, conversations: Conversation[]): void {\n  studioClient\n    .getModelsInfo()\n    .then(modelsInfo => {\n      const conversationsWithBackend: ConversationWithBackend[] = conversations.map(conversation => ({\n        ...conversation,\n        backend: getModelBackend(modelsInfo, conversation.modelId),\n      }));\n      set(conversationsWithBackend);\n    })\n    .catch((err: unknown) => {\n      console.error('error getting models info', String(err));\n    });\n}\n\nfunction getModelBackend(modelsInfo: ModelInfo[], modelId: string): InferenceType {\n  const backend = modelsInfo.find(modelInfo => modelInfo.id === modelId)?.backend;\n  return toInferenceType(backend);\n}\n"
  },
  {
    "path": "packages/frontend/src/stores/extensionConfiguration.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Readable } from 'svelte/store';\nimport { readable } from 'svelte/store';\nimport { MSG_CONFIGURATION_UPDATE } from '@shared/Messages';\nimport { rpcBrowser, studioClient } from '/@/utils/client';\nimport type { ExtensionConfiguration } from '@shared/models/IExtensionConfiguration';\n\nexport const configuration: Readable<ExtensionConfiguration | undefined> = readable<ExtensionConfiguration>(\n  undefined,\n  set => {\n    const sub = rpcBrowser.subscribe(MSG_CONFIGURATION_UPDATE, msg => {\n      set(msg);\n    });\n    // Initialize the store manually\n    studioClient\n      .getExtensionConfiguration()\n      .then(state => {\n        set(state);\n      })\n      .catch((err: unknown) => console.error(`Error getting extension configuration:`, err));\n    return () => {\n      sub.unsubscribe();\n    };\n  },\n);\n"
  },
  {
    "path": "packages/frontend/src/stores/inferenceServers.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { RPCReadable } from '/@/stores/rpcReadable';\nimport { MSG_INFERENCE_SERVERS_UPDATE } from '@shared/Messages';\nimport { studioClient } from '/@/utils/client';\nimport type { InferenceServer } from '@shared/models/IInference';\n\nexport const inferenceServers = RPCReadable<InferenceServer[]>(\n  [],\n  MSG_INFERENCE_SERVERS_UPDATE,\n  studioClient.getInferenceServers,\n);\n"
  },
  {
    "path": "packages/frontend/src/stores/instructlabSessions.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport { RPCReadable } from '/@/stores/rpcReadable';\nimport { MSG_INSTRUCTLAB_SESSIONS_UPDATE } from '@shared/Messages';\nimport { instructlabClient } from '/@/utils/client';\nimport type { InstructlabSession } from '@shared/models/instructlab/IInstructlabSession';\n\nexport const instructlabSessions = RPCReadable<InstructlabSession[]>(\n  [],\n  MSG_INSTRUCTLAB_SESSIONS_UPDATE,\n  instructlabClient.getIsntructlabSessions,\n);\n"
  },
  {
    "path": "packages/frontend/src/stores/localRepositories.ts",
    "content": "import type { Readable } from 'svelte/store';\nimport { MSG_LOCAL_REPOSITORY_UPDATE } from '@shared/Messages';\nimport { studioClient } from '/@/utils/client';\nimport type { LocalRepository } from '@shared/models/ILocalRepository';\nimport { RPCReadable } from '/@/stores/rpcReadable';\n\nexport const localRepositories: Readable<LocalRepository[]> = RPCReadable<LocalRepository[]>(\n  [],\n  MSG_LOCAL_REPOSITORY_UPDATE,\n  studioClient.getLocalRepositories,\n);\n"
  },
  {
    "path": "packages/frontend/src/stores/modelsInfo.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { afterEach, beforeEach, expect, test, vi } from 'vitest';\nimport { rpcBrowser } from '../utils/client';\nimport type { Unsubscriber } from 'svelte/store';\nimport { modelsInfo } from './modelsInfo';\nimport { type RpcChannel, createRpcChannel, type Listener, clearRpcChannelList } from '@shared/messages/MessageProxy';\nimport { MSG_NEW_MODELS_STATE } from '@shared/Messages';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    getModelsInfoMock: vi.fn().mockResolvedValue([]),\n  };\n});\n\nvi.mock('../utils/client', async () => {\n  const subscriber = new Map();\n  const invokeMethod = (msgId: string, _: unknown[]): void => {\n    const f = subscriber.get(msgId);\n    f();\n  };\n  const subscribeMethod = (rpcChannel: RpcChannel<unknown>, f: Listener<unknown>): unknown => {\n    subscriber.set(rpcChannel.name, f);\n    return {\n      unsubscribe: (): void => {\n        subscriber.clear();\n      },\n    };\n  };\n  const getProxyMethod = (_: unknown): unknown => {\n    return {\n      send: invokeMethod,\n    };\n  };\n  const rpcBrowser = {\n    getProxy: getProxyMethod,\n    invoke: invokeMethod,\n    subscribe: subscribeMethod,\n  };\n  return {\n    
rpcBrowser,\n    studioClient: {\n      getModelsInfo: mocks.getModelsInfoMock,\n    },\n  };\n});\n\nlet unsubscriber: Unsubscriber | undefined;\nbeforeEach(() => {\n  vi.clearAllMocks();\n  clearRpcChannelList();\n  unsubscriber = modelsInfo.subscribe(_ => {});\n});\n\nafterEach(() => {\n  if (unsubscriber) {\n    unsubscriber();\n    unsubscriber = undefined;\n  }\n});\n\ntest('check getLocalModels is called at subscription', async () => {\n  expect(mocks.getModelsInfoMock).toHaveBeenCalledOnce();\n});\n\ntest('check getLocalModels is called twice if event is fired (one at init, one for the event)', async () => {\n  type MyModel = {\n    send: (message: string) => Promise<void>;\n  };\n  const channel = createRpcChannel<MyModel>(MSG_NEW_MODELS_STATE.name);\n  const proxy = rpcBrowser.getProxy<MyModel>(channel);\n  await proxy.send(MSG_NEW_MODELS_STATE.name);\n  // wait for the timeout in the debouncer\n  await new Promise(resolve => setTimeout(resolve, 600));\n  expect(mocks.getModelsInfoMock).toHaveBeenCalledTimes(2);\n});\n"
  },
  {
    "path": "packages/frontend/src/stores/modelsInfo.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ModelInfo } from '@shared/models/IModelInfo';\nimport { studioClient } from '/@/utils/client';\nimport { MSG_NEW_MODELS_STATE } from '@shared/Messages';\nimport { RPCReadable } from './rpcReadable';\n\nexport const modelsInfo = RPCReadable<ModelInfo[]>([], MSG_NEW_MODELS_STATE, studioClient.getModelsInfo);\n"
  },
  {
    "path": "packages/frontend/src/stores/rpcReadable.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { beforeEach, expect, test, vi } from 'vitest';\nimport { clearRpcChannelList, createRpcChannel, RpcBrowser } from '@shared/messages/MessageProxy';\nimport { RPCReadable } from './rpcReadable';\nimport { studioClient, rpcBrowser } from '../utils/client';\nimport type { ModelInfo } from '@shared/models/IModelInfo';\n\nconst mocks = vi.hoisted(() => {\n  return {\n    getModelsInfoMock: vi.fn().mockResolvedValue([]),\n  };\n});\n\nvi.mock('../utils/client', async () => {\n  const window = {\n    addEventListener: (_: string, _f: (message: unknown) => void) => {},\n  } as unknown as Window;\n\n  const api = {\n    postMessage: (message: unknown) => {\n      if (message && typeof message === 'object' && 'channel' in message) {\n        const f = rpcBrowser.subscribers.get(message.channel as string);\n        f?.forEach(listener => listener(''));\n      }\n    },\n  } as unknown as PodmanDesktopApi;\n\n  const rpcBrowser = new RpcBrowser(window, api);\n\n  return {\n    rpcBrowser: rpcBrowser,\n    studioClient: {\n      getModelsInfo: mocks.getModelsInfoMock,\n    },\n  };\n});\n\nbeforeEach(() => {\n  vi.clearAllMocks();\n});\n\ntest('check updater is 
called once at subscription', async () => {\n  const rpcChannel = createRpcChannel<string[]>('event1');\n  const rpcWritable = RPCReadable<string[]>([], rpcChannel, async () => {\n    await studioClient.getModelsInfo();\n    return Promise.resolve(['']);\n  });\n  rpcWritable.subscribe(_ => {});\n  expect(mocks.getModelsInfoMock).toHaveBeenCalledOnce();\n});\n\ntest('check updater is called twice if there is one event fired', async () => {\n  const channelModel = createRpcChannel<string[]>('event2');\n  clearRpcChannelList();\n  const channel = createRpcChannel<Update>('event2');\n  type Update = {\n    event: () => Promise<string[]>;\n  };\n\n  const rpcWritable = RPCReadable<string[]>([], channelModel, () => {\n    console.log('being called');\n    studioClient.getModelsInfo().catch((err: unknown) => console.error(err));\n    return Promise.resolve(['']);\n  });\n  rpcWritable.subscribe(_ => {});\n\n  // get proxy\n  const proxy = rpcBrowser.getProxy<Update>(channel);\n\n  proxy.event().catch((err: unknown) => console.error(err));\n  // wait for the timeout in the debouncer\n  await new Promise(resolve => setTimeout(resolve, 600));\n  expect(mocks.getModelsInfoMock).toHaveBeenCalledTimes(2);\n});\n\ntest('check updater is called only twice because of the debouncer if there is more than one event in a row', async () => {\n  const channelModel = createRpcChannel<ModelInfo[]>('event3');\n  clearRpcChannelList();\n  const channel = createRpcChannel<Update>('event3');\n  type Update = {\n    event: () => Promise<string[]>;\n  };\n\n  const rpcWritable = RPCReadable<ModelInfo[]>([], channelModel, () => {\n    return studioClient.getModelsInfo();\n  });\n  rpcWritable.subscribe(_ => {});\n\n  // get proxy\n  const proxy = rpcBrowser.getProxy<Update>(channel);\n\n  proxy.event().catch((err: unknown) => console.error(err));\n  proxy.event().catch((err: unknown) => console.error(err));\n  proxy.event().catch((err: unknown) => console.error(err));\n  
proxy.event().catch((err: unknown) => console.error(err));\n  // wait for the timeout in the debouncer\n  await new Promise(resolve => setTimeout(resolve, 600));\n  expect(mocks.getModelsInfoMock).toHaveBeenCalledTimes(2);\n});\n"
  },
  {
    "path": "packages/frontend/src/stores/rpcReadable.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { writable, type Subscriber, type Unsubscriber, type Readable } from 'svelte/store';\nimport { rpcBrowser } from '../utils/client';\nimport type { RpcChannel, Subscriber as SharedSubscriber } from '@shared/messages/MessageProxy';\n\nexport function RPCReadable<T>(\n  value: T,\n  // The event channel used to subscribe to a webview postMessage event\n  subscriptionEventChannel: RpcChannel<T>,\n  // The initialization function that will be called to update the store at creation.\n  // For example, you can pass in a custom function such as \"getPullingStatuses\".\n  updater: () => Promise<T>,\n): Readable<T> {\n  let timeoutId: NodeJS.Timeout | undefined;\n  let timeoutThrottle: NodeJS.Timeout | undefined;\n\n  const debouncedUpdater = debounce(updater);\n  const origWritable = writable(value);\n\n  function subscribe(this: void, run: Subscriber<T>, invalidate?: () => void): Unsubscriber {\n    const rcpSubscribes: SharedSubscriber[] = [];\n\n    const rcpSubscribe = rpcBrowser.subscribe<T>(subscriptionEventChannel, (_: unknown) => {\n      debouncedUpdater()\n        .then(v => origWritable.set(v))\n        .catch((e: unknown) => console.error('failed at 
updating store', String(e)));\n    });\n    rcpSubscribes.push(rcpSubscribe);\n\n    updater()\n      .then(v => origWritable.set(v))\n      .catch((e: unknown) => console.error('failed at init store', String(e)));\n\n    const unsubscribe = origWritable.subscribe(run, invalidate);\n    return () => {\n      rcpSubscribes.forEach(r => r.unsubscribe());\n      unsubscribe();\n    };\n  }\n\n  function debounce(func: () => Promise<T>): () => Promise<T> {\n    return () =>\n      new Promise<T>(resolve => {\n        if (timeoutId) {\n          clearTimeout(timeoutId);\n          timeoutId = undefined;\n        }\n\n        // throttle timeout, ask after 5s to update anyway to have at least UI being refreshed every 5s if there is a lot of events\n        // because debounce will defer all the events until the end so it's not so nice from UI side.\n        // eslint-disable-next-line sonarjs/no-nested-functions\n        timeoutThrottle ??= setTimeout(() => {\n          if (timeoutId) {\n            clearTimeout(timeoutId);\n            timeoutId = undefined;\n          }\n          resolve(func());\n        }, 5000);\n\n        // eslint-disable-next-line sonarjs/no-nested-functions\n        timeoutId = setTimeout(() => {\n          if (timeoutThrottle) {\n            clearTimeout(timeoutThrottle);\n            timeoutThrottle = undefined;\n          }\n          resolve(func());\n        }, 500);\n      });\n  }\n\n  return {\n    subscribe,\n  };\n}\n"
  },
  {
    "path": "packages/frontend/src/stores/snippetLanguages.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { Readable } from 'svelte/store';\nimport { MSG_SUPPORTED_LANGUAGES_UPDATE } from '@shared/Messages';\nimport { studioClient } from '/@/utils/client';\nimport { RPCReadable } from '/@/stores/rpcReadable';\nimport type { Language } from 'postman-code-generators';\n\nexport const snippetLanguages: Readable<Language[]> = RPCReadable<Language[]>(\n  [],\n  MSG_SUPPORTED_LANGUAGES_UPDATE,\n  studioClient.getSnippetLanguages,\n);\n"
  },
  {
    "path": "packages/frontend/src/stores/tasks.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { studioClient } from '/@/utils/client';\nimport { MSG_TASKS_UPDATE } from '@shared/Messages';\nimport { RPCReadable } from './rpcReadable';\nimport type { Task } from '@shared/models/ITask';\n\nexport const tasks = RPCReadable<Task[]>([], MSG_TASKS_UPDATE, studioClient.getTasks);\n"
  },
  {
    "path": "packages/frontend/src/utils/categoriesUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { IconDefinition } from '@fortawesome/free-regular-svg-icons';\n\nimport { faAlignLeft, faEdit, faImages, faQuestion } from '@fortawesome/free-solid-svg-icons';\n\nexport const getIcon = (scope: string | undefined): IconDefinition => {\n  switch (scope) {\n    case 'natural-language-processing':\n      return faAlignLeft;\n    case 'generator':\n      return faEdit;\n    case 'computer-vision':\n      return faImages;\n    default:\n      return faQuestion;\n  }\n};\n"
  },
  {
    "path": "packages/frontend/src/utils/client.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { StudioAPI } from '@shared/StudioAPI';\nimport { STUDIO_API_CHANNEL } from '@shared/StudioAPI';\nimport { RpcBrowser } from '@shared/messages/MessageProxy';\nimport type { RouterState } from '/@/models/IRouterState';\nimport type { InstructlabAPI } from '@shared/InstructlabAPI';\nimport { INSTRUCTLAB_API_CHANNEL } from '@shared/InstructlabAPI';\nimport { LLAMA_STACK_API_CHANNEL, type LlamaStackAPI } from '@shared/LlamaStackAPI';\n\nconst podmanDesktopApi = acquirePodmanDesktopApi();\nexport const rpcBrowser: RpcBrowser = new RpcBrowser(window, podmanDesktopApi);\n\nexport const studioClient: StudioAPI = rpcBrowser.getProxy<StudioAPI>(STUDIO_API_CHANNEL, {\n  noTimeoutMethods: ['openDialog'],\n});\nexport const instructlabClient: InstructlabAPI = rpcBrowser.getProxy<InstructlabAPI>(INSTRUCTLAB_API_CHANNEL);\nexport const llamaStackClient: LlamaStackAPI = rpcBrowser.getProxy<LlamaStackAPI>(LLAMA_STACK_API_CHANNEL, {\n  noTimeoutMethods: ['requestcreateLlamaStackContainerss'],\n});\n\nexport const saveRouterState = (state: RouterState): void => {\n  podmanDesktopApi.setState(state);\n};\n\nconst isRouterState = (value: unknown): value is RouterState => 
{\n  return typeof value === 'object' && !!value && 'url' in value;\n};\n\nexport async function getRouterState(): Promise<RouterState> {\n  const route: string | undefined = await studioClient.readRoute();\n  if (route) {\n    return {\n      url: route,\n    };\n  }\n\n  const state = podmanDesktopApi.getState();\n  if (isRouterState(state)) return state;\n  return { url: '/' };\n}\n\nObject.defineProperty(window, 'studioClient', {\n  value: studioClient,\n});\n"
  },
  {
    "path": "packages/frontend/src/utils/dimensions.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport moment from 'moment';\nimport humanizeDuration from 'humanize-duration';\n\nexport function humanizeAge(created: number): string {\n  // get start time in ms (using unix timestamp for the created)\n  const age = moment().diff(moment.unix(created));\n  // make it human friendly\n  return humanizeDuration(age, { round: true, largest: 1 });\n}\n"
  },
  {
    "path": "packages/frontend/src/utils/fileUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { LocalModelImportInfo } from '@shared/models/ILocalModelInfo';\n\n/**\n * This would only work in Electron as the `path` property is\n * not available is browser.\n */\nexport function getFilesFromDropEvent(event: DragEvent): LocalModelImportInfo[] {\n  if (!event.dataTransfer) return [];\n  const output: LocalModelImportInfo[] = [];\n\n  let files: File[];\n  if (event.dataTransfer.files.length) {\n    files = Array.from(event.dataTransfer.files);\n  } else {\n    files = Array.from(event.dataTransfer.items)\n      .map(item => item.getAsFile())\n      .filter((item): item is File => !!item);\n  }\n  for (const file of files) {\n    if (file && 'path' in file && typeof file.path === 'string') output.push({ path: file.path, name: file.name });\n  }\n  return output;\n}\n"
  },
  {
    "path": "packages/frontend/src/utils/localRepositoriesUtils.ts",
    "content": "import type { LocalRepository } from '@shared/models/ILocalRepository';\n\nexport const findLocalRepositoryByRecipeId = (\n  store: LocalRepository[],\n  recipeId: string | undefined,\n): LocalRepository | undefined => {\n  if (!recipeId) return undefined;\n  return store.find(local => !!local.labels && 'recipe-id' in local.labels && local.labels['recipe-id'] === recipeId);\n};\n"
  },
  {
    "path": "packages/frontend/src/utils/printers.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport function displayPorts(ports: number[]): string {\n  if (!ports || ports.length === 0) {\n    return '';\n  }\n  if (ports.length === 1) {\n    return `PORT ${ports[0]}`;\n  } else {\n    return `PORTS ${ports.join(', ')}`;\n  }\n}\n"
  },
  {
    "path": "packages/frontend/src/utils/taskUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Task } from '@shared/models/ITask';\n\nexport const filterByLabel = (tasks: Task[], requestedLabels: { [key: string]: string }): Task[] => {\n  return tasks.filter(task => {\n    const labels = task.labels;\n    if (labels === undefined) return false;\n\n    for (const [key, value] of Object.entries(requestedLabels)) {\n      if (!(key in labels) || labels[key] !== value) return false;\n    }\n\n    return true;\n  });\n};\n"
  },
  {
    "path": "packages/frontend/src/utils/versionControlUtils.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nconst GITHUB_PREFIX = 'https://github.com/';\n\nexport const getDisplayName = (link: string | undefined): string => {\n  if (link === undefined) return '?';\n\n  if (link.startsWith(GITHUB_PREFIX)) return link.substring(GITHUB_PREFIX.length);\n\n  return link;\n};\n"
  },
  {
    "path": "packages/frontend/tailwind.config.cjs",
    "content": "/**********************************************************************\n * Copyright (C) 2022-2023 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nconst defaultTheme = require('tailwindcss/defaultTheme');\nconst tailwindColors = require('tailwindcss/colors')\nconst tailwindTypography = require('@tailwindcss/typography')\n\nmodule.exports = {\n  content: [\n    'index.html',\n    'src/**/*.{svelte,ts,css}',\n    '../../node_modules/@podman-desktop/ui-svelte/dist/**/*.{svelte,ts,css}',\n  ],\n  darkMode: 'class',\n  theme: {\n    fontSize: {\n      'xs': '10px',\n      'sm': '11px',\n      'base': '12px',\n      'lg': '14px',\n      'xl': '16px',\n      '2xl': '18px',\n      '3xl': '20px',\n      '4xl': '24px',\n      '5xl': '30px',\n      '6xl': '36px',\n    },\n    extend: {\n      boxShadow: {\n        \"titlebar\": 'inset 0px -1px 0px 0 rgb(54 54 61 / 0.6)', // highlight for bottom of titlebar\n\t\"pageheader\": 'inset 0 0px 10px 0 rgb(0 0 0 / 0.4)',\n\t\"nav\": 'inset 7px -4px 6px 0 rgb(0 0 0 / 0.15)',\n      },\n      transitionProperty: {\n        width: 'width',\n      },\n      width: {\n        'leftnavbar': '54px',\n        'leftsidebar': '225px',\n      },\n      minWidth: {\n        'leftnavbar': '54px',\n        'leftsidebar': '225px',\n      },\n      typography: (theme) => ({\n        
DEFAULT: {\n          css: {\n            color: 'var(--pd-details-body-text)',\n            '--tw-prose-body': 'var(--pd-details-body-text)',\n            '--tw-prose-bold': 'var(--pd-details-body-text)',\n            '--tw-prose-headings': 'var(--pd-details-body-text)',\n            '--tw-prose-quotes': 'var(--pd-details-body-text)',\n            '--tw-prose-hr': 'var(--pd-details-body-text)',\n            '--tw-prose-links': 'var(--pd-link)',\n            '--tw-prose-code': 'var(--pd-details-body-text)',\n          },\n        },\n      }),\n    },\n    colors: {\n      // The \"status\" colours to be used for Podman and Kubernetes containers\n      // these can be referenced by in the form of \"bg-status-running\" or \"text-status-running\"\n      'status': {\n        // Podman & Kubernetes\n        'running': tailwindColors.green[400],\n\n        // Kubernetes only\n        'terminated': tailwindColors.red[500],\n        'waiting': tailwindColors.amber[600],\n\n        // Podman only\n\n        // Stopped & Exited are the same color / same thing in the eyes of statuses\n        'stopped': tailwindColors.gray[300],\n        'exited': tailwindColors.gray[300],\n\n        // \"Warning\"\n        'paused': tailwindColors.amber[600],\n        'degraded': tailwindColors.amber[700],\n\n        // Others\n        'created': tailwindColors.green[300],\n        'dead': tailwindColors.red[500],\n\n        // If we don't know the status, use gray\n        'unknown': tailwindColors.gray[100],\n      },\n      'charcoal': {\n         50: '#767676',\n        100: '#707073',\n        200: '#5c5c5c',\n        300: '#464649',\n        400: '#4a4b4f',\n        500: '#36363d',\n        600: '#27272a',\n        700: '#222222',\n        800: '#18181b',\n        900: '#0f0f11',\n      },\n      'gray': {\n         50: '#f9fafb',\n        100: '#f6f6f6',\n        200: '#efefef',\n        300: '#e4e4e4',\n        400: '#d1d1d1',\n        500: '#c8c8c8',\n        600: '#b4b4b4',\n      
  700: '#aaabac',\n        800: '#9a9a9a',\n        900: '#818181',\n      },\n      'purple': {\n         50: '#f7f3ff',\n        100: '#efe9fe',\n        200: '#e2d6fe',\n        300: '#bfa7f6',\n        400: '#ad8bfa',\n        500: '#8b5cf6',\n        600: '#6d48bf',\n        700: '#6234b1',\n        800: '#4d2d87',\n        900: '#37255d',\n      },\n      'dustypurple': {\n         50: '#f2f2fb',\n        100: '#e7e8f8',\n        200: '#d3d3f2',\n        300: '#b9b8e9',\n        400: '#a09adf',\n        500: '#8f81d3',\n        600: '#8772c7',\n        700: '#6d57ab',\n        800: '#59498a',\n        900: '#4a406f',\n      },\n      'fuschia': {\n         50: '#fdf2ff',\n        100: '#f9e3ff',\n        200: '#f4c6ff',\n        300: '#f099ff',\n        400: '#e85dff',\n        500: '#d721ff',\n        600: '#c200ff',\n        700: '#a200cf',\n        800: '#8600a9',\n        900: '#710689',\n      },\n      'sky': {\n         50: '#f2f8fd',\n        100: '#e5eff9',\n        200: '#c4def3',\n        300: '#90c3e9',\n        400: '#51a2da',\n        500: '#2f88c8',\n        600: '#206ca9',\n        700: '#1b5789',\n        800: '#1a4a72',\n        900: '#1b3f5f',\n      },\n      'green': {\n         50: '#f0f9f0',\n        100: '#ddefdc',\n        200: '#bbdfbb',\n        300: '#8ec792',\n        400: '#64ad6c',\n        500: '#3c8d47',\n        600: '#2b7037',\n        700: '#225a2d',\n        800: '#1d4825',\n        900: '#193b20',\n      },\n      'red': {\n         50: '#fff4f1',\n        100: '#ffe7e1',\n        200: '#ffd1c7',\n        300: '#ffb3a1',\n        400: '#ff866a',\n        500: '#f86847',\n        600: '#e5421d',\n        700: '#c13414',\n        800: '#9f2f15',\n        900: '#842c18',\n      },\n      'amber': {\n         50: tailwindColors.amber[50],\n        100: tailwindColors.amber[100],\n        200: tailwindColors.amber[200],\n        300: tailwindColors.amber[300],\n        400: tailwindColors.amber[400],\n        500: 
tailwindColors.amber[500],\n        600: tailwindColors.amber[600],\n        700: tailwindColors.amber[700],\n        800: tailwindColors.amber[800],\n        900: tailwindColors.amber[900],\n      },\n      transparent: 'transparent',\n      black: '#000',\n      white: '#fff',\n      // The remaining colors below are not part of our palette and are only here\n      // to maintain existing code. No new use.\n      'slate': {\n        400: tailwindColors.slate[400],\n        800: tailwindColors.slate[800],\n      },\n      'zinc': {\n        100: tailwindColors.zinc[100],\n        200: tailwindColors.zinc[200],\n        300: tailwindColors.zinc[300],\n        400: tailwindColors.zinc[400],\n        600: tailwindColors.zinc[600],\n        700: tailwindColors.zinc[700],\n      },\n      'indigo': { // website only\n        500: tailwindColors.indigo[500],\n        600: tailwindColors.indigo[600],\n      },\n      'violet': {\n         50: tailwindColors.violet[50],\n        400: tailwindColors.violet[400],\n        500: tailwindColors.violet[500],\n        600: tailwindColors.violet[600],\n        700: tailwindColors.violet[700],\n        800: tailwindColors.violet[800],\n      },\n    },\n  },\n  plugins: [\n    tailwindTypography\n  ],\n};\n"
  },
  {
    "path": "packages/frontend/tsconfig.json",
    "content": "{\n  \"extends\": \"@tsconfig/svelte/tsconfig.json\",\n  \"compilerOptions\": {\n    \"target\": \"esnext\",\n    \"module\": \"esnext\",\n    \"strict\": true,\n    \"noImplicitOverride\": true,\n    \"noUnusedLocals\": true,\n    \"noImplicitReturns\": true,\n    \"resolveJsonModule\": true,\n    \"preserveValueImports\": false,\n    \"baseUrl\": \".\",\n    \"paths\": {\n      \"/@/*\": [\"./src/*\"],\n      \"@shared/*\": [\"../shared/src/*\"]\n    },\n    /**\n     * Typecheck JS in `.svelte` and `.js` files by default.\n     * Disable checkJs if you'd like to use dynamic types in JS.\n     * Note that setting allowJs false does not prevent the use\n     * of JS in `.svelte` files.\n     */\n    \"allowJs\": true,\n    \"checkJs\": true,\n    \"types\": [\"@testing-library/jest-dom\", \"vite/client\"]\n  },\n  \"include\": [\n    \"src/**/*.d.ts\",\n    \"src/**/*.ts\",\n    \"src/**/*.js\",\n    \"src/**/*.svelte\",\n    \"../../types/**/*.d.ts\",\n    \"../shared/**/*.ts\"\n  ]\n}\n"
  },
  {
    "path": "packages/frontend/vite.config.js",
    "content": "/**********************************************************************\n * Copyright (C) 2023-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/* eslint-env node */\nimport { join } from 'path';\nimport * as path from 'path';\nimport { svelte } from '@sveltejs/vite-plugin-svelte';\nimport { svelteTesting } from '@testing-library/svelte/vite';\nimport { defineConfig } from 'vite';\nimport { fileURLToPath } from 'url';\nimport tailwindcss from '@tailwindcss/vite';\n\nlet filename = fileURLToPath(import.meta.url);\nconst PACKAGE_ROOT = path.dirname(filename);\n\n// https://vitejs.dev/config/\nexport default defineConfig({\n  mode: process.env.MODE,\n  root: PACKAGE_ROOT,\n  resolve: {\n    alias: {\n      '/@/': join(PACKAGE_ROOT, 'src') + '/',\n      '@shared/': join(PACKAGE_ROOT, '../shared', 'src') + '/',\n    },\n  },\n  plugins: [tailwindcss(), svelte({ hot: !process.env.VITEST }), svelteTesting()],\n  optimizeDeps: {\n    exclude: [],\n  },\n  test: {\n    include: ['src/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}'],\n    globals: true,\n    environment: 'jsdom',\n    alias: [\n      { find: '@testing-library/svelte', replacement: '@testing-library/svelte/svelte5' },\n      {\n        find: /^monaco-editor$/,\n        replacement: 
`${PACKAGE_ROOT}/../../node_modules/monaco-editor/esm/vs/editor/editor.api`,\n      },\n    ],\n    deps: {\n      inline: [],\n    },\n  },\n  base: '',\n  server: {\n    fs: {\n      strict: true,\n    },\n  },\n  build: {\n    sourcemap: true,\n    outDir: '../backend/media',\n    assetsDir: '.',\n\n    emptyOutDir: true,\n    reportCompressedSize: false,\n  },\n});\n"
  },
  {
    "path": "packages/shared/__mocks__/@podman-desktop/api.js",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/**\n * Mock the extension API for vitest.\n * This file is referenced from vitest.config.js file.\n */\nconst plugin = {};\nmodule.exports = plugin;\n"
  },
  {
    "path": "packages/shared/src/InstructlabAPI.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { createRpcChannel } from './messages/MessageProxy';\nimport type { InstructlabSession } from './models/instructlab/IInstructlabSession';\nimport type { InstructlabContainerConfiguration } from './models/instructlab/IInstructlabContainerConfiguration';\n\nexport const INSTRUCTLAB_API_CHANNEL = createRpcChannel<InstructlabAPI>('InstructlabAPI');\nexport interface InstructlabAPI {\n  /**\n   * Get sessions of InstructLab tuning\n   */\n  getIsntructlabSessions(): Promise<InstructlabSession[]>;\n\n  /**\n   * Start a container for InstructLab\n   *\n   * @param config\n   */\n  requestCreateInstructlabContainer(config: InstructlabContainerConfiguration): Promise<void>;\n\n  routeToInstructLabContainerTerminal(containerId: string): Promise<void>;\n\n  getInstructlabContainerId(): Promise<string | undefined>;\n}\n"
  },
  {
    "path": "packages/shared/src/LlamaStackAPI.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { createRpcChannel } from './messages/MessageProxy';\nimport type { LlamaStackContainerConfiguration } from './models/llama-stack/LlamaStackContainerConfiguration';\nimport type { LlamaStackContainers } from './models/llama-stack/LlamaStackContainerInfo';\n\nexport const LLAMA_STACK_API_CHANNEL = createRpcChannel<LlamaStackAPI>('LlamaStackAPI');\nexport interface LlamaStackAPI {\n  requestcreateLlamaStackContainerss(config: LlamaStackContainerConfiguration): Promise<void>;\n\n  routeToLlamaStackContainerTerminal(containerId: string): Promise<void>;\n\n  getLlamaStackContainersInfo(): Promise<LlamaStackContainers | undefined>;\n}\n"
  },
  {
    "path": "packages/shared/src/Messages.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Language } from 'postman-code-generators';\nimport type { McpSettings } from '@shared/models/McpSettings';\nimport { createRpcChannel } from './messages/MessageProxy';\nimport type { Task } from './models/ITask';\nimport type { ModelInfo } from './models/IModelInfo';\nimport type { ContainerProviderConnectionInfo } from './models/IContainerConnectionInfo';\nimport type { InferenceServer } from './models/IInference';\nimport type { InstructlabSession } from './models/instructlab/IInstructlabSession';\nimport type { LocalRepository } from './models/ILocalRepository';\nimport type { Conversation } from './models/IPlaygroundMessage';\nimport type { ExtensionConfiguration } from './models/IExtensionConfiguration';\nimport type { ApplicationCatalog } from './models/IApplicationCatalog';\nimport type { ApplicationState } from './models/IApplicationState';\nimport type { IGPUInfo } from './models/IGPUInfo';\nimport type { StatsHistory } from '../../backend/src/managers/monitoringManager';\n\nexport const MSG_TASKS_UPDATE = createRpcChannel<Task[]>('tasks-update');\nexport const MSG_SUPPORTED_LANGUAGES_UPDATE = 
createRpcChannel<Language[]>('supported-languages-supported');\nexport const MSG_NEW_MODELS_STATE = createRpcChannel<ModelInfo[]>('new-models-state');\nexport const MSG_PODMAN_CONNECTION_UPDATE =\n  createRpcChannel<ContainerProviderConnectionInfo[]>('podman-connecting-update');\nexport const MSG_INFERENCE_SERVERS_UPDATE = createRpcChannel<InferenceServer[]>('inference-servers-update');\nexport const MSG_INSTRUCTLAB_SESSIONS_UPDATE = createRpcChannel<InstructlabSession[]>('instructlab-sessions-update');\nexport const MSG_LOCAL_REPOSITORY_UPDATE = createRpcChannel<LocalRepository[]>('local-repository-update');\nexport const MSG_CONVERSATIONS_UPDATE = createRpcChannel<Conversation[]>('conversations-update');\nexport const MSG_CONFIGURATION_UPDATE = createRpcChannel<ExtensionConfiguration>('configuration-update');\nexport const MSG_MCP_SERVERS_UPDATE = createRpcChannel<McpSettings>('mcp-servers-update');\nexport const MSG_NEW_CATALOG_STATE = createRpcChannel<ApplicationCatalog>('new-catalog-state');\nexport const MSG_APPLICATIONS_STATE_UPDATE = createRpcChannel<ApplicationState[]>('applications-state-update');\nexport const MSG_GPUS_UPDATE = createRpcChannel<IGPUInfo[]>('gpus-update');\nexport const MSG_MONITORING_UPDATE = createRpcChannel<StatsHistory[]>('monitoring-update');\nexport const MSG_NAVIGATION_ROUTE_UPDATE = createRpcChannel<string>('navigation-route-update');\n\n// array of model handler names\nexport const MSG_MODEL_HANDLERS_UPDATE = createRpcChannel<string[]>('model-handlers-update');\n// array of provider names\nexport const MSG_INFERENCE_PROVIDER_UPDATE = createRpcChannel<string[]>('inference-provider-update');\n"
  },
  {
    "path": "packages/shared/src/StudioAPI.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ModelInfo } from './models/IModelInfo';\nimport type { InferenceType } from '@shared/models/IInference';\nimport type { ApplicationCatalog } from './models/IApplicationCatalog';\nimport type { OpenDialogOptions, Uri } from '@podman-desktop/api';\nimport type { ApplicationState } from './models/IApplicationState';\nimport type { Task } from './models/ITask';\nimport type { LocalRepository } from './models/ILocalRepository';\nimport type { InferenceServer } from './models/IInference';\nimport type { RequestOptions } from './models/RequestOptions';\nimport type { Language } from 'postman-code-generators';\nimport type { CreationInferenceServerOptions } from './models/InferenceServerConfig';\nimport type { ModelOptions } from './models/IModelOptions';\nimport type { Conversation } from './models/IPlaygroundMessage';\nimport type { LocalModelImportInfo } from './models/ILocalModelInfo';\nimport type {\n  CheckContainerConnectionResourcesOptions,\n  ContainerConnectionInfo,\n  ContainerProviderConnectionInfo,\n} from './models/IContainerConnectionInfo';\nimport type { ExtensionConfiguration } from './models/IExtensionConfiguration';\nimport type { 
RecipePullOptions } from './models/IRecipe';\nimport type { FilterRecipesResult, RecipeFilters } from './models/FilterRecipesResult';\nimport { createRpcChannel } from './messages/MessageProxy';\n\nexport const STUDIO_API_CHANNEL = createRpcChannel<StudioAPI>('StudioAPI');\nexport interface StudioAPI {\n  ping(): Promise<string>;\n  getCatalog(): Promise<ApplicationCatalog>;\n  filterRecipes(filters: RecipeFilters): Promise<FilterRecipesResult>;\n\n  // Application related methods\n  /**\n   * Clone a recipe\n   * @param recipeId\n   */\n  cloneApplication(recipeId: string): Promise<void>;\n\n  /**\n   * Allow the frontend to get a list of the container provider connections available.\n   * This object can be used in requestPullApplication\n   */\n  getContainerProviderConnection(): Promise<ContainerProviderConnectionInfo[]>;\n  /**\n   * Pull an application (clone, download model, build container, start pod)\n   *\n   * @return a promise with a tracking id used in each task labels\n   * @param options\n   */\n  requestPullApplication(options: RecipePullOptions): Promise<string>;\n  requestStopApplication(recipeId: string, modelId: string): Promise<void>;\n  requestStartApplication(recipeId: string, modelId: string): Promise<void>;\n  requestRemoveApplication(recipeId: string, modelId: string): Promise<void>;\n  requestRestartApplication(recipeId: string, modelId: string): Promise<void>;\n  requestOpenApplication(recipeId: string, modelId: string): Promise<void>;\n  getApplicationsState(): Promise<ApplicationState[]>;\n\n  openURL(url: string): Promise<boolean>;\n  openFile(file: string, recipeId?: string): Promise<boolean>;\n  openDialog(options?: OpenDialogOptions): Promise<Uri[] | undefined>;\n\n  /**\n   * Get the information of models saved locally into the user's directory\n   */\n  getModelsInfo(): Promise<ModelInfo[]>;\n\n  /**\n   * Given a modelId will return the model metadata\n   * @remark If the model is not available locally, a fetch request will be 
used to get its metadata from the header.\n   * @param modelId\n   */\n  getModelMetadata(modelId: string): Promise<Record<string, unknown>>;\n\n  /**\n   * Delete the folder containing the model from local storage\n   */\n  requestRemoveLocalModel(modelId: string): Promise<void>;\n\n  navigateToContainer(containerId: string): Promise<void>;\n  navigateToPod(podId: string): Promise<void>;\n  navigateToResources(): Promise<void>;\n  navigateToEditConnectionProvider(connectionName: string): Promise<void>;\n\n  telemetryLogUsage(eventName: string, data?: Record<string, unknown>): Promise<void>;\n  telemetryLogError(eventName: string, data?: Record<string, unknown>): Promise<void>;\n\n  getLocalRepositories(): Promise<LocalRepository[]>;\n\n  getTasks(): Promise<Task[]>;\n\n  /**\n   * Open the VSCode editor\n   * @param directory the directory to open the editor from\n   */\n  openVSCode(directory: string, recipeId?: string): Promise<void>;\n\n  /**\n   * Download a model from the catalog\n   * @param modelId the id of the model we want to download\n   */\n  downloadModel(modelId: string): Promise<void>;\n\n  /**\n   * Get inference servers\n   */\n  getInferenceServers(): Promise<InferenceServer[]>;\n\n  /**\n   * Get inference providers\n   */\n  getRegisteredProviders(): Promise<InferenceType[]>;\n\n  /**\n   * Request to start an inference server\n   * @param options The options to use\n   *\n   * @return a tracking identifier to follow progress\n   */\n  requestCreateInferenceServer(options: CreationInferenceServerOptions): Promise<string>;\n\n  /**\n   * Start an inference server\n   * @param containerId the container id of the inference server\n   */\n  startInferenceServer(containerId: string): Promise<void>;\n\n  /**\n   * Stop an inference server\n   * @param containerId the container id of the inference server\n   */\n  stopInferenceServer(containerId: string): Promise<void>;\n\n  /**\n   * Delete an inference server container\n   * @param containerIds ids 
of the container to delete\n   */\n  requestDeleteInferenceServer(...containerIds: string[]): Promise<void>;\n\n  /**\n   * Return a free random port on the host machine\n   */\n  getHostFreePort(): Promise<number>;\n\n  /**\n   * Submit a user input to the Playground linked to a conversation, model, and inference server\n   * @param containerId the container id of the inference server we want to use\n   * @param modelId the model to use\n   * @param conversationId the conversation to input the message in\n   * @param userInput the user input, e.g. 'What is the capital of France ?'\n   * @param options the options for the model, e.g. temperature\n   */\n  submitPlaygroundMessage(containerId: string, userInput: string, options?: ModelOptions): Promise<number>;\n\n  /**\n   * Given a conversation, update the system prompt.\n   * If none exists, it will create one, otherwise it will replace the content with the new one\n   * @param conversationId the conversation id to set the system id\n   * @param content the new system prompt to use\n   */\n  setPlaygroundSystemPrompt(conversationId: string, content: string | undefined): Promise<void>;\n\n  /**\n   * Return the conversations\n   */\n  getPlaygroundConversations(): Promise<Conversation[]>;\n\n  /**\n   * Get the extension configuration (preferences)\n   */\n  getExtensionConfiguration(): Promise<ExtensionConfiguration>;\n  updateExtensionConfiguration(update: Partial<ExtensionConfiguration>): Promise<void>;\n\n  /**\n   * Get the Podman Desktop version\n   */\n  getPodmanDesktopVersion(): Promise<string>;\n\n  /**\n   * Return the list of supported languages to generate code from.\n   */\n  getSnippetLanguages(): Promise<Language[]>;\n\n  /**\n   * return a code snippet as a string matching the arguments and options provided\n   * @param options the options for the request\n   * @param language the language to use\n   * @param variant the variant of the language\n   */\n  createSnippet(options: RequestOptions, 
language: string, variant: string): Promise<string>;\n\n  requestCreatePlayground(name: string, model: ModelInfo): Promise<string>;\n\n  /**\n   * Delete a conversation\n   * @param conversationId the conversation identifier that will be deleted\n   */\n  requestDeleteConversation(conversationId: string): Promise<void>;\n\n  /**\n   * Delete a local path\n   * @param path path to delete\n   */\n  requestDeleteLocalRepository(path: string): Promise<void>;\n\n  /**\n   * Request the cancellation of a token\n   * @param tokenId the id of the CancellationToken to cancel\n   */\n  requestCancelToken(tokenId: number): Promise<void>;\n\n  /**\n   * Import local models selected by user\n   * @param models list of local models to import\n   */\n  importModels(models: LocalModelImportInfo[]): Promise<void>;\n\n  /**\n   * Validate a LocalModelImportInfo\n   * throw an error if invalid\n   */\n  validateLocalModel(model: LocalModelImportInfo): Promise<void>;\n\n  /**\n   * Copy the provided content to the user clipboard\n   * @param content\n   */\n  copyToClipboard(content: string): Promise<void>;\n\n  /**\n   * Check if the running podman machine is running and has enough resources to execute task\n   * @param options\n   */\n  checkContainerConnectionStatusAndResources(\n    options: CheckContainerConnectionResourcesOptions,\n  ): Promise<ContainerConnectionInfo>;\n\n  /**\n   * This method is used by the frontend on reveal to get any potential navigation\n   * route it should use. This method has a side effect of removing the pending route after calling.\n   */\n  readRoute(): Promise<string | undefined>;\n}\n"
  },
  {
    "path": "packages/shared/src/messages/MessageProxy.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { test, expect, vi, describe, beforeEach, afterEach } from 'vitest';\nimport { createRpcChannel, RpcBrowser, RpcExtension } from './MessageProxy';\nimport type { Webview } from '@podman-desktop/api';\n\nlet webview: Webview;\nlet window: Window;\nlet api: PodmanDesktopApi;\n\nconst originalConsoleError = console.error;\n\nbeforeEach(() => {\n  let windowListener: (message: unknown) => void;\n  let webviewListener: (message: unknown) => void;\n  console.error = vi.fn();\n\n  webview = {\n    onDidReceiveMessage: (listener: (message: unknown) => void) => {\n      webviewListener = listener;\n    },\n    postMessage: async (message: unknown): Promise<void> => {\n      windowListener({ data: message } as MessageEvent);\n    },\n  } as unknown as Webview;\n\n  window = {\n    addEventListener: (channel: string, listener: (message: unknown) => void) => {\n      expect(channel).toBe('message');\n      windowListener = listener;\n    },\n  } as unknown as Window;\n\n  api = {\n    postMessage: (message: unknown) => {\n      webviewListener(message);\n    },\n  } as unknown as PodmanDesktopApi;\n});\n\nafterEach(() => {\n  console.error = 
originalConsoleError;\n});\n\ntest('init logic should be executing once', () => {\n  vi.spyOn(webview, 'onDidReceiveMessage');\n  const rpcExtension = new RpcExtension(webview);\n  rpcExtension.init();\n\n  expect(webview.onDidReceiveMessage).toHaveBeenCalledOnce();\n});\n\ntest('Test register channel with method without argument', async () => {\n  const rpcExtension = new RpcExtension(webview);\n  rpcExtension.init();\n  const rpcBrowser = new RpcBrowser(window, api);\n\n  type Ping = {\n    ping: () => Promise<string>;\n  };\n\n  const channel = createRpcChannel<Ping>('ping');\n\n  const pingImpl: Ping = {\n    ping: async (): Promise<string> => {\n      return 'pong';\n    },\n  };\n  rpcExtension.registerInstance(channel, pingImpl);\n\n  // get proxy\n  const proxy = rpcBrowser.getProxy<Ping>(channel);\n  expect(await proxy.ping()).toBe('pong');\n});\n\ntest('Test register channel one argument', async () => {\n  const rpcExtension = new RpcExtension(webview);\n  rpcExtension.init();\n  const rpcBrowser = new RpcBrowser(window, api);\n\n  type Double = {\n    double: (value: number) => Promise<number>;\n  };\n\n  const channel = createRpcChannel<Double>('double');\n\n  const doubleImpl: Double = {\n    double: async (value: number): Promise<number> => {\n      return value * 2;\n    },\n  };\n  rpcExtension.registerInstance(channel, doubleImpl);\n\n  // get proxy\n  const proxy = rpcBrowser.getProxy<Double>(channel);\n  expect(await proxy.double(4)).toBe(8);\n});\n\ntest('Test register channel multiple arguments', async () => {\n  const rpcExtension = new RpcExtension(webview);\n  rpcExtension.init();\n  const rpcBrowser = new RpcBrowser(window, api);\n\n  type Sum = {\n    sum: (...args: number[]) => Promise<number>;\n  };\n  const channel = createRpcChannel<Sum>('sum');\n\n  const sumImpl: Sum = {\n    sum: async (...args: number[]): Promise<number> => {\n      return args.reduce((prev, current) => prev + current, 0);\n    },\n  };\n\n  
rpcExtension.registerInstance(channel, sumImpl);\n\n  // get proxy\n  const proxy = rpcBrowser.getProxy<Sum>(channel);\n  expect(await proxy.sum(1, 2, 3, 4, 5)).toBe(15);\n});\n\ntest('Test register instance with async', async () => {\n  class Dummy {\n    async ping(): Promise<string> {\n      return 'pong';\n    }\n  }\n\n  const channel = createRpcChannel<Dummy>('Dummy');\n\n  const rpcExtension = new RpcExtension(webview);\n  rpcExtension.init();\n  const rpcBrowser = new RpcBrowser(window, api);\n\n  rpcExtension.registerInstance(channel, new Dummy());\n\n  const proxy = rpcBrowser.getProxy<Dummy>(channel);\n  expect(await proxy.ping()).toBe('pong');\n});\n\ntest('Test register instance and implemented interface', async () => {\n  interface Foo {\n    ping(): Promise<'pong'>;\n  }\n\n  class Dummy implements Foo {\n    async ping(): Promise<'pong'> {\n      return 'pong';\n    }\n  }\n  const channel = createRpcChannel<Foo>('Foo');\n\n  const rpcExtension = new RpcExtension(webview);\n  rpcExtension.init();\n  const rpcBrowser = new RpcBrowser(window, api);\n\n  rpcExtension.registerInstance(channel, new Dummy());\n\n  const proxy = rpcBrowser.getProxy<Foo>(channel);\n  expect(await proxy.ping()).toBe('pong');\n});\n\ntest('Test raising exception', async () => {\n  const rpcExtension = new RpcExtension(webview);\n  rpcExtension.init();\n  const rpcBrowser = new RpcBrowser(window, api);\n\n  type TestError = {\n    raiseError: () => Promise<void>;\n  };\n  const channel = createRpcChannel<TestError>('TestError');\n\n  const testErrorImpl: TestError = {\n    raiseError: async (): Promise<void> => {\n      throw new Error('big error');\n    },\n  };\n  rpcExtension.registerInstance(channel, testErrorImpl);\n\n  // get proxy\n  const proxy = rpcBrowser.getProxy<TestError>(channel);\n  await expect(proxy.raiseError).rejects.toThrow('big error');\n});\n\ndescribe('subscribe', () => {\n  beforeEach(() => {\n    window.addEventListener = vi.fn();\n  });\n\n  function 
getMessageListener(): (event: MessageEvent) => void {\n    expect(window.addEventListener).toHaveBeenCalledOnce();\n    expect(window.addEventListener).toHaveBeenCalledWith('message', expect.any(Function));\n    return vi.mocked(window.addEventListener).mock.calls[0][1] as (event: MessageEvent) => void;\n  }\n\n  test('subscriber should be called on event received', async () => {\n    const rpcBrowser = new RpcBrowser(window, api);\n    const messageListener = getMessageListener();\n\n    interface EventTest {\n      foo: string;\n    }\n    const rpcChannel = createRpcChannel<EventTest>('example');\n\n    const listener = vi.fn();\n    rpcBrowser.subscribe<EventTest>(rpcChannel, listener);\n\n    messageListener({\n      data: {\n        id: rpcChannel.name,\n        body: 'hello',\n      },\n    } as unknown as MessageEvent);\n\n    expect(listener).toHaveBeenCalledOnce();\n  });\n\n  test('all subscribers should be called if multiple exists', async () => {\n    const rpcBrowser = new RpcBrowser(window, api);\n    const messageListener = getMessageListener();\n\n    const listeners = Array.from({ length: 10 }, _ => vi.fn());\n\n    interface EventTest {\n      foo: string;\n    }\n    const rpcChannel = createRpcChannel<EventTest>('example-all-subscribers');\n    listeners.forEach(listener => rpcBrowser.subscribe(rpcChannel, listener));\n\n    messageListener({\n      data: {\n        id: rpcChannel.name,\n        body: 'hello',\n      },\n    } as unknown as MessageEvent);\n\n    for (const listener of listeners) {\n      expect(listener).toHaveBeenCalledWith('hello');\n    }\n  });\n\n  test('subscribers which unsubscribe should not be called', async () => {\n    const rpcBrowser = new RpcBrowser(window, api);\n    const messageListener = getMessageListener();\n\n    const [listenerA, listenerB] = [vi.fn(), vi.fn()];\n\n    interface EventTest {\n      foo: string;\n    }\n    const rpcChannel = createRpcChannel<EventTest>('example-unsubscribe');\n\n    const 
unsubscriberA = rpcBrowser.subscribe(rpcChannel, listenerA);\n    const unsubscriberB = rpcBrowser.subscribe(rpcChannel, listenerB);\n\n    messageListener({\n      data: {\n        id: rpcChannel.name,\n        body: 'hello',\n      },\n    } as unknown as MessageEvent);\n\n    // unsubscriber the listener B\n    unsubscriberB.unsubscribe();\n\n    messageListener({\n      data: {\n        id: rpcChannel.name,\n        body: 'hello',\n      },\n    } as unknown as MessageEvent);\n\n    // unsubscriber the listener A\n    unsubscriberA.unsubscribe();\n\n    messageListener({\n      data: {\n        id: rpcChannel.name,\n        body: 'hello',\n      },\n    } as unknown as MessageEvent);\n\n    expect(listenerA).toHaveBeenCalledTimes(2);\n    expect(listenerB).toHaveBeenCalledOnce();\n  });\n});\n\ndescribe('no timeout channel', () => {\n  beforeEach(() => {\n    vi.resetAllMocks();\n    vi.useFakeTimers();\n  });\n\n  afterEach(() => {\n    vi.restoreAllMocks();\n  });\n\n  test('default function should have a timeout', async () => {\n    class Dummy {\n      async ping(): Promise<'pong'> {\n        return new Promise(vi.fn());\n      }\n    }\n\n    const channel = createRpcChannel<Dummy>('Timeout');\n    const rpcExtension = new RpcExtension(webview);\n    rpcExtension.init();\n    const rpcBrowser = new RpcBrowser(window, api);\n\n    rpcExtension.registerInstance(channel, new Dummy());\n\n    const proxy = rpcBrowser.getProxy<Dummy>(channel);\n\n    let error: Error | undefined;\n    proxy.ping().catch((err: unknown) => {\n      error = err as Error;\n    });\n\n    await vi.advanceTimersByTimeAsync(5_000);\n    expect(error?.message).toBe('Timeout');\n  });\n\n  test('noTimeoutChannels should not have a timeout', async () => {\n    class DummyTimeout {\n      async ping(): Promise<'pong'> {\n        return new Promise(resolve => {\n          setTimeout(resolve.bind(undefined, 'pong'), 8_000);\n        });\n      }\n    }\n\n    const channel = 
createRpcChannel<DummyTimeout>('DummyTimeout');\n\n    const rpcExtension = new RpcExtension(webview);\n    rpcExtension.init();\n    const rpcBrowser = new RpcBrowser(window, api);\n\n    rpcExtension.registerInstance(channel, new DummyTimeout());\n\n    // flag ping method as being without a timeout\n    const proxy = rpcBrowser.getProxy<DummyTimeout>(channel, { noTimeoutMethods: ['ping'] });\n\n    let error: Error | undefined;\n    let result: 'pong' | undefined;\n    proxy\n      .ping()\n      .then(mResult => {\n        result = mResult;\n      })\n      .catch((err: unknown) => {\n        error = err as Error;\n      });\n\n    await vi.advanceTimersByTimeAsync(5_000);\n    expect(error).toBeUndefined();\n    await vi.advanceTimersByTimeAsync(5_000);\n    expect(error).toBeUndefined();\n    expect(result).toBe('pong');\n  });\n});\n\ntest('dispose', () => {\n  const dummyWebview = { onDidReceiveMessage: vi.fn() } as unknown as Webview;\n  const fakeDispose = vi.fn();\n  vi.mocked(dummyWebview.onDidReceiveMessage).mockReturnValue({ dispose: fakeDispose });\n  const rpcExtension = new RpcExtension(dummyWebview);\n  rpcExtension.init();\n\n  // call dispose\n  rpcExtension.dispose();\n\n  expect(fakeDispose).toHaveBeenCalledOnce();\n});\n\ndescribe('rpcExtension onDidReceiveMessage', () => {\n  const dummyWebview = { onDidReceiveMessage: vi.fn() } as unknown as Webview;\n  const fakeDispose = vi.fn();\n  let rpcExtension: RpcExtension;\n\n  let onDidReceiveMessageCallback: (message: unknown) => void;\n\n  beforeEach(() => {\n    vi.mocked(dummyWebview.onDidReceiveMessage).mockReturnValue({ dispose: fakeDispose });\n    rpcExtension = new RpcExtension(dummyWebview);\n    rpcExtension.init();\n    // get the callback from the onDidReceiveMessage call\n    onDidReceiveMessageCallback = vi.mocked(dummyWebview.onDidReceiveMessage).mock.calls[0][0];\n    // expect it is defined\n    expect(onDidReceiveMessageCallback).toBeDefined();\n  });\n\n  
test('isMessageRequest', () => {\n    // send a message that is not a request\n    onDidReceiveMessageCallback('');\n\n    // check that console.error was called\n    expect(console.error).toHaveBeenCalledWith('Received incompatible message.', '');\n  });\n\n  test('hasInstance', async () => {\n    // send a message that is a 'valid' request\n    await expect(\n      onDidReceiveMessageCallback({ id: 'test', channel: 'test', method: 'test', args: [] }),\n    ).rejects.toThrow('channel does not exist.');\n\n    // check that console.error was called\n    expect(console.error).toHaveBeenCalledWith('Trying to call on an unknown channel test. Available: ');\n  });\n});\n\ntest('Test register duplicated channel', async () => {\n  type Double = {\n    double: (value: number) => Promise<number>;\n  };\n\n  const channel1 = createRpcChannel<Double>('existing');\n  expect(channel1.name).toBe('existing');\n  expect(() => createRpcChannel<Double>('existing')).toThrowError('Duplicate channel. Channel existing already exists');\n});\n"
  },
  {
    "path": "packages/shared/src/messages/MessageProxy.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n/* eslint-disable @typescript-eslint/no-explicit-any */\n\nimport type { Webview, Disposable } from '@podman-desktop/api';\n\nexport interface IMessage {\n  id: number;\n  channel: string;\n  method: string;\n}\n\nexport interface IMessageRequest extends IMessage {\n  args: unknown[];\n}\n\nexport interface IMessageResponse extends IMessageRequest {\n  status: 'error' | 'success';\n  error?: string;\n  body: unknown;\n}\n\nexport interface ISubscribedMessage {\n  id: string;\n  body: any;\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype UnaryRPC = (...args: any[]) => Promise<unknown>;\n\nexport function isMessageRequest(content: unknown): content is IMessageRequest {\n  return !!content && typeof content === 'object' && 'id' in content && 'channel' in content;\n}\n\nexport function isMessageResponse(content: unknown): content is IMessageResponse {\n  return isMessageRequest(content) && 'status' in content;\n}\n\n// instance has methods that are callable\n// eslint-disable-next-line @typescript-eslint/no-unused-vars\ntype ObjectInstance<T> = {\n  [key: string]: (...args: unknown[]) => Promise<unknown>;\n};\n\nexport class RpcExtension 
implements Disposable {\n  #webviewDisposable: Disposable | undefined;\n\n  #instances: Map<string, ObjectInstance<unknown>> = new Map();\n\n  constructor(private webview: Webview) {}\n\n  dispose(): void {\n    this.#webviewDisposable?.dispose();\n  }\n\n  init(): void {\n    this.#webviewDisposable = this.webview.onDidReceiveMessage(async (message: unknown) => {\n      if (!isMessageRequest(message)) {\n        console.error('Received incompatible message.', message);\n        return;\n      }\n\n      if (!this.#instances.has(message.channel)) {\n        console.error(\n          `Trying to call on an unknown channel ${message.channel}. Available: ${Array.from(this.#instances.keys())}`,\n        );\n        throw new Error('channel does not exist.');\n      }\n\n      try {\n        const result = await this.#instances.get(message.channel)?.[message.method]?.(...message.args);\n        await this.webview.postMessage({\n          id: message.id,\n          channel: message.channel,\n          body: result,\n          status: 'success',\n        } as IMessageResponse);\n      } catch (err: unknown) {\n        let errorMessage: string;\n        // Depending on the object throw we try to extract the error message\n        if (err instanceof Error) {\n          errorMessage = err.message;\n        } else if (typeof err === 'string') {\n          errorMessage = err;\n        } else {\n          errorMessage = String(err);\n        }\n\n        await this.webview.postMessage({\n          id: message.id,\n          channel: message.channel,\n          body: undefined,\n          status: 'error',\n          error: errorMessage,\n        } as IMessageResponse);\n      }\n    });\n  }\n\n  fire<T>(channel: RpcChannel<T>, body: T): Promise<boolean> {\n    return this.webview.postMessage({\n      id: channel.name,\n      body,\n    });\n  }\n\n  registerInstance<T extends Record<keyof T, UnaryRPC>, R extends T>(channel: RpcChannel<T>, instance: R): void {\n    // convert the 
instance to an object with method names as keys\n    this.#instances.set(channel.name, instance as ObjectInstance<unknown>);\n  }\n}\n\nexport interface Subscriber {\n  unsubscribe(): void;\n}\n\nexport type Listener<T> = (value: T) => void;\n\nexport class RpcBrowser {\n  counter: number = 0;\n  promises: Map<number, { resolve: (value: unknown) => unknown; reject: (value: unknown) => void }> = new Map();\n  subscribers: Map<string, Set<Listener<unknown>>> = new Map();\n\n  getUniqueId(): number {\n    return ++this.counter;\n  }\n\n  constructor(\n    private window: Window,\n    private api: PodmanDesktopApi,\n  ) {\n    this.init();\n  }\n\n  init(): void {\n    // eslint-disable-next-line sonarjs/post-message\n    this.window.addEventListener('message', (event: MessageEvent) => {\n      const message = event.data;\n      if (isMessageResponse(message)) {\n        if (!this.promises.has(message.id)) {\n          console.error('Unknown message id.');\n          return;\n        }\n\n        const { resolve, reject } = this.promises.get(message.id) ?? {};\n\n        if (message.status === 'error') {\n          reject?.(message.error);\n        } else {\n          resolve?.(message.body);\n        }\n        this.promises.delete(message.id);\n      } else if (this.isSubscribedMessage(message)) {\n        this.subscribers.get(message.id)?.forEach(handler => handler(message.body));\n      } else {\n        console.error('Received incompatible message.', message);\n        return;\n      }\n    });\n  }\n\n  getProxy<T extends Record<keyof T, UnaryRPC>>(\n    channel: RpcChannel<T>,\n    options?: { noTimeoutMethods: Array<keyof T> },\n  ): T {\n    // transform noTimeoutMethods keyof into an array of strings\n    const noTimeoutMethodsValues: string[] = options?.noTimeoutMethods\n      ? 
(Object.values(options.noTimeoutMethods) as string[])\n      : [];\n\n    const proxyHandler: ProxyHandler<object> = {\n      get: (target, prop, receiver) => {\n        if (typeof prop === 'string') {\n          return (...args: unknown[]) => {\n            return this.invoke(channel.name, noTimeoutMethodsValues, prop, ...args);\n          };\n        }\n        return Reflect.get(target, prop, receiver);\n      },\n    };\n\n    // eslint-disable-next-line no-null/no-null\n    return new Proxy(Object.create(null), proxyHandler);\n  }\n\n  protected async invoke(\n    channel: string,\n    noTimeoutMethodsValues: string[],\n    method: string,\n    ...args: unknown[]\n  ): Promise<unknown> {\n    // Generate a unique id for the request\n    const requestId = this.getUniqueId();\n\n    const promise = new Promise((resolve, reject) => {\n      this.promises.set(requestId, { resolve, reject });\n    });\n\n    // Post the message\n    this.api.postMessage({\n      id: requestId,\n      channel,\n      method,\n      args,\n    } as IMessageRequest);\n\n    // Add some timeout\n    if (Array.isArray(noTimeoutMethodsValues) && !noTimeoutMethodsValues.includes(method)) {\n      setTimeout(() => {\n        const { reject } = this.promises.get(requestId) ?? {};\n        if (!reject) return;\n        reject(new Error('Timeout'));\n        this.promises.delete(requestId);\n      }, 5000);\n    }\n\n    // Create a Promise\n    return promise;\n  }\n\n  subscribe<T>(rpcChannel: RpcChannel<T>, f: Listener<T>): Subscriber {\n    this.subscribers.set(\n      rpcChannel.name,\n      (this.subscribers.get(rpcChannel.name) ?? 
new Set()).add(f as Listener<unknown>),\n    );\n\n    return {\n      unsubscribe: (): void => {\n        this.subscribers.get(rpcChannel.name)?.delete(f as Listener<unknown>);\n      },\n    };\n  }\n\n  isSubscribedMessage(content: any): content is ISubscribedMessage {\n    // After migrating Svelte 4 → 5, snippets don’t always render at startup\n    // so `content.id` may not be in `this.subscribers` yet\n    return !!content && 'id' in content && 'body' in content;\n  }\n}\n\n// identifier for a given interface\nexport class RpcChannel<T> {\n  // variable used to use the marker interface T\n  protected _marker: T | undefined;\n\n  constructor(private readonly channel: string) {}\n\n  public get name(): string {\n    return this.channel;\n  }\n}\n\n// keep the list of all RPC Channels being created\n// allow to check if a channel is already created\nconst rpcChannelList = new Set<string>();\n\n// defines a channel with the given name for the interface T\nexport function createRpcChannel<T>(channel: string): RpcChannel<T> {\n  if (rpcChannelList.has(channel)) {\n    throw new Error(`Duplicate channel. Channel ${channel} already exists`);\n  }\n  rpcChannelList.add(channel);\n  return new RpcChannel<T>(channel);\n}\n\nexport function clearRpcChannelList(): void {\n  rpcChannelList.clear();\n}\n"
  },
  {
    "path": "packages/shared/src/models/FilterRecipesResult.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Recipe } from './IRecipe';\n\nexport interface FilterRecipesResult {\n  filters: RecipeFilters;\n  choices: RecipeChoices;\n  result: Recipe[];\n}\n\nexport type RecipeFilters = {\n  [key in CatalogFilterKey]?: string[];\n};\n\nexport type RecipeChoices = {\n  [key in CatalogFilterKey]?: Choice[];\n};\n\nexport type Choice = {\n  name: string;\n  count: number;\n};\n\nexport type CatalogFilterKey = 'languages' | 'tools' | 'frameworks';\n"
  },
  {
    "path": "packages/shared/src/models/IApplicationCatalog.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Category } from './ICategory';\nimport type { ModelInfo } from './IModelInfo';\nimport type { Recipe } from './IRecipe';\n\nexport interface ApplicationCatalog {\n  version?: string;\n  recipes: Recipe[];\n  models: ModelInfo[];\n  categories: Category[];\n}\n"
  },
  {
    "path": "packages/shared/src/models/IApplicationState.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { PodInfo } from '@podman-desktop/api';\nimport type { InferenceType } from './IInference';\n\nexport type PodHealth = 'none' | 'starting' | 'healthy' | 'unhealthy';\n\nexport interface ApplicationState {\n  recipeId: string;\n  modelId: string;\n  pod: PodInfo;\n  appPorts: number[];\n  modelPorts: number[];\n  health: PodHealth;\n  backend: InferenceType;\n  name?: string;\n}\n"
  },
  {
    "path": "packages/shared/src/models/ICategory.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface Category {\n  id: string;\n  name: string;\n  description?: string;\n}\n"
  },
  {
    "path": "packages/shared/src/models/IContainerConnectionInfo.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { VMType } from './IPodman';\nimport type { ModelCheckerContext, ModelInfo } from './IModelInfo';\n\nexport interface ContainerProviderConnectionInfo {\n  providerId: string;\n  name: string;\n  type: 'podman'; // we only support podman\n  status: 'started' | 'stopped' | 'starting' | 'stopping' | 'unknown';\n  vmType: VMType;\n}\n\nexport interface CheckContainerConnectionResourcesOptions {\n  model: ModelInfo & { memory: number };\n  context: ModelCheckerContext;\n  connection?: ContainerProviderConnectionInfo;\n}\n\nexport type ContainerConnectionInfo =\n  | RunningContainerConnection\n  | LowResourcesContainerConnection\n  | NoContainerConnection\n  | NativeContainerConnection;\n\nexport type ContainerConnectionInfoStatus = 'running' | 'no-machine' | 'low-resources';\n\nexport interface RunningContainerConnection {\n  name: string;\n  status: 'running';\n  canRedirect: boolean;\n}\n\nexport interface LowResourcesContainerConnection {\n  name: string;\n  cpus: number;\n  memoryIdle: number;\n  cpusExpected: number;\n  memoryExpected: number;\n  status: 'low-resources';\n  canEdit: boolean;\n  canRedirect: boolean;\n}\n\nexport interface 
NoContainerConnection {\n  status: 'no-machine';\n  canRedirect: boolean;\n}\n\nexport interface NativeContainerConnection {\n  status: 'native';\n  canRedirect: boolean;\n}\n"
  },
  {
    "path": "packages/shared/src/models/IExtensionConfiguration.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface ExtensionConfiguration {\n  experimentalGPU: boolean;\n  modelsPath: string;\n  apiPort: number;\n  inferenceRuntime: string;\n  experimentalTuning: boolean;\n  modelUploadDisabled: boolean;\n  showGPUPromotion: boolean;\n  appearance: string;\n}\n"
  },
  {
    "path": "packages/shared/src/models/IGPUInfo.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface IGPUInfo {\n  model: string;\n  vendor: GPUVendor;\n  vram: number | undefined;\n}\n\nexport enum GPUVendor {\n  NVIDIA = 'NVIDIA',\n  APPLE = 'Apple',\n  INTEL = 'Intel Corporation',\n  UNKNOWN = 'unknown',\n}\n"
  },
  {
    "path": "packages/shared/src/models/IInference.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect, test } from 'vitest';\nimport { InferenceType, toInferenceType } from './IInference';\n\ntest('toInferenceType', () => {\n  expect(toInferenceType('llama-cpp')).toEqual(InferenceType.LLAMA_CPP);\n  expect(toInferenceType(InferenceType.LLAMA_CPP)).toEqual(InferenceType.LLAMA_CPP);\n  expect(toInferenceType('not-known')).toEqual(InferenceType.NONE);\n  expect(toInferenceType('')).toEqual(InferenceType.NONE);\n  expect(toInferenceType(undefined)).toEqual(InferenceType.NONE);\n});\n"
  },
  {
    "path": "packages/shared/src/models/IInference.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ModelInfo } from './IModelInfo';\n\nexport enum InferenceType {\n  LLAMA_CPP = 'llama-cpp',\n  WHISPER_CPP = 'whisper-cpp',\n  OPENVINO = 'openvino',\n  NONE = 'none',\n}\n\nconst InferenceTypeLabel = {\n  'llama-cpp': 'llamacpp',\n  'whisper-cpp': 'whispercpp',\n  openvino: 'openvino',\n  none: 'None',\n};\n\n// toInferenceType casts a string to InferenceType\n// returns NONE value if input value is undefined or unknown\nexport function toInferenceType(type: string | undefined): InferenceType {\n  if (!type) {\n    return InferenceType.NONE;\n  }\n  if (Object.values(InferenceType).includes(type as InferenceType)) {\n    return type as InferenceType;\n  }\n  return InferenceType.NONE;\n}\n\nexport function inferenceTypeLabel(type: InferenceType): string {\n  if (type in InferenceTypeLabel) {\n    return InferenceTypeLabel[type];\n  }\n  return InferenceTypeLabel['none'];\n}\n\nexport type InferenceServerStatus = 'stopped' | 'running' | 'deleting' | 'stopping' | 'error' | 'starting';\n\nexport interface InferenceServer {\n  /**\n   * Supported models\n   */\n  models: ModelInfo[];\n  /**\n   * Container info\n   */\n  container: {\n    engineId: 
string;\n    containerId: string;\n  };\n  connection: {\n    port: number;\n  };\n  /**\n   * Inference server status\n   */\n  status: InferenceServerStatus;\n  /**\n   * Health check\n   */\n  health?: {\n    Status: string;\n    FailingStreak: number;\n    Log: Array<{\n      Start: string;\n      End: string;\n      ExitCode: number;\n      Output: string;\n    }>;\n  };\n  /**\n   * Exit code\n   */\n  exit?: number;\n  /**\n   * The type of inference server (aka backend)\n   */\n  type: InferenceType;\n  /**\n   * Inference labels\n   */\n  labels: Record<string, string>;\n  /**\n   * Optional name\n   */\n  name?: string;\n}\n"
  },
  {
    "path": "packages/shared/src/models/ILocalModelInfo.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { InferenceType } from './IInference';\n\nexport interface LocalModelInfo {\n  file: string;\n  path: string;\n  size?: number;\n  creation?: Date;\n}\n\nexport interface LocalModelImportInfo {\n  /**\n   * Absolute path to the models file\n   */\n  path: string;\n  /**\n   * Name that will be used to display the model\n   */\n  name: string;\n  /**\n   * The backend to use to run the model\n   */\n  backend?: InferenceType;\n}\n"
  },
  {
    "path": "packages/shared/src/models/ILocalRepository.ts",
    "content": "export interface LocalRepository {\n  // recipeFolder\n  path: string;\n  // recipeFolder + basedir\n  sourcePath: string;\n  labels: { [id: string]: string };\n}\n"
  },
  {
    "path": "packages/shared/src/models/IModelInfo.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { LocalModelInfo } from './ILocalModelInfo';\n\nexport interface ModelInfo {\n  id: string;\n  name: string;\n  description: string;\n  registry?: string;\n  license?: string;\n  url?: string;\n  file?: LocalModelInfo;\n  state?: 'deleting';\n  memory?: number;\n  properties?: {\n    [key: string]: string;\n  };\n  sha256?: string;\n  /**\n   * The backend field aims to target which inference\n   * server the model requires\n   */\n  backend?: string;\n}\n\nexport type ModelCheckerContext = 'inference' | 'recipe';\n"
  },
  {
    "path": "packages/shared/src/models/IModelOptions.ts",
    "content": "interface StreamOptions {\n  include_usage?: boolean;\n}\n\nexport interface ModelOptions {\n  temperature?: number;\n  max_tokens?: number;\n  top_p?: number;\n  stream_options?: StreamOptions;\n}\n"
  },
  {
    "path": "packages/shared/src/models/IModelResponse.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface ModelResponse {\n  created: number;\n  object: string;\n  id: string;\n  model: string;\n  choices: ModelResponseChoice[];\n  usage?: ModelResponseUsage;\n}\n\nexport interface ModelResponseChoice {\n  index: number;\n  finish_reason: string;\n  text: string;\n}\n\nexport interface ModelResponseUsage {\n  prompt_tokens: number;\n  completion_tokens: number;\n  total_tokens: number;\n}\n"
  },
  {
    "path": "packages/shared/src/models/IPlaygroundMessage.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { ModelOptions } from './IModelOptions';\n\nexport interface Message {\n  id: string;\n  timestamp: number;\n}\n\nexport interface ErrorMessage extends Message {\n  error: string;\n}\n\nexport interface ModelUsage {\n  completion_tokens: number;\n  prompt_tokens: number;\n}\n\nexport interface ChatMessage extends Message {\n  role: 'system' | 'user' | 'assistant';\n  content?: string | object;\n}\n\nexport interface AssistantChat extends ChatMessage {\n  role: 'assistant';\n  completed?: number;\n  content?: string | ToolCall;\n}\n\nexport interface SystemPrompt extends ChatMessage {\n  role: 'system';\n  content: string;\n}\n\nexport interface PendingChat extends AssistantChat {\n  completed: undefined;\n  choices: Choice[];\n}\n\nexport interface UserChat extends ChatMessage {\n  role: 'user';\n  options?: ModelOptions;\n}\n\nexport interface Conversation {\n  id: string;\n  messages: Message[];\n  modelId: string;\n  name: string;\n  usage?: ModelUsage;\n}\n\nexport interface Choice {\n  content: string;\n}\n\nexport interface ToolCall {\n  type: 'tool-call';\n  toolCallId: string;\n  toolName: string;\n  args: object;\n  result?: string | 
object;\n}\n\nexport function isErrorMessage(msg: Message): msg is ErrorMessage {\n  return 'error' in msg;\n}\n\nexport function isChatMessage(msg: Message): msg is ChatMessage {\n  return 'role' in msg;\n}\n\nexport function isAssistantChat(msg: Message): msg is AssistantChat {\n  return isChatMessage(msg) && msg.role === 'assistant';\n}\n\nexport function isAssistantToolCall(msg: Message): msg is AssistantChat {\n  return isAssistantChat(msg) && (msg.content as ToolCall)?.type === 'tool-call';\n}\n\nexport function isUserChat(msg: Message): msg is UserChat {\n  return isChatMessage(msg) && msg.role === 'user';\n}\n\nexport function isPendingChat(msg: Message): msg is PendingChat {\n  return isAssistantChat(msg) && !msg.completed;\n}\n\nexport function isSystemPrompt(msg: Message): msg is SystemPrompt {\n  return isChatMessage(msg) && msg.role === 'system';\n}\n"
  },
  {
    "path": "packages/shared/src/models/IPlaygroundV2.ts",
    "content": "export interface PlaygroundV2 {\n  id: string;\n  name: string;\n  modelId: string;\n}\n"
  },
  {
    "path": "packages/shared/src/models/IPodman.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport enum VMType {\n  WSL = 'wsl',\n  LIBKRUN = 'libkrun',\n  LIBKRUN_LABEL = 'GPU enabled (LibKrun)',\n  QEMU = 'qemu',\n  APPLEHV = 'applehv',\n  APPLEHV_LABEL = 'default (Apple HyperVisor)',\n  HYPERV = 'hyperv',\n  UNKNOWN = 'unknown',\n}\n"
  },
  {
    "path": "packages/shared/src/models/IRecipe.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ContainerProviderConnectionInfo } from './IContainerConnectionInfo';\n\nimport type { InferenceServer } from './IInference';\n\nexport type RecipePullOptions = RecipePullOptionsDefault | RecipePullOptionsWithModelInference;\n\nexport interface RecipePullOptionsDefault {\n  connection?: ContainerProviderConnectionInfo;\n  recipeId: string;\n  dependencies?: RecipeDependencies;\n}\n\nexport type RecipePullOptionsWithModelInference = RecipePullOptionsDefault & {\n  modelId: string;\n};\n\nexport interface RecipeDependencies {\n  llamaStack?: boolean;\n}\n\nexport function isRecipePullOptionsWithModelInference(\n  options: RecipePullOptions,\n): options is RecipePullOptionsWithModelInference {\n  return 'modelId' in options;\n}\n\nexport interface RecipeComponents {\n  images: RecipeImage[];\n  inferenceServer?: InferenceServer;\n}\n\nexport interface RecipeImage {\n  id: string;\n  engineId: string;\n  name?: string;\n  // recipe related\n  recipeId: string;\n  modelService: boolean;\n  ports: string[];\n  appName: string;\n}\n\nexport interface Recipe {\n  id: string;\n  name: string;\n  categories: string[];\n  description: string;\n  icon?: string;\n  
repository: string;\n  ref?: string;\n  readme: string;\n  basedir?: string;\n  recommended?: string[];\n  /**\n   * The backend field aims to target which inference\n   * server the recipe requires\n   */\n  backend?: string;\n  languages?: string[];\n  frameworks?: string[];\n}\n"
  },
  {
    "path": "packages/shared/src/models/IRecipeModelIndex.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport interface RecipeModelIndex {\n  recipeId: string;\n  modelId: string;\n}\n"
  },
  {
    "path": "packages/shared/src/models/ITask.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport type TaskState = 'loading' | 'error' | 'success';\n\nexport interface Task {\n  readonly id: string;\n  error?: string;\n  name: string;\n  state: TaskState;\n  progress?: number;\n  labels?: { [id: string]: string };\n  cancellationToken?: number;\n}\n"
  },
  {
    "path": "packages/shared/src/models/InferenceServerConfig.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ModelInfo } from './IModelInfo';\nimport type { ContainerProviderConnectionInfo } from './IContainerConnectionInfo';\n\nexport type CreationInferenceServerOptions = Partial<InferenceServerConfig> & { modelsInfo: ModelInfo[] };\n\nexport interface InferenceServerConfig {\n  /**\n   * Port to expose\n   */\n  port: number;\n  /**\n   * The connection info to use\n   */\n  connection?: ContainerProviderConnectionInfo;\n  /**\n   * The name of the inference provider to use\n   */\n  inferenceProvider?: string;\n  /**\n   * Image to use\n   */\n  image?: string;\n  /**\n   * Labels to use for the container\n   */\n  labels: { [id: string]: string };\n\n  /**\n   * Model info for the models\n   */\n  modelsInfo: ModelInfo[];\n  /**\n   * Number of layers to offload to the GPU\n   * @default undefined the GPU will not be used\n   * 999 to offload all the layers\n   */\n  gpuLayers?: number;\n}\n"
  },
  {
    "path": "packages/shared/src/models/McpSettings.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ToolSet } from 'ai';\n\nexport interface McpSettings {\n  servers: Record<string, McpServer>;\n}\n\nexport enum McpServerType {\n  STDIO = 'stdio',\n  SSE = 'sse',\n}\n\nexport interface McpServer {\n  name: string;\n  enabled: boolean;\n  type: McpServerType;\n  command: string;\n  args: string[];\n  url: string;\n  headers: Record<string, string>;\n}\n\nexport interface McpClient {\n  init?(): Promise<this>;\n  close(): Promise<void>;\n  tools(): Promise<ToolSet>;\n}\n"
  },
  {
    "path": "packages/shared/src/models/RequestOptions.ts",
    "content": "export interface FormParamDefinition {\n  key: string;\n  value: string;\n  type: string;\n}\n\nexport interface RequestOptions {\n  url: string;\n  method?: string;\n  header?: {\n    key?: string;\n    value?: string;\n    system?: boolean;\n  }[];\n  body?: {\n    mode: 'raw' | 'formdata';\n    raw?: string;\n    formdata?: FormParamDefinition[];\n  };\n}\n"
  },
  {
    "path": "packages/shared/src/models/instructlab/IInstructlabContainerConfiguration.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ContainerProviderConnectionInfo } from '../IContainerConnectionInfo';\n\nexport interface InstructlabContainerConfiguration {\n  /**\n   * The connection info to use\n   */\n  connection?: ContainerProviderConnectionInfo;\n}\n"
  },
  {
    "path": "packages/shared/src/models/instructlab/IInstructlabContainerInfo.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nexport const INSTRUCTLAB_CONTAINER_TRACKINGID = 'instructlab.trackingid';\n\nexport interface InstructlabContainerInfo {\n  /**\n   * The container engine it is running on\n   */\n  engineId: string;\n  /**\n   * The container id\n   */\n  containerId: string;\n  /**\n   * the content of the terminal\n   */\n  content?: string;\n}\n"
  },
  {
    "path": "packages/shared/src/models/instructlab/IInstructlabSession.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nexport type InstructlabSessionStatus = 'fine-tuned' | 'generating-instructions';\n\nexport interface InstructlabSession {\n  name: string;\n\n  modelId: string;\n\n  targetModel: string;\n\n  repository: string;\n\n  createdTime: number;\n\n  status: InstructlabSessionStatus;\n}\n"
  },
  {
    "path": "packages/shared/src/models/llama-stack/LlamaStackContainerConfiguration.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { ContainerProviderConnectionInfo } from '../IContainerConnectionInfo';\n\nexport interface LlamaStackContainerConfiguration {\n  /**\n   * The connection info to use\n   */\n  connection?: ContainerProviderConnectionInfo;\n}\n"
  },
  {
    "path": "packages/shared/src/models/llama-stack/LlamaStackContainerInfo.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nexport const LLAMA_STACK_CONTAINER_TRACKINGID = 'llama-stack.trackingid';\n\nexport interface LlamaStackContainerInfo {\n  containerId: string;\n  port: number;\n  state: string;\n}\nexport interface LlamaStackContainers {\n  server: LlamaStackContainerInfo | undefined;\n  playground: LlamaStackContainerInfo | undefined;\n}\n"
  },
  {
    "path": "packages/shared/src/uri/Uri.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Uri as APIUri } from '@podman-desktop/api';\nimport { afterEach, expect, test, vi } from 'vitest';\n\nimport { Uri } from './Uri';\n\nafterEach(() => {\n  vi.resetAllMocks();\n  vi.clearAllMocks();\n});\n\ntest('Expect revive to return revived Uri object', () => {\n  const uriSerialized = {\n    _scheme: 'scheme',\n    _authority: 'authority',\n    _path: 'path',\n    _query: 'query',\n    _fragment: 'fragment',\n  } as unknown as APIUri;\n\n  const revived = Uri.revive(uriSerialized);\n  expect(revived.authority).equals('authority');\n  expect(revived.scheme).equals('scheme');\n  expect(revived.path).equals('path');\n  expect(revived.fsPath).equals('path');\n  expect(revived.query).equals('query');\n  expect(revived.fragment).equals('fragment');\n});\n"
  },
  {
    "path": "packages/shared/src/uri/Uri.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { Uri as APIUri } from '@podman-desktop/api';\n\nexport class Uri {\n  private constructor(\n    private _scheme: string,\n    private _authority: string,\n    private _path: string,\n    private _query: string,\n    private _fragment: string,\n  ) {}\n\n  get fsPath(): string {\n    return this._path;\n  }\n  get scheme(): string {\n    return this._scheme;\n  }\n\n  get authority(): string {\n    return this._authority;\n  }\n\n  get path(): string {\n    return this._path;\n  }\n\n  get query(): string {\n    return this._query;\n  }\n\n  get fragment(): string {\n    return this._fragment;\n  }\n\n  with(_change?: { scheme?: string; authority?: string; path?: string; query?: string; fragment?: string }): Uri {\n    throw new Error('unsupported');\n  }\n\n  toString(): string {\n    throw new Error('unsupported');\n  }\n\n  static revive(serialized: APIUri): Uri {\n    if (serialized instanceof Uri) {\n      return serialized;\n    }\n    const serializedProps: Map<string, string> = Object.entries(serialized)\n      .map(([key, value]) => [key.startsWith('_') ? 
key.substring(1) : key, value])\n      .reduce((map, [key, value]) => {\n        map.set(key, value);\n        return map;\n      }, new Map());\n    return new Uri(\n      serializedProps.get('scheme') ?? '',\n      serializedProps.get('authority') ?? '',\n      serializedProps.get('path') ?? '',\n      serializedProps.get('query') ?? '',\n      serializedProps.get('fragment') ?? '',\n    );\n  }\n}\n"
  },
  {
    "path": "packages/shared/tsconfig.json",
    "content": "{\n  \"extends\": \"@tsconfig/svelte/tsconfig.json\",\n  \"compilerOptions\": {\n    \"target\": \"esnext\",\n    \"module\": \"esnext\",\n    \"strict\": true,\n    \"resolveJsonModule\": true,\n    \"preserveValueImports\": false,\n    \"baseUrl\": \".\",\n    /**\n     * Typecheck JS in `.svelte` and `.js` files by default.\n     * Disable checkJs if you'd like to use dynamic types in JS.\n     * Note that setting allowJs false does not prevent the use\n     * of JS in `.svelte` files.\n     */\n    \"allowJs\": true,\n    \"checkJs\": true,\n    \"paths\": {\n      \"@shared/*\": [\"../shared/src/*\"]\n    }\n  },\n  \"include\": [\"src/**/*.d.ts\", \"src/**/*.ts\", \"src/**/*.js\", \"src/**/*.svelte\", \"types/*.d.ts\", \"../../types/**/*.d.ts\"]\n}\n"
  },
  {
    "path": "packages/shared/vite.config.js",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport {join} from 'path';\nimport {builtinModules} from 'module';\n\nconst PACKAGE_ROOT = __dirname;\n\n/**\n * @type {import('vite').UserConfig}\n * @see https://vitejs.dev/config/\n */\nconst config = {\n  mode: process.env.MODE,\n  root: PACKAGE_ROOT,\n  envDir: process.cwd(),\n  resolve: {\n    alias: {\n      '/@/': join(PACKAGE_ROOT, 'src') + '/',\n    },\n  },\n  build: {\n    sourcemap: 'inline',\n    target: 'esnext',\n    outDir: 'dist',\n    assetsDir: '.',\n    minify: process.env.MODE === 'production' ? 'esbuild' : false,\n    lib: {\n      entry: 'src/extension.ts',\n      formats: ['cjs'],\n    },\n    rollupOptions: {\n      external: [\n        '@podman-desktop/api',\n        ...builtinModules.flatMap(p => [p, `node:${p}`]),\n      ],\n      output: {\n        entryFileNames: '[name].js',\n      },\n    },\n    emptyOutDir: true,\n    reportCompressedSize: false,\n  },\n};\n\nexport default config;\n"
  },
  {
    "path": "packages/shared/vitest.config.js",
    "content": "/**********************************************************************\n * Copyright (C) 2023 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport path from 'node:path';\nimport { join } from 'path';\n\nconst PACKAGE_ROOT = __dirname;\n\nconst config = {\n  test: {\n    include: ['**/*.{test,spec}.?(c|m)[jt]s?(x)', '../shared/**/*.{test,spec}.?(c|m)[jt]s?(x)']\n  },\n  resolve: {\n    alias: {\n      '@podman-desktop/api': path.resolve(__dirname, '__mocks__/@podman-desktop/api.js'),\n      '/@/': join(PACKAGE_ROOT, 'src') + '/',\n    },\n  },\n};\n\nexport default config;\n"
  },
  {
    "path": "pnpm-workspace.yaml",
    "content": "packages:\n  - 'packages/*'\n  - 'tools'\n  - 'tests/*'\n"
  },
  {
    "path": "tests/playwright/package.json",
    "content": "{\n  \"name\": \"ai-lab-tests-playwright\",\n  \"version\": \"1.10.0-next\",\n  \"description\": \"Podman Desktop AI Lab extension Playwright E2E tests\",\n  \"scripts\": {\n    \"test:e2e\": \"xvfb-maybe --auto-servernum --server-args='-screen 0 1280x960x24' -- npx playwright test src/\",\n    \"test:e2e:smoke\": \"xvfb-maybe --auto-servernum --server-args='-screen 0 1280x960x24' -- npx playwright test src/ -g @smoke\",\n    \"test:e2e:instructlab\": \"xvfb-maybe --auto-servernum --server-args='-screen 0 1280x960x24' -- npx playwright test src/ -g @instructlab\"\n  },\n  \"author\": \"Red Hat\",\n  \"license\": \"Apache-2.0\",\n  \"devDependencies\": {\n    \"@playwright/test\": \"^1.59.1\",\n    \"@podman-desktop/tests-playwright\": \"1.27.0-next.202604201816-610b704\",\n    \"@types/node\": \"^24\",\n    \"typescript\": \"^5.9.3\",\n    \"xvfb-maybe\": \"^0.2.1\"\n  },\n  \"type\": \"module\"\n}\n"
  },
  {
    "path": "tests/playwright/playwright.config.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { defineConfig, devices } from '@playwright/test';\n\nexport default defineConfig({\n  outputDir: './output/',\n  workers: 1,\n  timeout: 60_000,\n\n  reporter: [\n    ['list'],\n    ['junit', { outputFile: './output/junit-results.xml' }],\n    ['json', { outputFile: './output/json-results.json' }],\n    ['html', { open: 'never', outputFolder: './output/html-results/' }],\n  ],\n\n  projects: [\n    {\n      name: 'chromium',\n      use: {\n        ...devices['Desktop Chrome'],\n      },\n    },\n  ],\n});\n"
  },
  {
    "path": "tests/playwright/src/ai-lab-extension.spec.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\n/**\n * The 'test-audio-to-text.wav' file used in this test was sourced from the\n * whisper.cpp project (https://github.com/ggml-org/whisper.cpp).\n * It is licensed under the MIT License (see https://github.com/ggml-org/whisper.cpp/blob/master/LICENSE for details).\n * This specific WAV file is used solely for Playwright testing purposes within this repository.\n */\n\nimport type { APIResponse, Locator } from '@playwright/test';\nimport type { NavigationBar, ExtensionsPage } from '@podman-desktop/tests-playwright';\nimport {\n  ContainerDetailsPage,\n  ContainerState,\n  expect as playExpect,\n  test,\n  RunnerOptions,\n  waitForPodmanMachineStartup,\n  isWindows,\n  isLinux,\n  isMac,\n  isCI,\n  resetPodmanMachinesFromCLI,\n  handleConfirmationDialog,\n  podmanAILabExtension,\n} from '@podman-desktop/tests-playwright';\nimport type { AILabDashboardPage } from './model/ai-lab-dashboard-page';\nimport type { AILabRecipesCatalogPage } from './model/ai-lab-recipes-catalog-page';\nimport type { AILabCatalogPage } from './model/ai-lab-model-catalog-page';\nimport type { AILabPlaygroundsPage } from './model/ai-lab-playgrounds-page';\nimport type { 
AILabPlaygroundDetailsPage } from './model/ai-lab-playground-details-page';\nimport {\n  getExtensionCard,\n  getExtensionVersion,\n  openAILabExtensionDetails,\n  openAILabPreferences,\n  reopenAILabDashboard,\n  waitForExtensionToInitialize,\n} from './utils/aiLabHandler';\nimport * as fs from 'node:fs';\nimport * as path from 'node:path';\nimport { fileURLToPath } from 'node:url';\nimport type { AILabTryInstructLabPage } from './model/ai-lab-try-instructlab-page';\nimport type { AiLlamaStackPage } from './model/ai-lab-model-llamastack-page';\nimport type { ApplicationCatalog } from '../../../packages/shared/src/models/IApplicationCatalog';\n\nconst AI_LAB_EXTENSION_OCI_IMAGE =\n  process.env.EXTENSION_OCI_IMAGE ?? 'ghcr.io/containers/podman-desktop-extension-ai-lab:nightly';\nconst AI_LAB_EXTENSION_PREINSTALLED: boolean = process.env.EXTENSION_PREINSTALLED === 'true';\nconst EXT_TEST_RAG_CHATBOT: boolean = process.env.EXT_TEST_RAG_CHATBOT === 'true';\nconst AI_LAB_CATALOG_STATUS_ACTIVE: string = 'ACTIVE';\nconst AI_LAB_TESTS_WITH_GPU_ENABLED: boolean = process.env.EXT_TEST_GPU_SUPPORT_ENABLED === 'true';\n\nlet aiLabPage: AILabDashboardPage;\nconst runnerOptions = {\n  customFolder: 'ai-lab-tests-pd',\n  aiLabModelUploadDisabled: isWindows ? 
true : false,\n};\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\nconst TEST_AUDIO_FILE_PATH: string = path.resolve(\n  __dirname,\n  '..',\n  '..',\n  'playwright',\n  'resources',\n  `test-audio-to-text.wav`,\n);\nconst AI_JSON_FILE_PATH: string = path.resolve(\n  __dirname,\n  '..',\n  '..',\n  '..',\n  'packages',\n  'backend',\n  'src',\n  'assets',\n  'ai.json',\n);\n\nconst aiJSONFile = fs.readFileSync(AI_JSON_FILE_PATH, 'utf8');\nconst AI_JSON: ApplicationCatalog = JSON.parse(aiJSONFile) as ApplicationCatalog;\nconst AI_APP_MODELS: Set<string> = new Set();\nAI_JSON.recipes.forEach(recipe => {\n  recipe.recommended?.forEach(model => {\n    AI_APP_MODELS.add(model);\n  });\n});\n// Create a set of AI models that are not the first recommended model for any app\n// eslint-disable-next-line @typescript-eslint/no-unused-vars\nconst _AI_APP_UNUSED_MODELS: string[] = [\n  ...AI_APP_MODELS.values().filter(model => {\n    // Check if the model is not the first recommended model for any app\n    return !Array.from(AI_JSON.recipes).some(recipe => {\n      return recipe.recommended?.at(0) === model;\n    });\n  }),\n];\nconst AI_APP_MODEL_AND_NAMES: Map<string, string[]> = new Map();\nAI_JSON.recipes.forEach(recipe => {\n  const recommendedModel = recipe.recommended?.at(0);\n  if (recommendedModel) {\n    const actualModelName = AI_JSON.models.find(model => model.id === recommendedModel)?.name;\n    if (actualModelName) {\n      if (!AI_APP_MODEL_AND_NAMES.has(actualModelName)) {\n        AI_APP_MODEL_AND_NAMES.set(actualModelName, []);\n      }\n      AI_APP_MODEL_AND_NAMES.get(actualModelName)?.push(recipe.name);\n    }\n  }\n});\n\n// Do not use non-instruct models in playground tests.\n// They break out of guiderails and fail the tests.\nconst PLAYGROUND_TEST_MODELS: string[] = ['ibm-granite/granite-4.0-micro-GGUF'];\n\nconst AI_APP_HTTP_TEST_APP_NAMES: string[] = ['Object Detection'];\nconst 
AI_APP_SERVICE_RESPONSE_TEST_APP_NAMES: string[] = ['Audio to Text', 'Function calling'];\n\nconst PLAYGROUND_NAME = 'test playground';\nconst SYSTEM_PROMPT = 'Always respond with: \"Hello, I am Chat Bot\"';\n\ntest.use({\n  runnerOptions: new RunnerOptions(runnerOptions),\n});\n\ntest.beforeAll(async ({ runner, welcomePage, page }) => {\n  const window = await runner.getElectronApp().firstWindow();\n  // Increase Window Size to improve video recording and screenshots\n  await window.setViewportSize({ width: 1050, height: 700 });\n\n  runner.setVideoAndTraceName('ai-lab-e2e');\n  await welcomePage.handleWelcomePage(true);\n  await waitForPodmanMachineStartup(page, 180_000);\n\n  console.log(`\n**********************************\n* TEST CONFIGURATION INFORMATION *\n**********************************\n\n  AI Lab Extension OCI Image: ${AI_LAB_EXTENSION_OCI_IMAGE}\n  AI Lab Extension Preinstalled: ${AI_LAB_EXTENSION_PREINSTALLED}\n  Test Audio File Path: ${TEST_AUDIO_FILE_PATH}\n  Playground Test Models: ${PLAYGROUND_TEST_MODELS.join(', ')}\n  AI App HTTP Tests: ${AI_APP_HTTP_TEST_APP_NAMES.join(', ')}\n  AI App Service Response Tests: ${AI_APP_SERVICE_RESPONSE_TEST_APP_NAMES.join(', ')}\n\n  Test RAG Chatbot Apps: ${EXT_TEST_RAG_CHATBOT}\n\n  IS WINDOWS: ${isWindows}\n  IS LINUX: ${isLinux}\n  IS MAC: ${isMac}\n  IS CI: ${isCI} CI env variable: ${process.env.CI ?? 
'undefined'}\n\n**********************************\n`);\n});\n\ntest.afterAll(async ({ runner }) => {\n  test.setTimeout(180_000);\n  if (isCI) {\n    await resetPodmanMachinesFromCLI();\n  }\n  await runner.close();\n});\n\ntest.describe.serial(`AI Lab extension installation and verification`, () => {\n  test.describe.serial(`AI Lab extension installation`, { tag: ['@smoke', '@instructLab'] }, () => {\n    let extensionsPage: ExtensionsPage;\n\n    test(`Open Settings -> Extensions page`, async ({ navigationBar }) => {\n      const dashboardPage = await navigationBar.openDashboard();\n      await playExpect(dashboardPage.mainPage).toBeVisible();\n      extensionsPage = await navigationBar.openExtensions();\n      await playExpect(extensionsPage.header).toBeVisible();\n    });\n\n    test(`Install AI Lab extension`, async () => {\n      test.skip(AI_LAB_EXTENSION_PREINSTALLED, 'AI Lab extension is preinstalled');\n      test.setTimeout(120_000);\n      await extensionsPage.installExtensionFromOCIImage(AI_LAB_EXTENSION_OCI_IMAGE);\n    });\n\n    test('Extension (card) is installed, present and active', async ({ navigationBar }) => {\n      await waitForExtensionToInitialize(navigationBar);\n      const extensionCard = await getExtensionCard(navigationBar);\n      await playExpect(extensionCard.status).toHaveText(AI_LAB_CATALOG_STATUS_ACTIVE);\n    });\n\n    test(`Extension's details show correct status, no error`, async ({ navigationBar }) => {\n      const aiLabExtensionDetailsPage = await openAILabExtensionDetails(navigationBar);\n      await aiLabExtensionDetailsPage.waitForLoad();\n      await aiLabExtensionDetailsPage.checkIsActive(AI_LAB_CATALOG_STATUS_ACTIVE);\n      await aiLabExtensionDetailsPage.checkForErrors();\n    });\n\n    test(`Verify AI Lab is accessible`, async ({ runner, page, navigationBar }) => {\n      aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n      await aiLabPage.navigationBar.waitForLoad();\n    });\n  });\n\n  
test.describe.serial(`AI Lab extension GPU preferences`, { tag: '@smoke' }, () => {\n    test(`Verify GPU support banner is visible, preferences are disabled`, async ({ page, navigationBar }) => {\n      test.setTimeout(15_000);\n      await playExpect(aiLabPage.gpuSupportBanner).toBeVisible();\n      await playExpect(aiLabPage.enableGpuButton).toBeVisible();\n      await playExpect(aiLabPage.dontDisplayButton).toBeVisible();\n      const preferencesPage = await openAILabPreferences(navigationBar, page);\n      await preferencesPage.waitForLoad();\n      playExpect(await preferencesPage.isGPUPreferenceEnabled()).toBeFalsy();\n    });\n\n    test(`Enable GPU support and verify preferences`, async ({ runner, page, navigationBar }) => {\n      test.setTimeout(30_000);\n      aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n      await aiLabPage.waitForLoad();\n      await aiLabPage.enableGpuSupport();\n      const preferencesPage = await openAILabPreferences(navigationBar, page);\n      await preferencesPage.waitForLoad();\n      playExpect(await preferencesPage.isGPUPreferenceEnabled()).toBeTruthy();\n    });\n\n    test.afterAll(\n      `Disable GPU support, return to AI Lab Dashboard and hide banner`,\n      async ({ runner, page, navigationBar }) => {\n        test.skip(\n          AI_LAB_TESTS_WITH_GPU_ENABLED,\n          'Skipping GPU preference reset as tests are running with GPU support enabled',\n        );\n        test.setTimeout(30_000);\n        const preferencesPage = await openAILabPreferences(navigationBar, page);\n        await preferencesPage.waitForLoad();\n        await preferencesPage.disableGPUPreference();\n        playExpect(await preferencesPage.isGPUPreferenceEnabled()).toBeFalsy();\n        aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n        await playExpect(aiLabPage.gpuSupportBanner).toBeVisible();\n        await playExpect(aiLabPage.enableGpuButton).toBeVisible();\n        await 
playExpect(aiLabPage.dontDisplayButton).toBeVisible();\n        await aiLabPage.dontDisplayButton.click();\n        await playExpect(aiLabPage.gpuSupportBanner).toBeHidden();\n      },\n    );\n  });\n\n  test.describe.serial(`Download model facebook/detr-resnet-101 via Local Server API`, () => {\n    const modelName: string = 'facebook/detr-resnet-101';\n    let localServerPort: string;\n    let extensionVersion: string | undefined;\n\n    test.beforeAll(\n      'Get AI Lab extension version and open AI Lab navigation bar',\n      async ({ page, runner, navigationBar }) => {\n        extensionVersion = await getExtensionVersion(navigationBar);\n        aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n        await aiLabPage.navigationBar.waitForLoad();\n      },\n    );\n\n    test('Retrieve local server dynamic port and verify server response', async () => {\n      const localServerPage = await aiLabPage.navigationBar.openLocalServer();\n      await localServerPage.waitForLoad();\n      localServerPort = await localServerPage.getLocalServerPort();\n\n      const response: Response = await fetch(`http://127.0.0.1:${localServerPort}/`, { cache: 'no-store' });\n      const blob: Blob = await response.blob();\n      const text: string = await blob.text();\n      playExpect(text).toContain('OK');\n    });\n\n    test('Fetch API Version', async ({ request }) => {\n      const response = await request.get(`http://127.0.0.1:${localServerPort}/api/version`, {\n        headers: {\n          Accept: 'application/json',\n        },\n      });\n      playExpect(response.ok()).toBeTruthy();\n      const apiResponse = await response.json();\n\n      console.log(`API version: ${apiResponse.version}`);\n      playExpect(apiResponse.version).toBe(extensionVersion);\n    });\n\n    test(`Download ${modelName} via API`, async ({ request }) => {\n      test.skip(true, `Skipping test due to 
https://github.com/containers/podman-desktop-extension-ai-lab/issues/3406`);\n      test.setTimeout(610_000);\n      const catalogPage = await aiLabPage.navigationBar.openCatalog();\n      await catalogPage.waitForLoad();\n      console.log(`Downloading ${modelName}...`);\n      const response = await request.post(`http://127.0.0.1:${localServerPort}/api/pull`, {\n        headers: {\n          'Content-Type': 'application/json',\n          Accept: 'application/x-ndjson',\n        },\n        data: {\n          model: modelName,\n          insecure: false,\n          stream: true,\n        },\n        timeout: 600_000,\n      });\n\n      const body = await response.body();\n      const text = body.toString();\n      playExpect(text).toContain('success');\n      await aiLabPage.navigationBar.openCatalog();\n      await catalogPage.waitForLoad();\n      await playExpect\n        // eslint-disable-next-line sonarjs/no-nested-functions\n        .poll(async () => await waitForCatalogModel(modelName))\n        .toBeTruthy();\n    });\n\n    test(`Verify ${modelName} is listed in models fetched from API`, async ({ request }) => {\n      test.skip(true, `Skipping test due to https://github.com/containers/podman-desktop-extension-ai-lab/issues/3406`);\n      const response = await request.get(`http://127.0.0.1:${localServerPort}/api/tags`, {\n        headers: {\n          Accept: 'application/json',\n        },\n      });\n      playExpect(response.ok()).toBeTruthy();\n      const parsedJson = await response.json();\n      console.log(parsedJson);\n      playExpect(parsedJson.models.length).not.toBe(0);\n      playExpect(\n        (parsedJson.models as unknown[]).find(modelEntry => (modelEntry as { model: string }).model === modelName),\n      ).toBeTruthy();\n    });\n  });\n\n  //todo: implement model service tests for appropriate models https://github.com/containers/podman-desktop-extension-ai-lab/issues/3844\n\n  AI_APP_MODEL_AND_NAMES.forEach((appNames, appModel) => 
{\n    /* eslint-disable sonarjs/no-nested-functions */\n    test.describe.serial(`Model download, Playground, AI App tests for ${appModel}`, { tag: '@smoke' }, () => {\n      let catalogPage: AILabCatalogPage;\n      let playgroundsPage: AILabPlaygroundsPage;\n      let playgroundDetailsPage: AILabPlaygroundDetailsPage;\n      let assistantResponse: Locator;\n\n      /*\n       * Model Download\n       */\n      test.beforeAll(`Download model ${appModel} via AI Lab Catalog`, async ({ runner, page, navigationBar }) => {\n        test.setTimeout(620_000);\n        aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n        await aiLabPage.navigationBar.waitForLoad();\n\n        catalogPage = await aiLabPage.navigationBar.openCatalog();\n        await catalogPage.waitForLoad();\n\n        if (!(await catalogPage.isModelDownloaded(appModel))) {\n          await catalogPage.downloadModel(appModel);\n        }\n        await playExpect\n          // eslint-disable-next-line sonarjs/no-nested-functions\n          .poll(async () => await waitForCatalogModel(appModel), { timeout: 600_000, intervals: [5_000] })\n          .toBeTruthy();\n      });\n\n      /*\n       * Playground tests\n       */\n      if (PLAYGROUND_TEST_MODELS.includes(appModel)) {\n        test.describe.serial(`Run Playground tests for ${appModel}`, () => {\n          test(`Create AI Lab playground for ${appModel}`, async () => {\n            test.setTimeout(310_000);\n            playgroundsPage = await aiLabPage.navigationBar.openPlaygrounds();\n            await playgroundsPage.waitForLoad();\n\n            await playgroundsPage.createNewPlayground(PLAYGROUND_NAME);\n            await playgroundsPage.waitForLoad();\n            await playExpect\n              // eslint-disable-next-line sonarjs/no-nested-functions\n              .poll(async () => await playgroundsPage.doesPlaygroundExist(PLAYGROUND_NAME), { timeout: 60_000 })\n              .toBeTruthy();\n          });\n\n          
test(`Go to AI Lab playground details for ${appModel}`, async () => {\n            playgroundDetailsPage = await playgroundsPage.goToPlaygroundDetails(PLAYGROUND_NAME);\n            await playgroundDetailsPage.waitForLoad();\n\n            await playExpect(playgroundDetailsPage.conversationSectionLocator).toBeVisible();\n            await playExpect(playgroundDetailsPage.temperatureSliderLocator).toBeVisible();\n            await playExpect(playgroundDetailsPage.maxTokensSliderLocator).toBeVisible();\n            await playExpect(playgroundDetailsPage.topPSliderLocator).toBeVisible();\n            await playExpect(playgroundDetailsPage.deletePlaygroundButton).toBeEnabled();\n          });\n\n          test('Set system prompt, submit user input, and verify assistant response is visible', async () => {\n            test.setTimeout(100_000);\n            await playgroundDetailsPage.defineSystemPrompt(SYSTEM_PROMPT);\n            await playgroundDetailsPage.submitUserInput('Hello');\n            // Get the first assistant response\n            assistantResponse = await playgroundDetailsPage.getAssistantResponse(0);\n            await playExpect(assistantResponse).toBeVisible();\n          });\n\n          test('Verify assistant response contains the expected system prompt', async () => {\n            playExpect(await assistantResponse.innerText()).toContain('Hello, I am Chat Bot');\n          });\n\n          test(`Delete AI Lab playground for ${appModel}`, async () => {\n            test.setTimeout(70_000);\n            playgroundsPage = await aiLabPage.navigationBar.openPlaygrounds();\n            await playgroundsPage.waitForLoad();\n\n            await playgroundsPage.deletePlayground(PLAYGROUND_NAME);\n\n            await playExpect\n              // eslint-disable-next-line sonarjs/no-nested-functions\n              .poll(async () => await playgroundsPage.doesPlaygroundExist(PLAYGROUND_NAME), { timeout: 60_000 })\n              .toBeFalsy();\n          });\n\n    
      test(`Cleaning up model service`, async () => {\n            test.setTimeout(120_000);\n            await cleanupServices();\n          });\n        });\n      }\n\n      /*\n       * Perform app installation tests for each app name associated with the model\n       */\n      appNames.forEach(appName => {\n        test.describe.serial(`AI Recipe installation ${appName}`, () => {\n          let recipesCatalogPage: AILabRecipesCatalogPage;\n\n          test.skip(\n            !process.env.EXT_TEST_RAG_CHATBOT &&\n              (appName === 'RAG Chatbot' ||\n                appName === 'Node.js RAG Chatbot' ||\n                appName === 'Graph RAG Chat Application'),\n            'EXT_TEST_RAG_CHATBOT variable not set, skipping test',\n          );\n\n          test.skip(\n            appName === 'Audio to Text' && !!isCI && !!isLinux,\n            'Audio to Text app is skipped on Linux CI due to stability issues https://github.com/containers/podman-desktop-extension-ai-lab/issues/4227 , https://github.com/containers/podman-desktop-extension-ai-lab/issues/3111',\n          );\n\n          test.skip(\n            appName === 'Object Detection' && !!isCI && !!isWindows,\n            'Object Detection app is skipped on Windows CI due to https://github.com/containers/podman-desktop-extension-ai-lab/issues/3197',\n          );\n\n          test(`Open Recipes Catalog`, async ({ runner, page, navigationBar }) => {\n            aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n            await aiLabPage.navigationBar.waitForLoad();\n\n            recipesCatalogPage = await aiLabPage.navigationBar.openRecipesCatalog();\n            await recipesCatalogPage.waitForLoad();\n          });\n\n          test(`Install ${appName} example app`, async () => {\n            test.setTimeout(1_500_000);\n            const demoApp = await recipesCatalogPage.openRecipesCatalogApp(appName);\n            await demoApp.waitForLoad();\n            await 
demoApp.startNewDeployment(1_400_000);\n          });\n\n          /*\n           * Test application functionality\n           */\n          if (AI_APP_HTTP_TEST_APP_NAMES.includes(appName)) {\n            test(`Verify ${appName} app HTTP page is reachable`, async ({ request }) => {\n              test.setTimeout(60_000);\n              let response: APIResponse | undefined = undefined;\n\n              switch (appName) {\n                case 'Object Detection': {\n                  const aiRunningAppsPage = await aiLabPage.navigationBar.openRunningApps();\n                  const appPort = await aiRunningAppsPage.getAppPort(appName);\n                  response = await request.get(`http://localhost:${appPort}`, { timeout: 60_000 });\n                  playExpect(response.ok()).toBeTruthy();\n                  const body = await response.text();\n                  playExpect(body).toContain('<title>Streamlit</title>');\n                  break;\n                }\n              }\n            });\n          }\n\n          if (AI_APP_SERVICE_RESPONSE_TEST_APP_NAMES.includes(appName)) {\n            test(`Verify that model service for the ${appName} is working`, async ({ request }) => {\n              test.setTimeout(600_000);\n\n              let port: string = '';\n              let baseUrl: string = '';\n              let response: APIResponse | undefined = undefined;\n              let expectedResponse: string = '';\n\n              switch (appName) {\n                case 'Audio to Text': {\n                  test.fail(\n                    appName === 'Audio to Text',\n                    'Expected failure due to issue #3111: https://github.com/containers/podman-desktop-extension-ai-lab/issues/3111',\n                  );\n                  port = await getModelServicePort(appModel);\n                  baseUrl = `http://localhost:${port}`;\n                  expectedResponse =\n                    'And so my fellow Americans, ask not what your country can do 
for you, ask what you can do for your country';\n                  const audioFileContent = fs.readFileSync(TEST_AUDIO_FILE_PATH);\n\n                  response = await request.post(`${baseUrl}/inference`, {\n                    headers: {\n                      Accept: 'application/json',\n                    },\n                    multipart: {\n                      file: {\n                        name: 'test.wav',\n                        mimeType: 'audio/wav',\n                        buffer: audioFileContent,\n                      },\n                    },\n                    timeout: 600_000,\n                  });\n                  break;\n                }\n\n                case 'Function calling': {\n                  port = await getModelServicePort(appModel);\n                  baseUrl = `http://localhost:${port}`;\n                  expectedResponse = 'Prague';\n                  response = await request.post(`${baseUrl}/v1/chat/completions`, {\n                    data: {\n                      messages: [\n                        { role: 'system', content: 'You are a helpful assistant.' },\n                        { role: 'user', content: 'What is the capital of Czech Republic?' },\n                      ],\n                    },\n                    timeout: 600_000,\n                  });\n                  break;\n                }\n              }\n\n              if (response) {\n                playExpect(response.ok()).toBeTruthy();\n                const body = await response?.body();\n                const text = body?.toString() ?? 
'';\n                playExpect(text).toContain(expectedResponse);\n              }\n            });\n          }\n\n          test(`${appName}: Restart, Stop, Delete.`, async () => {\n            test.setTimeout(240_000);\n\n            await restartApp(appName);\n            await stopAndDeleteApp(appName);\n          });\n        });\n      });\n\n      test.afterAll(\n        `Ensure cleanup of \"${appModel}\", related services, and images`,\n        async ({ runner, page, navigationBar }) => {\n          test.setTimeout(180_000);\n          aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n          await cleanupServices();\n          await deleteAllModels();\n          await deleteUnusedImages(navigationBar);\n        },\n      );\n    });\n  });\n\n  test.describe.serial('InstructLab container startup', { tag: ['@smoke', '@instructLab'] }, () => {\n    let instructLabPage: AILabTryInstructLabPage;\n    const instructLabContainerName = /^instructlab-\\d+$/;\n    let exactInstructLabContainerName = '';\n\n    test.beforeAll('Open Try InstructLab page', async ({ runner, page, navigationBar }) => {\n      aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n      await aiLabPage.navigationBar.waitForLoad();\n\n      instructLabPage = await aiLabPage.navigationBar.openTryInstructLab();\n      await instructLabPage.waitForLoad();\n    });\n\n    test('Start and verify InstructLab container', async ({ page }) => {\n      test.setTimeout(1_000_000);\n      await playExpect(instructLabPage.startInstructLabButton).toBeVisible();\n      await playExpect(instructLabPage.startInstructLabButton).toBeEnabled();\n      await instructLabPage.startInstructLabButton.click();\n\n      await playExpect(instructLabPage.openInstructLabButton).toBeVisible({ timeout: 900_000 });\n      await playExpect(instructLabPage.openInstructLabButton).toBeEnabled({ timeout: 10_000 });\n      await 
playExpect(instructLabPage.statusMessageBox).toContainText('Starting InstructLab container');\n\n      const checkMarkLocator = instructLabPage.statusMessageBox.locator('[class*=\"text-green\"]');\n      await playExpect(checkMarkLocator).toHaveCount(3);\n      await instructLabPage.openInstructLabButton.click();\n\n      const containerName = await page\n        .getByRole('region', { name: 'Header' })\n        .getByLabel(instructLabContainerName)\n        .textContent();\n      if (typeof containerName === 'string') {\n        exactInstructLabContainerName = containerName;\n      }\n      const containerDetailsPage = new ContainerDetailsPage(page, exactInstructLabContainerName);\n      await playExpect(containerDetailsPage.heading).toBeVisible();\n      await playExpect(containerDetailsPage.heading).toContainText(exactInstructLabContainerName);\n      await playExpect\n        .poll(async () => containerDetailsPage.getState(), { timeout: 90_000, intervals: [1_000] })\n        .toContain(ContainerState.Running);\n    });\n\n    test('Cleanup the InstructLab container', async ({ runner, page, navigationBar }) => {\n      const containersPage = await navigationBar.openContainers();\n      await playExpect(containersPage.heading).toBeVisible();\n      await containersPage.deleteContainer(exactInstructLabContainerName);\n      await playExpect\n        .poll(async () => await containersPage.containerExists(exactInstructLabContainerName), { timeout: 60_000 })\n        .toBeFalsy();\n      await deleteUnusedImages(navigationBar);\n      aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n      await aiLabPage.navigationBar.waitForLoad();\n      instructLabPage = await aiLabPage.navigationBar.openTryInstructLab();\n      await instructLabPage.waitForLoad();\n      await playExpect(instructLabPage.startInstructLabButton).toBeEnabled();\n    });\n  });\n\n  test.describe.serial(`Start Llama Stack from sidebar and verify containers`, { tag: '@smoke' }, () 
=> {\n    test.skip(!!isCI && !!isWindows, 'Skipping Llama Stack tests on GitHub Actions with Windows platform');\n    let llamaStackPage: AiLlamaStackPage;\n    const llamaStackContainerNames: string[] = [];\n\n    test.beforeAll(`Open Llama Stack`, async ({ runner, page, navigationBar }) => {\n      aiLabPage = await reopenAILabDashboard(runner, page, navigationBar);\n      await aiLabPage.navigationBar.waitForLoad();\n      llamaStackPage = await aiLabPage.navigationBar.openLlamaStack();\n      await llamaStackPage.waitForLoad();\n    });\n\n    test(`Start Llama Stack containers`, async () => {\n      test.setTimeout(300_000);\n      await llamaStackPage.waitForLoad();\n      await llamaStackPage.runLlamaStackContainer();\n      await playExpect(llamaStackPage.openLlamaStackContainerButton).toBeVisible({ timeout: 120_000 });\n      await playExpect(llamaStackPage.exploreLlamaStackEnvironmentButton).toBeVisible({ timeout: 120_000 });\n      await playExpect(llamaStackPage.openLlamaStackContainerButton).toBeEnabled({ timeout: 30_000 });\n      await playExpect(llamaStackPage.exploreLlamaStackEnvironmentButton).toBeEnabled({ timeout: 30_000 });\n    });\n\n    test(`Verify Llama Stack containers are running`, async ({ navigationBar }) => {\n      let containersPage = await navigationBar.openContainers();\n      await playExpect(containersPage.heading).toBeVisible();\n\n      await playExpect\n        .poll(\n          async () => {\n            const allRows = await containersPage.getAllTableRows();\n            llamaStackContainerNames.length = 0;\n            for (const row of allRows) {\n              const text = await row.textContent();\n              if (text?.includes('llama-stack')) {\n                const containerNameMatch = RegExp(/\\b(llama-stack[^\\s]*)/).exec(text);\n                if (containerNameMatch) {\n                  llamaStackContainerNames.push(containerNameMatch[1]);\n                }\n              }\n            }\n            return 
llamaStackContainerNames.length;\n          },\n          {\n            timeout: 30_000,\n            intervals: [5_000],\n          },\n        )\n        .toBe(2);\n\n      console.log(`Found containers: ${llamaStackContainerNames.join(', ')}`);\n\n      for (const container of llamaStackContainerNames) {\n        containersPage = await navigationBar.openContainers();\n        await playExpect(containersPage.heading).toBeVisible();\n        const containersDetailsPage = await containersPage.openContainersDetails(container);\n        await playExpect(containersDetailsPage.heading).toBeVisible();\n        await playExpect\n          .poll(async () => containersDetailsPage.getState(), { timeout: 30_000 })\n          .toContain(ContainerState.Running);\n      }\n    });\n\n    test.afterAll(`Stop Llama Stack containers`, async ({ navigationBar }) => {\n      for (const container of llamaStackContainerNames) {\n        const containersPage = await navigationBar.openContainers();\n        await playExpect(containersPage.heading).toBeVisible();\n        await containersPage.deleteContainer(container);\n        await playExpect\n          .poll(async () => await containersPage.containerExists(container), { timeout: 30_000 })\n          .toBeFalsy();\n      }\n      await deleteUnusedImages(navigationBar);\n    });\n  });\n});\n\nasync function cleanupServices(): Promise<void> {\n  try {\n    const modelServicePage = await aiLabPage.navigationBar.openServices();\n    await modelServicePage.waitForLoad();\n    if ((await modelServicePage.getCurrentModelCount()) === 0) return;\n    await modelServicePage.deleteAllCurrentModels();\n    await playExpect.poll(async () => await modelServicePage.getCurrentModelCount(), { timeout: 60_000 }).toBe(0);\n  } catch (error) {\n    console.log(`Error while cleaning up service models: ${error}`);\n  }\n}\n\nasync function getModelServicePort(appModelName: string): Promise<string> {\n  const modelServicePage = await 
aiLabPage.navigationBar.openServices();\n  await modelServicePage.waitForLoad();\n  const serviceDetailsPage = await modelServicePage.openServiceDetails(appModelName);\n\n  await playExpect\n    // eslint-disable-next-line sonarjs/no-nested-functions\n    .poll(async () => await serviceDetailsPage.getServiceState(), { timeout: 60_000 })\n    .toBe('RUNNING');\n\n  return await serviceDetailsPage.getInferenceServerPort();\n}\n\nasync function deleteAllModels(): Promise<void> {\n  const modelCatalogPage = await aiLabPage.navigationBar.openCatalog();\n  await modelCatalogPage.waitForLoad();\n  await modelCatalogPage.deleteAllModels();\n}\n\nasync function restartApp(appName: string): Promise<void> {\n  const aiRunningAppsPage = await aiLabPage.navigationBar.openRunningApps();\n  await aiRunningAppsPage.waitForLoad();\n  await playExpect.poll(async () => await aiRunningAppsPage.appExists(appName), { timeout: 10_000 }).toBeTruthy();\n  await playExpect\n    .poll(async () => await aiRunningAppsPage.getCurrentStatusForApp(appName), { timeout: 60_000 })\n    .toBe('RUNNING');\n  const aiApp = await aiRunningAppsPage.getRowForApp(appName);\n  const appProgressBar = aiApp.getByRole('progressbar', { name: 'Loading' });\n  // Trigger restart and watch for dialog/progress bar in parallel to avoid race condition\n  // See: https://github.com/containers/podman-desktop-extension-ai-lab/issues/3663\n\n  const dialogPromise = handleConfirmationDialog(\n    aiLabPage.page,\n    podmanAILabExtension.extensionName,\n    true,\n    'Reset',\n    'Cancel',\n    25_000,\n  ).catch(() => {\n    // Dialog didn't appear - this is expected when repo is clean\n  });\n  const progressBarPromise = playExpect(appProgressBar)\n    .toBeVisible({ timeout: 60_000 })\n    .catch(() => {\n      console.log(`Warning: Progress bar did not appear for app \"${appName}\" during restart`);\n    });\n\n  const restartPromise = aiRunningAppsPage.restartApp(appName);\n\n  await Promise.all([dialogPromise, 
progressBarPromise, restartPromise]);\n  await playExpect\n    .poll(async () => await aiRunningAppsPage.getCurrentStatusForApp(appName), { timeout: 60_000 })\n    .toBe('RUNNING');\n}\n\nasync function stopAndDeleteApp(appName: string): Promise<void> {\n  const aiRunningAppsPage = await aiLabPage.navigationBar.openRunningApps();\n  await aiRunningAppsPage.waitForLoad();\n  if (!(await aiRunningAppsPage.appExists(appName))) {\n    console.log(`\"${appName}\" is not present in the running apps list. Skipping stop and delete operations.`);\n    return;\n  }\n  await playExpect.poll(async () => await aiRunningAppsPage.appExists(appName), { timeout: 10_000 }).toBeTruthy();\n  await playExpect\n    .poll(async () => await aiRunningAppsPage.getCurrentStatusForApp(appName), { timeout: 60_000 })\n    .toBe('RUNNING');\n  await aiRunningAppsPage.stopApp(appName);\n  await playExpect\n    .poll(async () => await aiRunningAppsPage.getCurrentStatusForApp(appName), { timeout: 60_000 })\n    .toBe('UNKNOWN');\n  await aiRunningAppsPage.deleteAIApp(appName);\n  await playExpect.poll(async () => await aiRunningAppsPage.appExists(appName), { timeout: 60_000 }).toBeFalsy();\n}\n\nasync function deleteUnusedImages(navigationBar: NavigationBar): Promise<void> {\n  try {\n    const imagesPage = await navigationBar.openImages();\n    await playExpect(imagesPage.heading).toBeVisible();\n\n    await imagesPage.deleteAllUnusedImages();\n    await playExpect.poll(async () => await imagesPage.getCountOfImagesByStatus('UNUSED'), { timeout: 90_000 }).toBe(0);\n  } catch (error) {\n    console.error('Error during deleteUnusedImages:', error);\n  }\n}\n\nasync function waitForCatalogModel(modelName: string): Promise<boolean> {\n  const recipeCatalogPage = await aiLabPage.navigationBar.openRecipesCatalog();\n  await recipeCatalogPage.waitForLoad();\n\n  const catalogPage = await aiLabPage.navigationBar.openCatalog();\n  await catalogPage.waitForLoad();\n\n  return await 
catalogPage.isModelDownloaded(modelName);\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-app-details-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { AILabStartRecipePage } from './ai-lab-start-recipe-page';\n\nexport class AILabAppDetailsPage extends AILabBasePage {\n  readonly appName: string;\n  readonly startRecipeButton: Locator;\n\n  constructor(page: Page, webview: Page, appName: string) {\n    super(page, webview, appName);\n    this.appName = appName;\n    this.startRecipeButton = this.webview.getByRole('button', { name: 'Start recipe' });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async deleteLocalClone(): Promise<void> {\n    throw new Error('Method Not implemented');\n  }\n\n  async startNewDeployment(timeout: number = 1_400_000): Promise<void> {\n    await playExpect(this.startRecipeButton).toBeEnabled();\n    await this.startRecipeButton.click();\n    const starRecipePage = new AILabStartRecipePage(this.page, this.webview);\n    await starRecipePage.waitForLoad();\n    // ensure model download timeout is the closest integer to two fifths of total timeout 
divisible by 10_000\n    const modelDownloadTimeout = Math.ceil(Math.floor((timeout * 2) / 5) / 10_000) * 10_000;\n    const appStartupTimeout = timeout - modelDownloadTimeout;\n    await starRecipePage.startRecipe(this.appName, modelDownloadTimeout, appStartupTimeout);\n  }\n\n  async openRunningApps(): Promise<void> {\n    throw new Error('Method Not implemented');\n  }\n\n  async deleteRunningApp(_containerName: string): Promise<void> {\n    throw new Error('Method Not implemented');\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-base-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Locator, Page } from '@playwright/test';\nimport { expect as playExpect } from '@playwright/test';\n\nexport abstract class AILabBasePage {\n  readonly page: Page;\n  readonly webview: Page;\n  readonly heading: Locator;\n  readonly gpuSupportBanner: Locator;\n  readonly enableGpuButton: Locator;\n  readonly dontDisplayButton: Locator;\n\n  constructor(page: Page, webview: Page, heading: string | undefined) {\n    this.page = page;\n    this.webview = webview;\n    this.heading = webview.getByRole('heading', { name: heading, exact: true }).first();\n    this.gpuSupportBanner = this.webview.getByLabel('GPU promotion banner');\n    this.enableGpuButton = this.gpuSupportBanner.getByRole('button', { name: 'Enable GPU support' });\n    this.dontDisplayButton = this.gpuSupportBanner.getByRole('button', { name: `Don't display anymore` });\n  }\n\n  abstract waitForLoad(): Promise<void>;\n\n  async enableGpuSupport(): Promise<void> {\n    await playExpect(this.gpuSupportBanner).toBeVisible();\n    await this.enableGpuButton.click();\n    await playExpect(this.gpuSupportBanner).not.toBeVisible();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-creating-model-service-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { AILabServiceDetailsPage } from './ai-lab-service-details-page';\n\nexport class AILabCreatingModelServicePage extends AILabBasePage {\n  readonly modelInput: Locator;\n  readonly portInput: Locator;\n  readonly createButton: Locator;\n  readonly openServiceDetailsButton: Locator;\n  readonly serviceStatus: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Creating Model service');\n    this.modelInput = this.webview.getByLabel('Select Model');\n    this.portInput = this.webview.getByLabel('Port input');\n    this.createButton = this.webview.getByRole('button', { name: 'Create service' });\n    this.openServiceDetailsButton = this.webview.getByRole('button', { name: 'Open service details' });\n    this.serviceStatus = this.webview.getByRole('status');\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async getCurrentStatus(): Promise<string> {\n    const statusList = await 
this.getStatusListLocator();\n\n    if (statusList.length < 1) return '';\n\n    const content = await statusList[statusList.length - 1].textContent();\n    if (!content) return '';\n\n    return content;\n  }\n\n  async getLastStatusIconClass(): Promise<string> {\n    const statusList = await this.getStatusListLocator();\n\n    if (statusList.length < 1) return '';\n\n    const icon = statusList[statusList.length - 1].getByRole('img');\n    return (await icon.getAttribute('class')) ?? '';\n  }\n\n  async createService(modelName: string = '', port: number = 0): Promise<AILabServiceDetailsPage> {\n    if (modelName) {\n      await this.modelInput.fill(modelName);\n      await this.webview.keyboard.press('Enter');\n    }\n\n    if (port) {\n      await this.portInput.clear();\n      await this.portInput.fill(port.toString());\n    }\n\n    await playExpect(this.createButton).toBeEnabled();\n    await this.createButton.click();\n\n    await playExpect\n      .poll(async () => await this.getCurrentStatus(), { timeout: 300_000 })\n      .toContain('Creating container');\n    await playExpect\n      .poll(async () => await this.getLastStatusIconClass(), { timeout: 120_000 })\n      .toContain('text-green-500');\n    await playExpect(this.openServiceDetailsButton).toBeEnabled();\n    await this.openServiceDetailsButton.click();\n    return new AILabServiceDetailsPage(this.page, this.webview);\n  }\n\n  private async getStatusListLocator(): Promise<Locator[]> {\n    return await this.serviceStatus.locator('ul > li').all();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-dashboard-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Page } from '@playwright/test';\nimport { expect as playExpect } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { AILabNavigationBar } from './ai-lab-navigation-bar';\n\nexport class AILabDashboardPage extends AILabBasePage {\n  readonly navigationBar: AILabNavigationBar;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Welcome to Podman AI Lab');\n    this.navigationBar = new AILabNavigationBar(this.page, this.webview);\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n    await this.navigationBar.waitForLoad();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-local-server-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\n\nexport class AILabLocalServerPage extends AILabBasePage {\n  readonly localServerPort: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Local Server');\n    this.localServerPort = this.webview.getByRole('textbox', { name: 'Port input' });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async getLocalServerPort(): Promise<string> {\n    await playExpect(this.localServerPort).toBeVisible();\n    try {\n      return await this.localServerPort.inputValue();\n    } catch {\n      throw new Error('Could not get local server port');\n    }\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-model-catalog-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Locator, Page } from '@playwright/test';\nimport { expect as playExpect } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { handleConfirmationDialog, podmanAILabExtension } from '@podman-desktop/tests-playwright';\nimport { AILabCreatingModelServicePage } from './ai-lab-creating-model-service-page';\n\nexport class AILabCatalogPage extends AILabBasePage {\n  readonly catalogTable: Locator;\n  readonly modelsGroup: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Models');\n    this.catalogTable = this.webview.getByRole('table', { name: 'model' });\n    this.modelsGroup = this.catalogTable.getByRole('rowgroup').nth(1);\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n    await playExpect(this.catalogTable).toBeVisible();\n    await playExpect(this.modelsGroup).toBeVisible();\n  }\n\n  async getModelRowByName(modelName: string): Promise<Locator | undefined> {\n    const modelRows = await this.getAllModelRows();\n    for (const modelRow of modelRows) {\n      const modelNameCell = modelRow.getByText(modelName, { exact: true });\n      
if ((await modelNameCell.count()) > 0) {\n        return modelRow;\n      }\n    }\n\n    return undefined;\n  }\n\n  async getModelNameByRow(row: Locator): Promise<string> {\n    const modelNameCell = row.getByLabel('Model Name');\n    const modelName = await modelNameCell.textContent();\n    return modelName?.trim() ?? '';\n  }\n\n  async downloadModel(modelName: string): Promise<void> {\n    const modelRow = await this.getModelRowByName(modelName);\n    if (!modelRow) {\n      throw new Error(`Model ${modelName} not found`);\n    }\n    const downloadButton = modelRow.getByRole('button', { name: 'Download Model' });\n    await playExpect(downloadButton).toBeEnabled();\n    await downloadButton.focus();\n    await downloadButton.click();\n  }\n\n  async createModelService(modelName: string): Promise<AILabCreatingModelServicePage> {\n    const modelRow = await this.getModelRowByName(modelName);\n    if (!modelRow) {\n      throw new Error(`Model ${modelName} not found`);\n    }\n    const createServiceButton = modelRow.getByRole('button', { name: 'Create Model Service' });\n    await playExpect(createServiceButton).toBeEnabled();\n    await createServiceButton.focus();\n    await createServiceButton.click();\n\n    return new AILabCreatingModelServicePage(this.page, this.webview);\n  }\n\n  async deleteModel(modelName: string): Promise<void> {\n    if (!modelName || modelName.trim() === '') {\n      console.warn('Model name is empty, skipping deletion.');\n      return;\n    }\n    const modelRow = await this.getModelRowByName(modelName);\n    if (!modelRow) {\n      throw new Error(`Model ${modelName} not found`);\n    }\n    const deleteButton = modelRow.getByRole('button', { name: 'Delete Model' });\n    await playExpect.poll(async () => await deleteButton.isEnabled(), { timeout: 10_000 }).toBeTruthy();\n    await deleteButton.focus();\n    await deleteButton.click();\n    await this.page.waitForTimeout(1_000);\n    await handleConfirmationDialog(this.page, 
podmanAILabExtension.extensionName, true, 'Confirm');\n    await playExpect.poll(async () => await this.isModelDownloaded(modelName), { timeout: 30_000 }).toBeFalsy();\n  }\n\n  async deleteAllModels(): Promise<void> {\n    try {\n      let modelRows = await this.getAllModelRows();\n      if (modelRows.length === 0) {\n        return;\n      }\n\n      for (const modelRow of modelRows) {\n        const modelName = await this.getModelNameByRow(modelRow);\n        if (await this.isModelDownloaded(modelName)) {\n          await this.deleteModel(modelName);\n        }\n      }\n\n      modelRows = await this.getAllModelRows();\n      if (modelRows.length === 0) {\n        return;\n      }\n\n      for (const modelRow of modelRows) {\n        const modelName = await this.getModelNameByRow(modelRow);\n        playExpect(await this.isModelDownloaded(modelName)).toBeFalsy();\n      }\n    } catch (error) {\n      const remainingModels = await this.getAllModelRows();\n      const remainingModelNames = [];\n      for (const modelRow of remainingModels) {\n        const modelName = await this.getModelNameByRow(modelRow);\n        if (await this.isModelDownloaded(modelName)) {\n          remainingModelNames.push(modelName);\n        }\n      }\n      const errInfo = error instanceof Error ? { message: error.message } : error;\n      console.error('Error during deleteAllModels:', errInfo, 'Remaining models:', remainingModelNames);\n    }\n  }\n\n  async isModelDownloaded(modelName: string): Promise<boolean> {\n    const modelRow = await this.getModelRowByName(modelName);\n    if (!modelRow) {\n      return false;\n    }\n\n    const deleteButton = modelRow.getByRole('button', { name: 'Delete Model' });\n    return (await deleteButton.count()) > 0;\n  }\n\n  private async getAllModelRows(): Promise<Locator[]> {\n    return this.modelsGroup.getByRole('row').all();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-model-llamastack-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\n\nexport class AiLlamaStackPage extends AILabBasePage {\n  readonly startLlamaStackContainerButton: Locator;\n  readonly openLlamaStackContainerButton: Locator;\n  readonly exploreLlamaStackEnvironmentButton: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Llama Stack');\n    this.startLlamaStackContainerButton = this.webview.getByRole('button', { name: 'Start Llama Stack container' });\n    this.openLlamaStackContainerButton = this.webview.getByRole('button', {\n      name: 'Open Llama Stack Server container',\n    });\n    this.exploreLlamaStackEnvironmentButton = this.webview.getByRole('button', {\n      name: 'Explore Llama-Stack environment',\n    });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await this.startLlamaStackContainerButton.waitFor({ state: 'visible' });\n  }\n\n  async runLlamaStackContainer(): Promise<void> {\n    await this.startLlamaStackContainerButton.click();\n  }\n\n  async waitForOpenLlamaStackContainerButton(): Promise<void> {\n    await this.openLlamaStackContainerButton.waitFor({ state: 
'visible' });\n  }\n\n  async waitForExploreLlamaStackEnvironmentButton(): Promise<void> {\n    await this.exploreLlamaStackEnvironmentButton.waitFor({ state: 'visible' });\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-model-service-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { handleConfirmationDialog, podmanAILabExtension } from '@podman-desktop/tests-playwright';\nimport { AILabCreatingModelServicePage } from './ai-lab-creating-model-service-page';\nimport { AILabServiceDetailsPage } from './ai-lab-service-details-page';\n\nexport class AiModelServicePage extends AILabBasePage {\n  readonly additionalActions: Locator;\n  readonly deleteSelectedItems: Locator;\n  readonly toggleAllCheckbox: Locator;\n  readonly newModelButton: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Model Services');\n    this.additionalActions = this.webview.getByRole('group', { name: 'additionalActions' });\n    this.deleteSelectedItems = this.additionalActions.getByRole('button', { name: 'Delete' });\n    this.toggleAllCheckbox = this.webview.getByRole('checkbox').and(this.webview.getByLabel('Toggle all'));\n    this.newModelButton = this.additionalActions.getByRole('button', { name: 'New Model Service' });\n  }\n\n  async waitForLoad(): Promise<void> 
{\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async checkAllModelsForDeletion(): Promise<void> {\n    await playExpect(this.toggleAllCheckbox).toBeVisible();\n    await this.toggleAllCheckbox.check();\n    await playExpect(this.toggleAllCheckbox).toBeChecked();\n  }\n\n  async navigateToCreateNewModelPage(): Promise<AILabCreatingModelServicePage> {\n    await playExpect(this.newModelButton).toBeEnabled();\n    await this.newModelButton.click();\n    return new AILabCreatingModelServicePage(this.page, this.webview);\n  }\n\n  async deleteAllCurrentModels(): Promise<void> {\n    try {\n      if (!(await this.toggleAllCheckbox.count())) return;\n\n      await this.checkAllModelsForDeletion();\n      await playExpect(this.deleteSelectedItems).toBeEnabled();\n      await this.deleteSelectedItems.click();\n\n      await handleConfirmationDialog(this.page, podmanAILabExtension.extensionName, true, 'Confirm');\n\n      await playExpect.poll(async () => (await this.getCurrentModelCount()) === 0, { timeout: 60_000 }).toBeTruthy();\n    } catch (error) {\n      const remainingRows = await this.getAllTableRows();\n      const remainingNames: string[] = [];\n      for (let rowNum = 1; rowNum < remainingRows.length; rowNum++) {\n        const serviceModel = remainingRows[rowNum].getByRole('cell').nth(4);\n        const modelName = await serviceModel.textContent();\n        if (modelName) {\n          remainingNames.push(modelName);\n        }\n      }\n      console.group('Model Service Cleanup');\n      console.log(`[${new Date().toISOString()}] Model service deletion failed.`);\n      if (remainingNames.length > 0) {\n        console.log('Could not delete:');\n        remainingNames.forEach(name => console.log(` - ${name}`));\n      } else {\n        console.log('All model services deleted successfully.');\n      }\n      console.error('Error details:', error);\n      console.groupEnd();\n    }\n  }\n\n  async getCurrentModelCount(): Promise<number> {\n    
return (await this.getAllTableRows()).length;\n  }\n\n  async openServiceDetails(modelName: string): Promise<AILabServiceDetailsPage> {\n    const serviceRow = await this.getServiceByModel(modelName);\n    if (serviceRow === undefined) {\n      throw new Error(`Model [${modelName}] service doesn't exist`);\n    }\n    const serviceRowName = serviceRow.getByRole('cell').nth(3);\n    await serviceRowName.click();\n    return new AILabServiceDetailsPage(this.page, this.webview);\n  }\n\n  async getServiceByModel(modelName: string): Promise<Locator | undefined> {\n    const rows = await this.getAllTableRows();\n    for (let rowNum = 1; rowNum < rows.length; rowNum++) {\n      //skip header\n      const serviceModel = rows[rowNum].getByRole('cell').nth(4);\n      if ((await serviceModel.textContent()) === modelName) {\n        return rows[rowNum];\n      }\n    }\n    return undefined;\n  }\n\n  private async getAllTableRows(): Promise<Locator[]> {\n    return await this.webview.getByRole('row').all();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-navigation-bar.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { AILabRecipesCatalogPage } from './ai-lab-recipes-catalog-page';\nimport { AiRunningAppsPage } from './ai-lab-running-apps-page';\nimport { AiModelServicePage } from './ai-lab-model-service-page';\nimport { AILabCatalogPage as AILabModelCatalogPage } from './ai-lab-model-catalog-page';\nimport { AILabPlaygroundsPage } from './ai-lab-playgrounds-page';\nimport { AILabLocalServerPage } from './ai-lab-local-server-page';\nimport { AILabDashboardPage } from './ai-lab-dashboard-page';\nimport { AILabTryInstructLabPage } from './ai-lab-try-instructlab-page';\nimport { AiLlamaStackPage } from './ai-lab-model-llamastack-page';\n\nexport class AILabNavigationBar extends AILabBasePage {\n  readonly navigationBar: Locator;\n  readonly dashboardButton: Locator;\n  readonly recipesCatalogButton: Locator;\n  readonly runningAppsButton: Locator;\n  readonly catalogButton: Locator;\n  readonly servicesButton: Locator;\n  readonly playgroundsButton: Locator;\n  readonly llamaStackButton: Locator;\n  
readonly tuneButton: Locator;\n  readonly localServerButton: Locator;\n  readonly tryInstructLabButton: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, undefined);\n    this.navigationBar = this.webview.getByRole('navigation', { name: 'PreferencesNavigation' });\n    this.dashboardButton = this.navigationBar.getByRole('link', { name: 'Dashboard', exact: true });\n    this.recipesCatalogButton = this.navigationBar.getByRole('link', { name: 'Recipe Catalog', exact: true });\n    this.runningAppsButton = this.navigationBar.getByRole('link', { name: 'Running' });\n    this.catalogButton = this.navigationBar.getByRole('link', { name: 'Catalog', exact: true });\n    this.servicesButton = this.navigationBar.getByRole('link', { name: 'Services' });\n    this.playgroundsButton = this.navigationBar.getByRole('link', { name: 'Playgrounds' });\n    this.llamaStackButton = this.navigationBar.getByRole('link', { name: 'Llama Stack' });\n    this.tuneButton = this.navigationBar.getByRole('link', { name: 'Tune with InstructLab' });\n    this.localServerButton = this.navigationBar.getByRole('link', { name: 'Local Server' });\n    this.tryInstructLabButton = this.navigationBar.getByRole('link', { name: 'Try InstructLab' });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.navigationBar).toBeVisible();\n  }\n\n  async openDashboard(): Promise<AILabDashboardPage> {\n    await playExpect(this.dashboardButton).toBeVisible();\n    await this.dashboardButton.click();\n    return new AILabDashboardPage(this.page, this.webview);\n  }\n\n  async openRecipesCatalog(): Promise<AILabRecipesCatalogPage> {\n    await playExpect(this.recipesCatalogButton).toBeVisible();\n    await this.recipesCatalogButton.click();\n    return new AILabRecipesCatalogPage(this.page, this.webview);\n  }\n\n  async openRunningApps(): Promise<AiRunningAppsPage> {\n    await playExpect(this.runningAppsButton).toBeVisible();\n    await 
this.runningAppsButton.click();\n    return new AiRunningAppsPage(this.page, this.webview);\n  }\n\n  async openServices(): Promise<AiModelServicePage> {\n    await playExpect(this.servicesButton).toBeVisible();\n    await this.servicesButton.click();\n    return new AiModelServicePage(this.page, this.webview);\n  }\n\n  async openCatalog(): Promise<AILabModelCatalogPage> {\n    await playExpect(this.catalogButton).toBeVisible();\n    await this.catalogButton.click();\n    return new AILabModelCatalogPage(this.page, this.webview);\n  }\n\n  async openPlaygrounds(): Promise<AILabPlaygroundsPage> {\n    await playExpect(this.playgroundsButton).toBeVisible();\n    await this.playgroundsButton.click();\n    return new AILabPlaygroundsPage(this.page, this.webview);\n  }\n\n  async openLlamaStack(): Promise<AiLlamaStackPage> {\n    await playExpect(this.llamaStackButton).toBeVisible();\n    await this.llamaStackButton.click();\n    return new AiLlamaStackPage(this.page, this.webview);\n  }\n\n  async openLocalServer(): Promise<AILabLocalServerPage> {\n    await playExpect(this.localServerButton).toBeVisible();\n    await this.localServerButton.click();\n    return new AILabLocalServerPage(this.page, this.webview);\n  }\n\n  async openTryInstructLab(): Promise<AILabTryInstructLabPage> {\n    await playExpect(this.tryInstructLabButton).toBeVisible();\n    await this.tryInstructLabButton.click();\n    return new AILabTryInstructLabPage(this.page, this.webview);\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-playground-details-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { AILabPlaygroundsPage } from './ai-lab-playgrounds-page';\nimport { handleConfirmationDialog, podmanAILabExtension } from '@podman-desktop/tests-playwright';\n\nexport class AILabPlaygroundDetailsPage extends AILabBasePage {\n  readonly name: string;\n  readonly deletePlaygroundButton: Locator;\n  readonly conversationSectionLocator: Locator;\n  readonly settingsPanelLocator: Locator;\n  readonly parametersSectionLocator: Locator;\n  readonly temperatureSliderLocator: Locator;\n  readonly maxTokensSliderLocator: Locator;\n  readonly topPSliderLocator: Locator;\n  readonly systemPromptTextAreaLocator: Locator;\n  readonly clearSystemPromptButtonLocator: Locator;\n  readonly editSystemPromptButtonLocator: Locator;\n  readonly promptTextAreaLocator: Locator;\n  readonly sendPromptButton: Locator;\n\n  constructor(page: Page, webview: Page, playgroundName: string) {\n    super(page, webview, playgroundName);\n\n    this.name = playgroundName;\n    this.deletePlaygroundButton = 
this.webview.getByRole('button', { name: 'Delete conversation' });\n    this.conversationSectionLocator = this.webview.getByLabel('conversation', { exact: true });\n    this.settingsPanelLocator = this.webview.getByLabel('settings panel', { exact: true });\n    this.parametersSectionLocator = this.settingsPanelLocator.getByLabel('parameters', { exact: true });\n    this.temperatureSliderLocator = this.parametersSectionLocator.getByLabel('temperature slider', { exact: true });\n    this.maxTokensSliderLocator = this.parametersSectionLocator.getByLabel('max tokens slider', { exact: true });\n    this.topPSliderLocator = this.parametersSectionLocator.getByLabel('top-p slider', { exact: true });\n    this.systemPromptTextAreaLocator = this.conversationSectionLocator.getByLabel('system-prompt-textarea');\n    this.clearSystemPromptButtonLocator = this.conversationSectionLocator.getByTitle('Clear', { exact: true });\n    this.editSystemPromptButtonLocator = this.conversationSectionLocator.getByTitle('Edit system prompt', {\n      exact: true,\n    });\n    this.promptTextAreaLocator = this.webview.getByLabel('prompt', { exact: true });\n    this.sendPromptButton = this.webview.getByRole('button', { name: 'Send prompt' });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async defineSystemPrompt(systemPrompt: string): Promise<void> {\n    await playExpect(this.editSystemPromptButtonLocator).toBeVisible();\n    await this.editSystemPromptButtonLocator.click();\n    await playExpect(this.systemPromptTextAreaLocator).toBeVisible();\n    await this.systemPromptTextAreaLocator.fill(systemPrompt);\n    await this.editSystemPromptButtonLocator.click();\n    await playExpect(this.systemPromptTextAreaLocator).not.toBeVisible();\n  }\n\n  async deletePlayground(): Promise<AILabPlaygroundsPage> {\n    await playExpect(this.deletePlaygroundButton).toBeEnabled();\n    await this.deletePlaygroundButton.click();\n    await 
handleConfirmationDialog(this.page, podmanAILabExtension.extensionName, true, 'Confirm');\n    return new AILabPlaygroundsPage(this.page, this.webview);\n  }\n\n  async submitUserInput(prompt: string): Promise<void> {\n    await this.promptTextAreaLocator.fill(prompt);\n    await playExpect(this.promptTextAreaLocator).toHaveValue(prompt);\n    await playExpect(this.sendPromptButton).toBeEnabled({ timeout: 80_000 });\n    await this.sendPromptButton.click();\n  }\n\n  async getAssistantResponse(index: number): Promise<Locator> {\n    await playExpect(this.sendPromptButton).toBeVisible({ timeout: 100_000 });\n    const assistantResponse = this.conversationSectionLocator.getByLabel('Assistant message').nth(index);\n    return assistantResponse;\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-playgrounds-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Locator, Page } from '@playwright/test';\nimport { expect as playExpect } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { handleConfirmationDialog, podmanAILabExtension } from '@podman-desktop/tests-playwright';\nimport { AILabPlaygroundDetailsPage } from './ai-lab-playground-details-page';\n\nexport class AILabPlaygroundsPage extends AILabBasePage {\n  readonly additionalActions: Locator;\n  readonly newPlaygroundButton: Locator;\n  readonly playgroundNameInput: Locator;\n  readonly createPlaygroundButton: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Playground Environments');\n    this.additionalActions = this.webview.getByRole('group', { name: 'additionalActions' });\n    this.newPlaygroundButton = this.additionalActions.getByRole('button', { name: 'New Playground', exact: true });\n    this.playgroundNameInput = this.webview.getByRole('textbox', { name: 'playgroundName' });\n    this.createPlaygroundButton = this.webview.getByRole('button', { name: 'Create playground', exact: true });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await 
playExpect(this.heading).toBeVisible();\n  }\n\n  async createNewPlayground(name: string, timeout = 180_000): Promise<this> {\n    await playExpect(this.newPlaygroundButton).toBeEnabled();\n    await this.newPlaygroundButton.click();\n    await playExpect(this.playgroundNameInput).toBeVisible();\n    await this.playgroundNameInput.fill(name);\n    await playExpect(this.playgroundNameInput).toHaveValue(name);\n    await playExpect(this.createPlaygroundButton).toBeEnabled();\n    await this.createPlaygroundButton.click();\n    await playExpect(this.createPlaygroundButton).not.toBeVisible({ timeout });\n    return this;\n  }\n\n  async deletePlayground(playgroundName: string): Promise<this> {\n    const playgroundRow = await this.getPlaygroundRowByName(playgroundName);\n    if (!playgroundRow) {\n      throw new Error(`Playground ${playgroundName} not found`);\n    }\n    const deleteButton = playgroundRow.getByRole('button', { name: 'Delete conversation', exact: true });\n    await playExpect(deleteButton).toBeEnabled();\n    await deleteButton.click();\n    await handleConfirmationDialog(this.page, podmanAILabExtension.extensionName, true, 'Confirm');\n    return this;\n  }\n\n  async doesPlaygroundExist(playgroundName: string): Promise<boolean> {\n    return (await this.getPlaygroundRowByName(playgroundName)) !== undefined;\n  }\n\n  async goToPlaygroundDetails(playgroundName: string): Promise<AILabPlaygroundDetailsPage> {\n    const playgroundRow = await this.getPlaygroundRowByName(playgroundName);\n    if (!playgroundRow) {\n      throw new Error(`Playground ${playgroundName} not found`);\n    }\n\n    const button = playgroundRow.getByRole('button', { name: playgroundName, exact: true });\n    await playExpect(button).toBeVisible();\n    await button.click();\n\n    return new AILabPlaygroundDetailsPage(this.page, this.webview, playgroundName);\n  }\n\n  private async getPlaygroundRowByName(playgroundName: string): Promise<Locator | undefined> {\n    const row = 
this.webview.getByRole('row', { name: playgroundName, exact: true });\n    if ((await row.count()) > 0) {\n      return row;\n    }\n    return undefined;\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-recipes-catalog-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { AILabAppDetailsPage } from './ai-lab-app-details-page';\n\nexport class AILabRecipesCatalogPage extends AILabBasePage {\n  readonly recipesCatalogPage: Locator;\n  readonly recipesCatalogContent: Locator;\n  readonly recipesCatalogNaturalLanguageProcessing: Locator;\n  readonly recipesCatalogAudio: Locator;\n  readonly recipesCatalogComputerVision: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Recipe Catalog');\n    this.recipesCatalogPage = this.webview.getByRole('region', { name: 'Recipe Catalog' });\n    this.recipesCatalogContent = this.recipesCatalogPage.getByRole('region', { name: 'content', exact: true }).first();\n    this.recipesCatalogNaturalLanguageProcessing = this.recipesCatalogContent.getByRole('region', {\n      name: 'Natural Language Processing',\n      exact: true,\n    });\n    this.recipesCatalogAudio = this.recipesCatalogContent.getByRole('region', { name: 'Audio', exact: true });\n    this.recipesCatalogComputerVision = 
this.recipesCatalogContent.getByRole('region', {\n      name: 'Computer Vision',\n      exact: true,\n    });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n    await playExpect(this.recipesCatalogPage).toBeVisible();\n  }\n\n  async openRecipesCatalogApp(appName: string): Promise<AILabAppDetailsPage> {\n    const moreDetailsButton = this.getAppDetailsLocator(appName);\n    await playExpect(moreDetailsButton).toBeEnabled();\n    await moreDetailsButton.click();\n    return new AILabAppDetailsPage(this.page, this.webview, appName);\n  }\n\n  private getAppDetailsLocator(appName: string): Locator {\n    return this.recipesCatalogContent\n      .getByRole('region', { name: appName, exact: true })\n      .getByRole('button', { name: 'More details' });\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-running-apps-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { handleConfirmationDialog, podmanAILabExtension } from '@podman-desktop/tests-playwright';\n\nexport class AiRunningAppsPage extends AILabBasePage {\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'AI Apps');\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async getRowForApp(appName: string): Promise<Locator> {\n    const rows = await this.getAllTableRows();\n    for (const row of rows) {\n      const appNameCellCount = await row.getByRole('cell').nth(3).getByText(appName).count();\n      if (appNameCellCount) {\n        return row;\n      }\n    }\n    throw new Error(`No row found for app ${appName}`);\n  }\n\n  async getCurrentStatusForApp(appName: string): Promise<string> {\n    const row = await this.getRowForApp(appName);\n    return `${await row.getByRole('cell').nth(1).getByRole('status').getAttribute('title', { timeout: 60_000 })}`;\n  }\n\n  async restartApp(appName: string): Promise<void> {\n    
const dropDownMenu = await this.openKebabMenuForApp(appName);\n    const restartButton = dropDownMenu.getByTitle('Restart AI App');\n    await playExpect(restartButton).toBeVisible();\n    await restartButton.click();\n\n    await handleConfirmationDialog(this.page, 'Podman AI Lab', true, 'Confirm');\n  }\n\n  async stopApp(appName: string): Promise<void> {\n    const row = await this.getRowForApp(appName);\n    const stopButton = row.getByLabel('Stop AI App');\n    await playExpect(stopButton).toBeEnabled();\n    await stopButton.click();\n  }\n\n  async openKebabMenuForApp(appName: string): Promise<Locator> {\n    const row = await this.getRowForApp(appName);\n    const kebabMenu = row.getByLabel('kebab menu');\n    await playExpect(kebabMenu).toBeEnabled();\n    await kebabMenu.click();\n    return this.webview.getByTitle('Drop Down Menu Items');\n  }\n\n  async deleteAIApp(appName: string): Promise<void> {\n    const dropDownMenu = await this.openKebabMenuForApp(appName);\n    const deleteButton = dropDownMenu.getByTitle('Delete AI App');\n    await playExpect(deleteButton).toBeVisible();\n    await deleteButton.click();\n\n    await handleConfirmationDialog(this.page, podmanAILabExtension.extensionName, true, 'Confirm');\n  }\n\n  async appExists(appName: string): Promise<boolean> {\n    try {\n      await this.getRowForApp(appName);\n      return true;\n    } catch (error) {\n      if (error instanceof Error && error.message.includes('No row found for app')) {\n        return false;\n      } else {\n        throw error;\n      }\n    }\n  }\n\n  async getAppPort(appName: string): Promise<string> {\n    const appRow = await this.getRowForApp(appName);\n    //Update this locator after issue https://github.com/containers/podman-desktop-extension-ai-lab/issues/3113 is resolved\n    const portCell = appRow.getByRole('cell').nth(3);\n    const rawPortText = await portCell.getByText(/PORT\\s\\d+/).textContent();\n    if (!rawPortText) {\n      throw new 
Error(`Failed to extract port for app: ${appName}.`);\n    }\n    const portNumber = rawPortText.replace(/[^\\d]/g, '');\n    return portNumber;\n  }\n\n  private async getAllTableRows(): Promise<Locator[]> {\n    return await this.webview.getByRole('row').all();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-service-details-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport { AiModelServicePage } from './ai-lab-model-service-page';\nimport { handleConfirmationDialog, podmanAILabExtension } from '@podman-desktop/tests-playwright';\n\nexport class AILabServiceDetailsPage extends AILabBasePage {\n  readonly endpointURL: Locator;\n  readonly inferenceServerType: Locator;\n  readonly modelName: Locator;\n  readonly codeSnippet: Locator;\n  readonly deleteServiceButton: Locator;\n  readonly stopServiceButton: Locator;\n  readonly startServiceButton: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Service details');\n    this.endpointURL = this.webview.getByLabel('Endpoint URL', { exact: true });\n    this.inferenceServerType = this.webview.getByLabel('Inference Type', { exact: true });\n    this.modelName = this.webview.getByLabel('Model name', { exact: true });\n    this.codeSnippet = this.webview.getByLabel('Code Snippet', { exact: true });\n    this.deleteServiceButton = this.webview.getByRole('button', { name: 'Delete service' 
});\n    this.stopServiceButton = this.webview.getByRole('button', { name: 'Stop service' });\n    this.startServiceButton = this.webview.getByRole('button', { name: 'Start service' });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async deleteService(): Promise<AiModelServicePage> {\n    await playExpect(this.deleteServiceButton).toBeEnabled();\n    await this.deleteServiceButton.click();\n    await handleConfirmationDialog(this.page, podmanAILabExtension.extensionName, true, 'Confirm');\n    return new AiModelServicePage(this.page, this.webview);\n  }\n\n  async stopService(): Promise<void> {\n    await playExpect(this.stopServiceButton).toBeEnabled();\n    await this.stopServiceButton.click();\n  }\n\n  async startService(): Promise<void> {\n    await playExpect(this.startServiceButton).toBeEnabled();\n    await this.startServiceButton.click();\n  }\n\n  async getInferenceServerPort(): Promise<string> {\n    const split = (await this.endpointURL.textContent())?.split(':');\n    const port = split ? split[split.length - 1].split('/')[0] : '';\n    return port;\n  }\n\n  async getServiceState(): Promise<string> {\n    const serviceState = await this.webview.getByRole('status').getAttribute('title');\n    return serviceState ?? 'UNKNOWN';\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-start-recipe-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\nimport {\n  StatusBar,\n  TasksPage,\n  handleConfirmationDialog,\n  podmanAILabExtension,\n  waitUntil,\n} from '@podman-desktop/tests-playwright';\nimport { AILabNavigationBar } from './ai-lab-navigation-bar';\n\nexport class AILabStartRecipePage extends AILabBasePage {\n  readonly recipeStatus: Locator;\n  readonly applicationDetailsPanel: Locator;\n  readonly startRecipeButton: Locator;\n  readonly openAIAppButton: Locator;\n  readonly deleteAIAppButton: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Start recipe');\n    this.recipeStatus = this.webview.getByRole('status');\n    this.applicationDetailsPanel = this.webview.getByLabel('application details panel');\n    this.startRecipeButton = this.webview.getByRole('button', { name: /^Start .+ recipe$/i });\n    this.openAIAppButton = this.applicationDetailsPanel.getByRole('button', { name: 'Open AI App' });\n    this.deleteAIAppButton = this.applicationDetailsPanel.getByRole('button', { name: 'Delete AI App' 
});\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async startRecipe(\n    appName: string,\n    modelDownloadTimeout: number = 500_000,\n    applicationStartTimeout: number = 900_000,\n  ): Promise<void> {\n    await playExpect(this.startRecipeButton).toBeEnabled();\n    await this.startRecipeButton.click();\n    const POLLING_INTERVAL = 10_000;\n    let latestStatus = '';\n    let latestStatusTimeout = 0;\n\n    try {\n      await handleConfirmationDialog(this.page, podmanAILabExtension.extensionName, true, 'Reset');\n    } catch (error) {\n      console.warn(`Warning: Could not reset the app, repository probably clean.\\n\\t${error}`);\n    }\n\n    await waitUntil(\n      async () => {\n        latestStatus = await this.getLatestStatus();\n        if (/[Ee]rror/.test(latestStatus)) {\n          throw new Error(`Error encountered while starting application: ${latestStatus}`);\n        }\n        const progress = await this.getModelDownloadProgress();\n        if (progress < 100) {\n          console.log(\n            `Current model download progress: ${progress}%. Waiting for it to reach 100%. 
 (timeout: ${latestStatusTimeout}ms of ${modelDownloadTimeout}ms)`,\n          );\n          latestStatusTimeout += POLLING_INTERVAL;\n          await this.refreshStartRecipeUI(this.page, this.webview, appName);\n        }\n        return progress === 100;\n      },\n      {\n        timeout: modelDownloadTimeout,\n        diff: POLLING_INTERVAL,\n        message: 'WaitTimeout reached when waiting for model download progress to be 100 percent',\n      },\n    );\n\n    await waitUntil(\n      async () => {\n        const currentStatus = await this.getLatestStatus();\n        if (currentStatus !== latestStatus) {\n          latestStatus = currentStatus;\n          latestStatusTimeout = 0;\n        } else {\n          latestStatusTimeout += POLLING_INTERVAL;\n        }\n        if (/[Ee]rror/.test(latestStatus)) {\n          throw new Error(`Error encountered while starting application: ${latestStatus}`);\n        }\n        if (!latestStatus.includes('AI App is running')) {\n          console.log(\n            `Latest status: ${latestStatus} (timeout: ${latestStatusTimeout}ms of ${applicationStartTimeout}ms)`,\n          );\n          await this.refreshStartRecipeUI(this.page, this.webview, appName);\n        }\n        return latestStatus.includes('AI App is running');\n      },\n      {\n        timeout: applicationStartTimeout,\n        diff: POLLING_INTERVAL,\n        message: 'WaitTimeout reached when waiting for text: AI App is running',\n      },\n    );\n  }\n\n  async getModelDownloadProgress(): Promise<number> {\n    const content = await this.getDownloadStatusContent();\n\n    if (!content) return 0;\n    if (content.includes('already present on disk')) {\n      console.log('Model already present on disk');\n      return 100;\n    }\n\n    // eslint-disable-next-line sonarjs/slow-regex\n    const regex = new RegExp(/(\\d+)%/);\n    const progressString = regex.exec(content);\n    const progress = progressString ? 
parseInt(progressString[1]) : 0;\n    console.log(`Model download progress: ${progress}%`);\n    return progress;\n  }\n\n  private async getStatusListLocator(): Promise<Locator[]> {\n    return await this.recipeStatus.locator('ul > li').all();\n  }\n\n  private async getDownloadStatusContent(): Promise<string> {\n    return await this.getStatusContent(3);\n  }\n\n  private async getLatestStatus(): Promise<string> {\n    return await this.getStatusContent();\n  }\n\n  private async getStatusContent(index: number = 0): Promise<string> {\n    const statusList = await this.getStatusListLocator();\n    let currentElement: Locator;\n\n    if (index) {\n      if (statusList.length < index) return '';\n      currentElement = statusList[index - 1];\n    } else {\n      if (statusList.length < 1) return '';\n      currentElement = statusList[statusList.length - 1];\n    }\n\n    const content = await currentElement.textContent();\n\n    if (!content) return '';\n\n    const viewErrorButton = currentElement.getByRole('button', { name: 'View error' });\n    const note = currentElement.getByRole('note');\n\n    if (await viewErrorButton.isVisible()) {\n      await viewErrorButton.click();\n      await this.page.waitForTimeout(500);\n    }\n\n    if ((await note.count()) > 0) {\n      const noteContent = await note.textContent();\n      throw new Error(`Error encountered while starting application: ${noteContent}`);\n    }\n    return content;\n  }\n\n  private async refreshStartRecipeUI(page: Page, webView: Page, appName: string): Promise<void> {\n    console.debug('UI might be stuck, refreshing...');\n    // do not leave webview, ie. 
do not switch to Dashboard\n    const aiNavBar = new AILabNavigationBar(page, webView);\n    await aiNavBar.openRunningApps();\n    console.debug('Finding Tasks in status Bar');\n    const statusBar = new StatusBar(page);\n    await statusBar.tasksButton.click();\n    console.debug('Opened Tasks in status Bar');\n    const tasksManager = new TasksPage(page);\n    console.debug('Finding particular task in task manager and switching to it');\n    await tasksManager.navigateToTask(`Pulling ${appName}`);\n    console.debug('Start recipe page should be back');\n    await playExpect(this.heading).toBeVisible();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/ai-lab-try-instructlab-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *025\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport { expect as playExpect } from '@playwright/test';\nimport type { Locator, Page } from '@playwright/test';\nimport { AILabBasePage } from './ai-lab-base-page';\n\nexport class AILabTryInstructLabPage extends AILabBasePage {\n  readonly startInstructLabButton: Locator;\n  readonly openInstructLabButton: Locator;\n  readonly statusMessageBox: Locator;\n\n  constructor(page: Page, webview: Page) {\n    super(page, webview, 'Run InstructLab as a container');\n    this.startInstructLabButton = this.webview.getByRole('button', { name: 'Start InstructLab container' });\n    this.openInstructLabButton = this.webview.getByRole('button', { name: 'Open InstructLab container' });\n    this.statusMessageBox = this.webview.getByRole('status');\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/podman-extension-ai-lab-details-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Locator, Page } from '@playwright/test';\nimport { expect as playExpect, ExtensionDetailsPage } from '@podman-desktop/tests-playwright';\n\nexport class AILabExtensionDetailsPage extends ExtensionDetailsPage {\n  readonly errorTab: Locator;\n\n  constructor(page: Page) {\n    super(page, 'Podman AI Lab extension');\n    this.errorTab = this.tabs.getByRole('button', { name: 'Error' });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  async checkIsActive(statusTest: string): Promise<void> {\n    await playExpect(this.status).toHaveText(statusTest);\n  }\n\n  async checkForErrors(): Promise<void> {\n    // we would like to propagate the error's stack trace into test failure message\n    let stackTrace = '';\n    if ((await this.errorTab.count()) > 0) {\n      await this.activateTab('Error');\n      stackTrace = await this.errorStackTrace.innerText();\n    }\n    await playExpect(this.errorTab, `Error Tab was present with stackTrace: ${stackTrace}`).not.toBeVisible();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/model/preferences-extension-ai-lab-page.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Locator, Page } from '@playwright/test';\nimport { expect as playExpect, PreferencesPage } from '@podman-desktop/tests-playwright';\n\nexport class ExtensionAILabPreferencesPage extends PreferencesPage {\n  public static readonly tabName = 'Extension: AI Lab';\n  readonly heading: Locator;\n  readonly experimentalGPUCheckbox: Locator;\n\n  constructor(page: Page) {\n    super(page);\n    this.heading = this.content.getByText(ExtensionAILabPreferencesPage.tabName, { exact: true });\n    this.experimentalGPUCheckbox = this.content.getByRole('checkbox', {\n      name: 'Experimental GPU support for inference servers',\n    });\n  }\n\n  async waitForLoad(): Promise<void> {\n    await playExpect(this.heading).toBeVisible();\n  }\n\n  public async disableGPUPreference(): Promise<void> {\n    await this.experimentalGPUCheckbox.uncheck({ force: true });\n    await playExpect(this.experimentalGPUCheckbox).not.toBeChecked();\n  }\n\n  public async enableGPUPreference(): Promise<void> {\n    await this.experimentalGPUCheckbox.check({ force: true });\n    await playExpect(this.experimentalGPUCheckbox).toBeChecked();\n  }\n  public async isGPUPreferenceEnabled(): 
Promise<boolean> {\n    return await this.experimentalGPUCheckbox.isChecked();\n  }\n}\n"
  },
  {
    "path": "tests/playwright/src/utils/aiLabHandler.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Page } from '@playwright/test';\nimport type { Runner, NavigationBar, ExtensionCardPage } from '@podman-desktop/tests-playwright';\nimport { expect as playExpect, podmanAILabExtension } from '@podman-desktop/tests-playwright';\nimport type { AILabDashboardPage } from 'src/model/ai-lab-dashboard-page';\nimport { handleWebview } from './webviewHandler';\nimport { ExtensionAILabPreferencesPage } from 'src/model/preferences-extension-ai-lab-page';\nimport { AILabExtensionDetailsPage } from 'src/model/podman-extension-ai-lab-details-page';\n\nexport async function reopenAILabDashboard(\n  runner: Runner,\n  page: Page,\n  navigationBar: NavigationBar,\n): Promise<AILabDashboardPage> {\n  const dashboardPage = await navigationBar.openDashboard();\n  await playExpect(dashboardPage.mainPage).toBeVisible();\n  // eslint-disable-next-line @typescript-eslint/no-unused-vars, sonarjs/no-unused-vars\n  const [_locPage, _webview, aiLabNavigationBar] = await handleWebview(runner, page, navigationBar);\n  const aiLabDashboardPage = await aiLabNavigationBar.openDashboard();\n  await aiLabDashboardPage.waitForLoad();\n  return aiLabDashboardPage;\n}\n\nexport async 
function openAILabPreferences(\n  navigationBar: NavigationBar,\n  page: Page,\n): Promise<ExtensionAILabPreferencesPage> {\n  const dashboardPage = await navigationBar.openDashboard();\n  await playExpect(dashboardPage.mainPage).toBeVisible();\n  const settingsBar = await navigationBar.openSettings();\n  await playExpect(settingsBar.preferencesTab).toBeVisible();\n  await settingsBar.expandPreferencesTab();\n  await playExpect(settingsBar.preferencesTab).toBeVisible();\n  await settingsBar.getPreferencesLinkLocator(ExtensionAILabPreferencesPage.tabName).click();\n  const aiLabPreferencesPage = new ExtensionAILabPreferencesPage(page);\n  await aiLabPreferencesPage.waitForLoad();\n  return aiLabPreferencesPage;\n}\n\nexport async function openAILabExtensionDetails(navigationBar: NavigationBar): Promise<AILabExtensionDetailsPage> {\n  const extensionCard = await getExtensionCard(navigationBar);\n  const extensionDetails = await extensionCard.openExtensionDetails(podmanAILabExtension.extensionFullName);\n  const aiLabExtensionDetails = new AILabExtensionDetailsPage(extensionDetails.page);\n  await aiLabExtensionDetails.waitForLoad();\n  return aiLabExtensionDetails;\n}\n\nexport async function getExtensionCard(navigationBar: NavigationBar): Promise<ExtensionCardPage> {\n  const extensions = await navigationBar.openExtensions();\n  const extensionCard = await extensions.getInstalledExtension(\n    podmanAILabExtension.extensionLabel,\n    podmanAILabExtension.extensionFullLabel,\n  );\n  return extensionCard;\n}\n\nexport async function waitForExtensionToInitialize(navigationBar: NavigationBar): Promise<void> {\n  const extensions = await navigationBar.openExtensions();\n  await playExpect\n    .poll(async () => await extensions.extensionIsInstalled(podmanAILabExtension.extensionFullLabel), {\n      timeout: 30000,\n    })\n    .toBeTruthy();\n}\n\nexport async function getExtensionVersion(navigationBar: NavigationBar): Promise<string> {\n  const extensionsPage = await 
navigationBar.openExtensions();\n  const extensionVersion = await extensionsPage.getInstalledExtensionVersion(\n    podmanAILabExtension.extensionLabel,\n    podmanAILabExtension.extensionFullLabel,\n  );\n  playExpect(extensionVersion, `Extension version could not be retrieved.`).toBeDefined();\n  return String(extensionVersion);\n}\n"
  },
  {
    "path": "tests/playwright/src/utils/webviewHandler.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\nimport type { Page } from '@playwright/test';\nimport type { NavigationBar, Runner } from '@podman-desktop/tests-playwright';\nimport { expect as playExpect } from '@podman-desktop/tests-playwright';\nimport { AILabNavigationBar } from 'src/model/ai-lab-navigation-bar';\n\nexport async function handleWebview(\n  runner: Runner,\n  page: Page,\n  navigationBar: NavigationBar,\n): Promise<[Page, Page, AILabNavigationBar]> {\n  const AI_LAB_NAVBAR_EXTENSION_LABEL: string = 'AI Lab';\n  const AI_LAB_PAGE_BODY_LABEL: string = 'Webview AI Lab';\n\n  const aiLabPodmanExtensionButton = navigationBar.navigationLocator.getByRole('link', {\n    name: AI_LAB_NAVBAR_EXTENSION_LABEL,\n  });\n  await playExpect(aiLabPodmanExtensionButton).toBeEnabled();\n  await aiLabPodmanExtensionButton.click();\n  await page.waitForTimeout(2_000);\n\n  const webView = page.getByRole('document', { name: AI_LAB_PAGE_BODY_LABEL });\n  await playExpect(webView).toBeVisible();\n\n  await playExpect\n    .poll(\n      async () => {\n        const windows = runner.getWindows();\n        if (windows.length < 2 || !windows[1]) {\n          return false;\n        }\n        try {\n          await 
windows[1].evaluate(() => true);\n          return true;\n        } catch {\n          return false;\n        }\n      },\n      { timeout: 10_000, intervals: [500] },\n    )\n    .toBeTruthy();\n\n  const [mainPage, webViewPage] = runner.getWindows();\n\n  try {\n    await mainPage.evaluate(() => {\n      const element = document.querySelector('webview');\n      if (element) {\n        (element as HTMLElement).focus();\n      }\n    });\n  } catch (error) {\n    console.log(`Warning: Could not focus webview element: ${error}`);\n  }\n\n  const aiLabNavigationBar = new AILabNavigationBar(mainPage, webViewPage);\n  return [mainPage, webViewPage, aiLabNavigationBar];\n}\n"
  },
  {
    "path": "tests/playwright/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"esnext\",\n    \"module\": \"esnext\",\n    \"moduleResolution\": \"node\",\n    \"strict\": true,\n    \"preserveValueImports\": false,\n    \"skipLibCheck\": false,\n    \"baseUrl\": \".\",\n    \"resolveJsonModule\": true\n  },\n  \"include\": [\"src/**/*.ts\", \"playwright.config.ts\"],\n  \"exclude\": [\"node_modules/**\"]\n}\n"
  },
  {
    "path": "tests/tmt/plans/ai-lab-e2e-plan-default.fmf",
    "content": "# /**********************************************************************\n#  Copyright (C) 2025 Red Hat, Inc.\n#  \n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#  \n#  http://www.apache.org/licenses/LICENSE-2.0\n#  \n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#  \n#  SPDX-License-Identifier: Apache-2.0\n#  ***********************************************************************/\n\nsummary: | \n  This plan provisions a test environment for running Playwright E2E tests for the Podman Desktop AI Lab extension.\n  It installs all required system dependencies, including GUI libraries, \n  sets up Node.js with pnpm, and starts Podman rootless service.\n\ndiscover:\n    how: fmf\n\nexecute:\n    how: tmt \n\nprovision: \n  hardware: \n    memory: \">= 16 GB\"\n    cpu: \n      cores: \">= 4\"\n    disk:\n      - size: \">= 20 GB\"\n\nenvironment:\n  EXT_TEST_GPU_SUPPORT_ENABLED: false\n\nprepare:\n  - name: Install required packages\n    how: shell\n    script: |\n      sudo dnf update -y\n      sudo dnf install -y \\\n      git curl xorg-x11-server-Xvfb \\\n        nss nspr atk at-spi2-atk cups libXcomposite libXdamage libXfixes libXrandr cairo pango alsa-lib \\\n        gcc-c++ gtk3 passt jq\n      # Install Node.js directly from official binary (if not already installed)\n      \n      if ! 
command -v node &> /dev/null || [[ \"$(node -v)\" != \"$NODE_VERSION\" ]]; then\n        curl -fsSL https://nodejs.org/dist/$NODE_VERSION/node-$NODE_VERSION-linux-x64.tar.xz -o node.tar.xz || exit 1\n        sudo tar -C /usr/local --strip-components=1 -xf node.tar.xz || exit 1\n        rm -f node.tar.xz\n      fi\n      \n      npm install -g pnpm\n      echo \"NodeJS version: $(node -v)\"\n      echo \"npm version: $(npm -v)\"\n      echo \"pnpm Version: $(pnpm --version)\"\n  \n  - name: Install podman  \n    how: shell\n    script: | \n      bash $TMT_TREE/tests/tmt/scripts/install-podman.sh\n\n  - name: Enable and start Podman rootless service\n    how: shell\n    script: |\n      systemctl --user enable podman.socket || exit 1\n      systemctl --user start podman.socket || exit 1\n      systemctl --user is-active podman.socket || exit 1\n\n/e2e: \n  summary: Execute Playwright E2E tests.\n  discover+: \n    filter: 'tag:e2e'\n\n/smoke: \n  summary: Execute smoke Playwright E2E tests.\n  discover+: \n    filter: 'tag:smoke'\n\n/instructlab: \n  summary: Execute instructlab Playwright E2E tests.\n  discover+:\n    filter: 'tag:instructlab'\n"
  },
  {
    "path": "tests/tmt/plans/ai-lab-e2e-plan-gpu.fmf",
    "content": "# /**********************************************************************\n#  Copyright (C) 2025 Red Hat, Inc.\n#  \n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#  \n#  http://www.apache.org/licenses/LICENSE-2.0\n#  \n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#  \n#  SPDX-License-Identifier: Apache-2.0\n#  ***********************************************************************/\n\nsummary: | \n  This plan provisions a test environment for running Playwright E2E tests for the Podman Desktop AI Lab extension.\n  It installs all required system dependencies, including GUI libraries, \n  sets up Node.js with pnpm, and starts Podman rootless service.\n\ndiscover:\n    how: fmf\n\nexecute:\n    how: tmt \n\nprovision: \n  hardware: \n    gpu:\n      device-name: GV100 (Tesla V100)\n      vendor-name: NVIDIA\n    memory: \">= 16 GB\"\n    cpu: \n      cores: \">= 4\"\n    disk:\n      - size: \">= 20 GB\"\n\nenvironment: \n  EXT_TEST_GPU_SUPPORT_ENABLED: true\n\nprepare:\n  - name: Install required packages\n    how: shell\n    script: |\n      sudo dnf update -y\n      git curl xorg-x11-server-Xvfb \\\n        nss nspr atk at-spi2-atk cups libXcomposite libXdamage libXfixes libXrandr cairo pango alsa-lib \\\n        gcc-c++ gtk3 passt jq\n      # Install Node.js directly from official binary (if not already installed)\n      \n      if ! 
command -v node &> /dev/null || [[ \"$(node -v)\" != \"$NODE_VERSION\" ]]; then\n        curl -fsSL https://nodejs.org/dist/$NODE_VERSION/node-$NODE_VERSION-linux-x64.tar.xz -o node.tar.xz || exit 1\n        sudo tar -C /usr/local --strip-components=1 -xf node.tar.xz || exit 1\n        rm -f node.tar.xz\n      fi\n      \n      npm install -g pnpm\n      echo \"NodeJS version: $(node -v)\"\n      echo \"npm version: $(npm -v)\"\n      echo \"pnpm Version: $(pnpm --version)\"\n  \n  - name: Install podman  \n    how: shell\n    script: | \n      bash $TMT_TREE/tests/tmt/scripts/install-podman.sh\n\n  - name: Enable and start Podman rootless service\n    how: shell\n    script: |\n      systemctl --user enable podman.socket || exit 1\n      systemctl --user start podman.socket || exit 1\n      systemctl --user is-active podman.socket || exit 1\n\n/e2e: \n  summary: Execute Playwright E2E tests.\n  discover+: \n    filter: 'tag:e2e'\n\n/smoke: \n  summary: Execute smoke Playwright E2E tests.\n  discover+: \n    filter: 'tag:smoke'\n\n/instructlab: \n  summary: Execute instructlab Playwright E2E tests.\n  discover+:\n    filter: 'tag:instructlab'\n"
  },
  {
    "path": "tests/tmt/scripts/create-results.sh",
    "content": "# /**********************************************************************\n#  Copyright (C) 2025 Red Hat, Inc.\n#  \n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#  \n#  http://www.apache.org/licenses/LICENSE-2.0\n#  \n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#  \n#  SPDX-License-Identifier: Apache-2.0\n#  ***********************************************************************/\n\n#!/bin/bash\nset -euo pipefail\n\nPLAYWRIGHT_JUNIT_DIR=\"$TMT_TREE/tests/playwright/output/\"\nPLAYWRIGHT_TRACE_VIDEOS_DIR=\"$TMT_TREE/tests/playwright/tests/playwright/output/\"\n\ncd \"$TMT_TEST_DATA\"\n\nif [ -f \"$PLAYWRIGHT_JUNIT_DIR/junit-results.xml\" ]; then\n  cp \"$PLAYWRIGHT_JUNIT_DIR/junit-results.xml\" .\nelse\n  echo \"Error: junit-results.xml not found\"\n  exit 1\nfi\n\nif [ \"$1\" -eq 0 ]; then \n  cat <<EOF > ./results.yaml\n- name: /tests/$2\n  result: pass\n  note: \n    - \"Playwright end-to-end tests completed successfully.\"\n  log:\n    - ../output.txt\n    - junit-results.xml\nEOF\n\nelif [ \"$1\" -eq 255 ]; then\n\n  if [ -d \"$PLAYWRIGHT_TRACE_VIDEOS_DIR/traces\" ]; then\n    cp -r \"$PLAYWRIGHT_TRACE_VIDEOS_DIR/traces\" .\n  else\n    echo \"Warning: traces directory does not exist\" >&2\n  fi\n\n  if [ -d \"$PLAYWRIGHT_TRACE_VIDEOS_DIR/videos\" ]; then \n    cp -r \"$PLAYWRIGHT_TRACE_VIDEOS_DIR/videos\" .\n  else\n    echo \"Warning: videos directory does not exist\" >&2\n  fi\n\n  cat <<EOF > ./results.yaml\n- name: /tests/$2\n  result: fail\n  note: \n    - \"Playwright tests failed.\"\n  log:\n    - ../output.txt\n    - 
junit-results.xml\n    - videos\n    - traces\nEOF\n\nelse\n  echo \"Warning: Unexpected exit code: $1, treating as failure\" >&2\n  cat <<EOF > ./results.yaml\n- name: /tests/$2\n  result: fail\n  note: \n    - \"Tests failed with unexpected exit code: $1\"\n  log:\n    - ../output.txt\nEOF\nfi\nexit 0\n"
  },
  {
    "path": "tests/tmt/scripts/install-podman.sh",
    "content": "# /**********************************************************************\n#  Copyright (C) 2025 Red Hat, Inc.\n#  \n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#  \n#  http://www.apache.org/licenses/LICENSE-2.0\n#  \n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#  \n#  SPDX-License-Identifier: Apache-2.0\n#  ***********************************************************************/\n\n#!/bin/bash\nset -euo pipefail\n\n# Uninstall a preinstalled Podman version to ensure the desired version will be installed.\nsudo dnf remove -y podman\n\n# Construct the download URL for the specific Podman version.\nCOMPOSE_VERSION=\"fc$(echo \"$COMPOSE\" | cut -d'-' -f2)\"\n\n# Install Podman based on the requested version:\n# \"nightly\": latest nightly build from rhcontainerbot/podman-next COPR repository\nif [[ \"$PODMAN_VERSION\" == \"nightly\" ]]; then\n    sudo dnf copr enable -y rhcontainerbot/podman-next\n    sudo dnf install -y podman --disablerepo=testing-farm-tag-repository \n    PODMAN_VERSION=\"$(dnf --quiet \\\n        --repofrompath=podman-next,https://download.copr.fedorainfracloud.org/results/rhcontainerbot/podman-next/fedora-$(rpm -E %fedora)/${ARCH}/ \\\n        list --showduplicates podman 2>/dev/null | grep dev | tail -n1 | cut -d':' -f2 | cut -d'-' -f1 )\"\nelse\n    # For \"latest\" or specific version, fetch version if needed and install from RPM\n    if [[ \"$PODMAN_VERSION\" == \"latest\" ]]; then\n        PODMAN_VERSION=\"$(curl -s https://api.github.com/repos/containers/podman/releases/latest | jq -r .tag_name | sed 
's/^v//')\"\n    fi\n    CUSTOM_PODMAN_URL=\"https://kojipkgs.fedoraproject.org//packages/podman/${PODMAN_VERSION}/1.${COMPOSE_VERSION}/${ARCH}/podman-${PODMAN_VERSION}-1.${COMPOSE_VERSION}.${ARCH}.rpm\"\n    curl -Lo podman.rpm \"$CUSTOM_PODMAN_URL\"\n    if [[ $? -ne 0 ]]; then\n        echo \"Error: Failed to download Podman RPM from $CUSTOM_PODMAN_URL\"\n        exit 1\n    fi\n    if [[ ! -s podman.rpm ]]; then\n        echo \"Error: Downloaded Podman RPM file is missing or empty.\"\n        rm -f podman.rpm\n        exit 1\n    fi\n    sudo dnf install -y ./podman.rpm\n    rm -f podman.rpm\nfi\n\n# Verify that the installed Podman version matches the expected version. \nINSTALLED_PODMAN_VERSION=\"$(podman --version | cut -d' ' -f3)\"\nNORMALIZED_PODMAN_VERSION=\"${PODMAN_VERSION//\\~/-}\"\n\nif [[ \"$INSTALLED_PODMAN_VERSION\" != \"$NORMALIZED_PODMAN_VERSION\" ]]; then\n    echo \"Podman version mismatch: expected $NORMALIZED_PODMAN_VERSION but got $INSTALLED_PODMAN_VERSION\"\n    exit 1\nfi\n\necho \"Podman installed successfully: $INSTALLED_PODMAN_VERSION\"\n"
  },
  {
    "path": "tests/tmt/tests/e2e-test.fmf",
    "content": "# /**********************************************************************\n#  Copyright (C) 2025 Red Hat, Inc.\n#  \n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#  \n#  http://www.apache.org/licenses/LICENSE-2.0\n#  \n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#  \n#  SPDX-License-Identifier: Apache-2.0\n#  ***********************************************************************/\n\nsummary: Execute Playwright E2E tests.\ntag:\n  - e2e\nduration: 1h\nresult: custom\nframework: shell\ntest: |\n  set +e\n\n  git clone https://github.com/podman-desktop/podman-desktop \"$TMT_TREE/podman-desktop\"\n  export PODMAN_DESKTOP_ARGS=\"$TMT_TREE/podman-desktop\"\n  (\n    cd \"$PODMAN_DESKTOP_ARGS\"\n    pnpm install\n    pnpm build\n  )\n\n  (\n    cd \"$TMT_TREE/tests/playwright\"\n    pnpm add -D @podman-desktop/tests-playwright@next\n  )\n\n  (\n    cd \"$TMT_TREE\"\n    pnpm install\n    pnpm build\n    pnpm test:e2e\n  )\n  \n  EXIT_CODE=$?\n  bash \"$TMT_TREE/tests/tmt/scripts/create-results.sh\" \"$EXIT_CODE\" \"e2e\"\n"
  },
  {
    "path": "tests/tmt/tests/instructlab-test.fmf",
    "content": "# /**********************************************************************\n#  Copyright (C) 2025 Red Hat, Inc.\n#  \n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#  \n#  http://www.apache.org/licenses/LICENSE-2.0\n#  \n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#  \n#  SPDX-License-Identifier: Apache-2.0\n#  ***********************************************************************/\n\nsummary: Execute instructlab Playwright E2E tests.\ntag:\n  - instructlab\nduration: 1h\nresult: custom\nframework: shell\ntest: |\n  set +e\n\n  git clone https://github.com/podman-desktop/podman-desktop \"$TMT_TREE/podman-desktop\"\n  export PODMAN_DESKTOP_ARGS=\"$TMT_TREE/podman-desktop\"\n  (\n    cd \"$PODMAN_DESKTOP_ARGS\"\n    pnpm install\n    pnpm build\n  )\n\n  (\n    cd \"$TMT_TREE/tests/playwright\"\n    pnpm add -D @podman-desktop/tests-playwright@next\n  )\n\n  (\n    cd \"$TMT_TREE\"\n    pnpm install\n    pnpm build\n    pnpm test:e2e:instructlab\n  )\n  \n  EXIT_CODE=$?\n  bash \"$TMT_TREE/tests/tmt/scripts/create-results.sh\" \"$EXIT_CODE\" \"instructlab\"\n"
  },
  {
    "path": "tests/tmt/tests/smoke-test.fmf",
    "content": "# /**********************************************************************\n#  Copyright (C) 2025 Red Hat, Inc.\n#  \n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#  \n#  http://www.apache.org/licenses/LICENSE-2.0\n#  \n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#  \n#  SPDX-License-Identifier: Apache-2.0\n#  ***********************************************************************/\n\nsummary: Execute smoke Playwright E2E tests.\ntag:\n  - smoke\nduration: 1h\nresult: custom\nframework: shell\ntest: |\n  set +e\n\n  git clone https://github.com/podman-desktop/podman-desktop \"$TMT_TREE/podman-desktop\"\n  export PODMAN_DESKTOP_ARGS=\"$TMT_TREE/podman-desktop\"\n  (\n    cd \"$PODMAN_DESKTOP_ARGS\"\n    pnpm install\n    pnpm build\n  )\n\n  (\n    cd \"$TMT_TREE/tests/playwright\"\n    pnpm add -D @podman-desktop/tests-playwright@next\n  )\n\n  (\n    cd \"$TMT_TREE\"\n    pnpm install\n    pnpm build\n    pnpm test:e2e:smoke\n  )\n  \n  EXIT_CODE=$?\n  bash \"$TMT_TREE/tests/tmt/scripts/create-results.sh\" \"$EXIT_CODE\" \"smoke\"\n"
  },
  {
    "path": "tools/compute-model-sizes.sh",
    "content": "cat >Containerfile <<EOF\nFROM registry.access.redhat.com/ubi9-minimal:9.3\nRUN microdnf install -y git make g++\nRUN git clone https://github.com/ggerganov/llama.cpp\nRUN cd llama.cpp && make simple\nRUN pwd\nEOF\npodman build . -q -t get-model-size\ncat packages/backend/src/assets/ai.json | jq -r .models[].url | while read i; do echo $i; curl -s -L -o model $i; podman run --rm -t --security-opt 'label=disable' -v `pwd`/model:/model get-model-size llama.cpp/simple /model a | grep 'model size'; rm model; done\n"
  },
  {
    "path": "types/additional.d.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2023-2025 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\ndeclare module 'tinro/dist/tinro_lib';\ndeclare module '*.png' {\n  const contents: string;\n  export = contents;\n}\n"
  },
  {
    "path": "types/mustache.d.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\n\ndeclare module '*.mustache?raw' {\n  const contents: string;\n  export = contents;\n}\n"
  },
  {
    "path": "types/podman-desktop-api.d.ts",
    "content": "// eslint-disable-next-line etc/no-commented-out-code\n// podman-desktop-api.d.ts\n/* eslint-disable @typescript-eslint/no-explicit-any */\n\ndeclare global {\n  export interface PodmanDesktopApi {\n    getState: () => any;\n    postMessage: (msg: any) => void;\n    setState: (newState: any) => void;\n  }\n\n  function acquirePodmanDesktopApi(): PodmanDesktopApi;\n}\n\nexport { PodmanDesktopApi };\n"
  },
  {
    "path": "types/postman-code-generators.d.ts",
    "content": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n ***********************************************************************/\ndeclare module 'postman-code-generators' {\n  import type { Request } from 'postman-collection';\n\n  export function getLanguageList(): Language[];\n\n  export interface Language {\n    key: string;\n    label: string;\n    syntax_mode: string;\n    variants: LanguageVariant[],\n  }\n\n  export interface LanguageVariant {\n    key: string;\n  }\n\n  export function getOptions(language: string, variant: string, callback: (error: unknown, options: Option[]) => void): void;\n\n  export interface Option {\n    name: string;\n    id: string;\n    type: string;\n    default: string | number | boolean;\n    description: string;\n  }\n\n  export function convert(language: string, variant: string, request: Request, options: Record<string, string | number | boolean>, callback: (error: unknown, snippet: string | undefined) => void): void;\n}\n"
  }
]