Showing preview only (2,376K chars total). Download the full file or copy to clipboard to get everything.
Repository: containers/podman-desktop-extension-ai-lab
Branch: main
Commit: 28796a6eff7c
Files: 457
Total size: 2.2 MB
Directory structure:
gitextract_r3viv2ck/
├── .dockerignore
├── .editorconfig
├── .fmf/
│ └── version
├── .gitattributes
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.yml
│ │ ├── config.yml
│ │ ├── epic.yml
│ │ ├── feature_request.yml
│ │ └── ux-request.yaml
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── dependabot.yml
│ └── workflows/
│ ├── ai-lab-e2e-nightly-windows.yaml
│ ├── build-next.yaml
│ ├── compute-model-sizes.yml
│ ├── e2e-main-tf.yaml
│ ├── e2e-main.yaml
│ ├── llama-stack-playground.yaml
│ ├── pr-check.yaml
│ ├── ramalama.yaml
│ ├── recipe-catalog-change-cleanup.yaml
│ ├── recipe-catalog-change-template.yaml
│ ├── recipe-catalog-change-trigger.yaml
│ ├── release.yaml
│ ├── update-ramalama-references.sh
│ └── update-ramalama-references.yaml
├── .gitignore
├── .husky/
│ ├── commit-msg
│ └── pre-commit
├── .npmrc
├── .prettierrc
├── .vscode/
│ └── settings.json
├── CODE-OF-CONDUCT.md
├── Containerfile
├── LICENSE
├── MIGRATION.md
├── PACKAGING-GUIDE.md
├── README.md
├── RELEASE.md
├── SECURITY.md
├── USAGE_DATA.md
├── api/
│ └── openapi.yaml
├── clean.sh
├── commitlint.config.js
├── docs/
│ └── proposals/
│ ├── ai-studio.md
│ └── state-management.md
├── eslint.config.mjs
├── package.json
├── packages/
│ ├── backend/
│ │ ├── .gitignore
│ │ ├── __mocks__/
│ │ │ └── @podman-desktop/
│ │ │ └── api.js
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── assets/
│ │ │ │ ├── ai.json
│ │ │ │ ├── inference-images.json
│ │ │ │ ├── instructlab-images.json
│ │ │ │ ├── llama-stack-images.json
│ │ │ │ ├── llama-stack-playground-images.json
│ │ │ │ └── openai.json
│ │ │ ├── extension.spec.ts
│ │ │ ├── extension.ts
│ │ │ ├── instructlab-api-impl.ts
│ │ │ ├── llama-stack-api-impl.ts
│ │ │ ├── managers/
│ │ │ │ ├── GPUManager.spec.ts
│ │ │ │ ├── GPUManager.ts
│ │ │ │ ├── SnippetManager.spec.ts
│ │ │ │ ├── SnippetManager.ts
│ │ │ │ ├── TaskRunner.spec.ts
│ │ │ │ ├── TaskRunner.ts
│ │ │ │ ├── apiServer.spec.ts
│ │ │ │ ├── apiServer.ts
│ │ │ │ ├── application/
│ │ │ │ │ ├── applicationManager.spec.ts
│ │ │ │ │ └── applicationManager.ts
│ │ │ │ ├── catalogManager.spec.ts
│ │ │ │ ├── catalogManager.ts
│ │ │ │ ├── gitManager.spec.ts
│ │ │ │ ├── gitManager.ts
│ │ │ │ ├── inference/
│ │ │ │ │ ├── inferenceManager.spec.ts
│ │ │ │ │ └── inferenceManager.ts
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── instructlabManager.spec.ts
│ │ │ │ │ └── instructlabManager.ts
│ │ │ │ ├── llama-stack/
│ │ │ │ │ ├── llamaStackManager.spec.ts
│ │ │ │ │ └── llamaStackManager.ts
│ │ │ │ ├── modelsManager.spec.ts
│ │ │ │ ├── modelsManager.ts
│ │ │ │ ├── monitoringManager.spec.ts
│ │ │ │ ├── monitoringManager.ts
│ │ │ │ ├── playground/
│ │ │ │ │ ├── McpServerManager.spec.ts
│ │ │ │ │ ├── McpServerManager.ts
│ │ │ │ │ ├── aiSdk.spec.ts
│ │ │ │ │ └── aiSdk.ts
│ │ │ │ ├── playgroundV2Manager.spec.ts
│ │ │ │ ├── playgroundV2Manager.ts
│ │ │ │ ├── podmanConnection.spec.ts
│ │ │ │ ├── podmanConnection.ts
│ │ │ │ ├── recipes/
│ │ │ │ │ ├── BuilderManager.spec.ts
│ │ │ │ │ ├── BuilderManager.ts
│ │ │ │ │ ├── PodManager.spec.ts
│ │ │ │ │ ├── PodManager.ts
│ │ │ │ │ ├── RecipeManager.spec.ts
│ │ │ │ │ └── RecipeManager.ts
│ │ │ │ └── snippets/
│ │ │ │ ├── java-okhttp-snippet.spec.ts
│ │ │ │ ├── java-okhttp-snippet.ts
│ │ │ │ ├── python-langchain-snippet.spec.ts
│ │ │ │ ├── python-langchain-snippet.ts
│ │ │ │ ├── quarkus-snippet.spec.ts
│ │ │ │ └── quarkus-snippet.ts
│ │ │ ├── models/
│ │ │ │ ├── AIConfig.spec.ts
│ │ │ │ ├── AIConfig.ts
│ │ │ │ ├── ApplicationOptions.ts
│ │ │ │ ├── HuggingFaceModelHandler.spec.ts
│ │ │ │ ├── HuggingFaceModelHandler.ts
│ │ │ │ ├── ModelHandler.ts
│ │ │ │ ├── TaskRunner.ts
│ │ │ │ ├── URLModelHandler.ts
│ │ │ │ └── baseEvent.ts
│ │ │ ├── registries/
│ │ │ │ ├── ApplicationRegistry.ts
│ │ │ │ ├── CancellationTokenRegistry.spec.ts
│ │ │ │ ├── CancellationTokenRegistry.ts
│ │ │ │ ├── ConfigurationRegistry.spec.ts
│ │ │ │ ├── ConfigurationRegistry.ts
│ │ │ │ ├── ContainerRegistry.spec.ts
│ │ │ │ ├── ContainerRegistry.ts
│ │ │ │ ├── ConversationRegistry.ts
│ │ │ │ ├── InferenceProviderRegistry.ts
│ │ │ │ ├── LocalRepositoryRegistry.spec.ts
│ │ │ │ ├── LocalRepositoryRegistry.ts
│ │ │ │ ├── ModelHandlerRegistry.ts
│ │ │ │ ├── NavigationRegistry.spec.ts
│ │ │ │ ├── NavigationRegistry.ts
│ │ │ │ ├── TaskRegistry.spec.ts
│ │ │ │ └── TaskRegistry.ts
│ │ │ ├── studio-api-impl.spec.ts
│ │ │ ├── studio-api-impl.ts
│ │ │ ├── studio.spec.ts
│ │ │ ├── studio.ts
│ │ │ ├── templates/
│ │ │ │ ├── java-okhttp.mustache
│ │ │ │ ├── python-langchain.mustache
│ │ │ │ └── quarkus-langchain4j.mustache
│ │ │ ├── tests/
│ │ │ │ ├── ai-test.json
│ │ │ │ ├── ai-user-test.json
│ │ │ │ └── utils.ts
│ │ │ ├── utils/
│ │ │ │ ├── JsonWatcher.spec.ts
│ │ │ │ ├── JsonWatcher.ts
│ │ │ │ ├── Publisher.spec.ts
│ │ │ │ ├── Publisher.ts
│ │ │ │ ├── RecipeConstants.ts
│ │ │ │ ├── arch.ts
│ │ │ │ ├── catalogUtils.spec.ts
│ │ │ │ ├── catalogUtils.ts
│ │ │ │ ├── downloader.ts
│ │ │ │ ├── imagesUtils.spec.ts
│ │ │ │ ├── imagesUtils.ts
│ │ │ │ ├── inferenceUtils.spec.ts
│ │ │ │ ├── inferenceUtils.ts
│ │ │ │ ├── mcpUtils.ts
│ │ │ │ ├── modelsUtils.spec.ts
│ │ │ │ ├── modelsUtils.ts
│ │ │ │ ├── pathUtils.ts
│ │ │ │ ├── podman.spec.ts
│ │ │ │ ├── podman.ts
│ │ │ │ ├── podsUtils.ts
│ │ │ │ ├── ports.ts
│ │ │ │ ├── randomUtils.ts
│ │ │ │ ├── sha.spec.ts
│ │ │ │ ├── sha.ts
│ │ │ │ ├── uploader.spec.ts
│ │ │ │ ├── uploader.ts
│ │ │ │ ├── urldownloader.spec.ts
│ │ │ │ ├── urldownloader.ts
│ │ │ │ └── utils.ts
│ │ │ ├── webviewUtils.spec.ts
│ │ │ ├── webviewUtils.ts
│ │ │ └── workers/
│ │ │ ├── IWorker.ts
│ │ │ ├── WindowsWorker.ts
│ │ │ ├── provider/
│ │ │ │ ├── InferenceProvider.spec.ts
│ │ │ │ ├── InferenceProvider.ts
│ │ │ │ ├── LlamaCppPython.spec.ts
│ │ │ │ ├── LlamaCppPython.ts
│ │ │ │ ├── OpenVINO.spec.ts
│ │ │ │ ├── OpenVINO.ts
│ │ │ │ ├── WhisperCpp.spec.ts
│ │ │ │ └── WhisperCpp.ts
│ │ │ └── uploader/
│ │ │ ├── UploaderOptions.ts
│ │ │ ├── WSLUploader.spec.ts
│ │ │ └── WSLUploader.ts
│ │ ├── tsconfig.json
│ │ ├── vite.config.js
│ │ └── vitest.config.js
│ ├── frontend/
│ │ ├── index.html
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── App.spec.ts
│ │ │ ├── App.svelte
│ │ │ ├── Route.svelte
│ │ │ ├── app.css
│ │ │ ├── index.html
│ │ │ ├── lib/
│ │ │ │ ├── ApplicationActions.spec.ts
│ │ │ │ ├── ApplicationActions.svelte
│ │ │ │ ├── Badge.spec.ts
│ │ │ │ ├── Badge.svelte
│ │ │ │ ├── Card.svelte
│ │ │ │ ├── ContentDetailsLayout.spec.ts
│ │ │ │ ├── ContentDetailsLayout.svelte
│ │ │ │ ├── ContentDetailsLayoutTest.svelte
│ │ │ │ ├── ExpandableMessage.svelte
│ │ │ │ ├── FlatMenu.svelte
│ │ │ │ ├── Navigation.spec.ts
│ │ │ │ ├── Navigation.svelte
│ │ │ │ ├── RangeInput.svelte
│ │ │ │ ├── RecipeCard.spec.ts
│ │ │ │ ├── RecipeCard.svelte
│ │ │ │ ├── RecipeCardTags.spec.ts
│ │ │ │ ├── RecipeCardTags.svelte
│ │ │ │ ├── RecipeCardTags.ts
│ │ │ │ ├── RecipeDetails.spec.ts
│ │ │ │ ├── RecipeDetails.svelte
│ │ │ │ ├── RecipeStatus.spec.ts
│ │ │ │ ├── RecipeStatus.svelte
│ │ │ │ ├── RecipesCard.spec.ts
│ │ │ │ ├── RecipesCard.svelte
│ │ │ │ ├── button/
│ │ │ │ │ ├── CopyButton.spec.ts
│ │ │ │ │ ├── CopyButton.svelte
│ │ │ │ │ └── ListItemButtonIcon.svelte
│ │ │ │ ├── conversation/
│ │ │ │ │ ├── ChatMessage.svelte
│ │ │ │ │ ├── ConversationActions.svelte
│ │ │ │ │ ├── ElapsedTime.svelte
│ │ │ │ │ ├── SystemPromptBanner.spec.ts
│ │ │ │ │ ├── SystemPromptBanner.svelte
│ │ │ │ │ ├── ToolCallMessage.spec.ts
│ │ │ │ │ └── ToolCallMessage.svelte
│ │ │ │ ├── icons/
│ │ │ │ │ ├── InstructLabIcon.svelte
│ │ │ │ │ ├── ModelStatusIcon.spec.ts
│ │ │ │ │ ├── ModelStatusIcon.svelte
│ │ │ │ │ ├── ModelWhite.svelte
│ │ │ │ │ ├── PlaygroundWhite.svelte
│ │ │ │ │ └── RemoteModel.svelte
│ │ │ │ ├── images/
│ │ │ │ │ ├── DashboardBanner.svelte
│ │ │ │ │ ├── PodIcon.svelte
│ │ │ │ │ └── VSCodeIcon.svelte
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── AboutInstructLabDiscoverCard.svelte
│ │ │ │ │ └── AboutInstructLabExploreCard.svelte
│ │ │ │ ├── markdown/
│ │ │ │ │ ├── LinkComponent.svelte
│ │ │ │ │ └── MarkdownRenderer.svelte
│ │ │ │ ├── monaco-editor/
│ │ │ │ │ ├── MonacoEditor.svelte
│ │ │ │ │ └── monaco.ts
│ │ │ │ ├── notification/
│ │ │ │ │ ├── ContainerConnectionStatusInfo.spec.ts
│ │ │ │ │ ├── ContainerConnectionStatusInfo.svelte
│ │ │ │ │ ├── ContainerConnectionWrapper.spec.ts
│ │ │ │ │ ├── ContainerConnectionWrapper.svelte
│ │ │ │ │ ├── GPUEnabledMachine.spec.ts
│ │ │ │ │ ├── GPUEnabledMachine.svelte
│ │ │ │ │ ├── GPUPromotion.spec.ts
│ │ │ │ │ └── GPUPromotion.svelte
│ │ │ │ ├── progress/
│ │ │ │ │ ├── TaskItem.spec.ts
│ │ │ │ │ ├── TaskItem.svelte
│ │ │ │ │ ├── TasksBanner.spec.ts
│ │ │ │ │ ├── TasksBanner.svelte
│ │ │ │ │ ├── TasksProgress.spec.ts
│ │ │ │ │ ├── TasksProgress.svelte
│ │ │ │ │ ├── TrackedTasks.spec.ts
│ │ │ │ │ └── TrackedTasks.svelte
│ │ │ │ ├── select/
│ │ │ │ │ ├── ContainerProviderConnectionSelect.spec.ts
│ │ │ │ │ ├── ContainerProviderConnectionSelect.svelte
│ │ │ │ │ ├── InferenceRuntimeSelect.spec.ts
│ │ │ │ │ ├── InferenceRuntimeSelect.svelte
│ │ │ │ │ ├── ModelSelect.spec.ts
│ │ │ │ │ ├── ModelSelect.svelte
│ │ │ │ │ ├── Select.spec.ts
│ │ │ │ │ └── Select.svelte
│ │ │ │ └── table/
│ │ │ │ ├── application/
│ │ │ │ │ ├── ApplicationTable.spec.ts
│ │ │ │ │ ├── ApplicationTable.svelte
│ │ │ │ │ ├── ColumnActions.svelte
│ │ │ │ │ ├── ColumnAge.svelte
│ │ │ │ │ ├── ColumnModel.spec.ts
│ │ │ │ │ ├── ColumnModel.svelte
│ │ │ │ │ ├── ColumnPod.svelte
│ │ │ │ │ ├── ColumnRecipe.spec.ts
│ │ │ │ │ ├── ColumnRecipe.svelte
│ │ │ │ │ ├── ColumnRuntime.spec.ts
│ │ │ │ │ ├── ColumnRuntime.svelte
│ │ │ │ │ └── ColumnStatus.svelte
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── InstructlabColumnAge.svelte
│ │ │ │ │ ├── InstructlabColumnModelName.spec.ts
│ │ │ │ │ ├── InstructlabColumnModelName.svelte
│ │ │ │ │ ├── InstructlabColumnName.svelte
│ │ │ │ │ ├── InstructlabColumnRepository.svelte
│ │ │ │ │ ├── InstructlabColumnStatus.svelte
│ │ │ │ │ └── InstructlabColumnTargetModelName.svelte
│ │ │ │ ├── model/
│ │ │ │ │ ├── ModelColumnAction.spec.ts
│ │ │ │ │ ├── ModelColumnActions.svelte
│ │ │ │ │ ├── ModelColumnAge.spec.ts
│ │ │ │ │ ├── ModelColumnAge.svelte
│ │ │ │ │ ├── ModelColumnLabels.svelte
│ │ │ │ │ ├── ModelColumnName.spec.ts
│ │ │ │ │ ├── ModelColumnName.svelte
│ │ │ │ │ ├── ModelColumnRecipeSelection.svelte
│ │ │ │ │ ├── ModelColumnSize.spec.ts
│ │ │ │ │ └── ModelColumnSize.svelte
│ │ │ │ ├── playground/
│ │ │ │ │ ├── ConversationColumnAction.spec.ts
│ │ │ │ │ ├── ConversationColumnAction.svelte
│ │ │ │ │ ├── PlaygroundColumnIcon.svelte
│ │ │ │ │ ├── PlaygroundColumnModel.svelte
│ │ │ │ │ ├── PlaygroundColumnName.svelte
│ │ │ │ │ ├── PlaygroundColumnRuntime.spec.ts
│ │ │ │ │ └── PlaygroundColumnRuntime.svelte
│ │ │ │ └── service/
│ │ │ │ ├── ServiceAction.spec.ts
│ │ │ │ ├── ServiceAction.svelte
│ │ │ │ ├── ServiceColumnModelName.spec.ts
│ │ │ │ ├── ServiceColumnModelName.svelte
│ │ │ │ ├── ServiceColumnName.spec.ts
│ │ │ │ ├── ServiceColumnName.svelte
│ │ │ │ ├── ServiceColumnRuntime.spec.ts
│ │ │ │ ├── ServiceColumnRuntime.svelte
│ │ │ │ ├── ServiceStatus.spec.ts
│ │ │ │ └── ServiceStatus.svelte
│ │ │ ├── main.ts
│ │ │ ├── models/
│ │ │ │ └── IRouterState.ts
│ │ │ ├── pages/
│ │ │ │ ├── Applications.svelte
│ │ │ │ ├── CreateService.spec.ts
│ │ │ │ ├── CreateService.svelte
│ │ │ │ ├── Dashboard.spec.ts
│ │ │ │ ├── Dashboard.svelte
│ │ │ │ ├── ImportModel.spec.ts
│ │ │ │ ├── ImportModel.svelte
│ │ │ │ ├── InferenceServerDetails.spec.ts
│ │ │ │ ├── InferenceServerDetails.svelte
│ │ │ │ ├── InferenceServers.spec.ts
│ │ │ │ ├── InferenceServers.svelte
│ │ │ │ ├── Model.spec.ts
│ │ │ │ ├── Model.svelte
│ │ │ │ ├── Models.spec.ts
│ │ │ │ ├── Models.svelte
│ │ │ │ ├── NewInstructLabSession.spec.ts
│ │ │ │ ├── NewInstructLabSession.svelte
│ │ │ │ ├── Playground.spec.ts
│ │ │ │ ├── Playground.svelte
│ │ │ │ ├── PlaygroundCreate.spec.ts
│ │ │ │ ├── PlaygroundCreate.svelte
│ │ │ │ ├── Playgrounds.spec.ts
│ │ │ │ ├── Playgrounds.svelte
│ │ │ │ ├── Preferences.svelte
│ │ │ │ ├── Recipe.spec.ts
│ │ │ │ ├── Recipe.svelte
│ │ │ │ ├── Recipes.spec.ts
│ │ │ │ ├── Recipes.svelte
│ │ │ │ ├── StartRecipe.spec.ts
│ │ │ │ ├── StartRecipe.svelte
│ │ │ │ ├── TuneSessions.spec.ts
│ │ │ │ ├── TuneSessions.svelte
│ │ │ │ ├── applications.ts
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── AboutInstructLab.spec.ts
│ │ │ │ │ ├── AboutInstructLab.svelte
│ │ │ │ │ ├── StartInstructLabContainer.spec.ts
│ │ │ │ │ └── StartInstructLabContainer.svelte
│ │ │ │ ├── llama-stack/
│ │ │ │ │ ├── StartLlamaStackContainer.spec.ts
│ │ │ │ │ └── StartLlamaStackContainer.svelte
│ │ │ │ └── server-information/
│ │ │ │ ├── LocalServer.spec.ts
│ │ │ │ └── LocalServer.svelte
│ │ │ ├── stores/
│ │ │ │ ├── application-states.ts
│ │ │ │ ├── catalog.ts
│ │ │ │ ├── containerProviderConnections.ts
│ │ │ │ ├── conversations.ts
│ │ │ │ ├── extensionConfiguration.ts
│ │ │ │ ├── inferenceServers.ts
│ │ │ │ ├── instructlabSessions.ts
│ │ │ │ ├── localRepositories.ts
│ │ │ │ ├── modelsInfo.spec.ts
│ │ │ │ ├── modelsInfo.ts
│ │ │ │ ├── rpcReadable.spec.ts
│ │ │ │ ├── rpcReadable.ts
│ │ │ │ ├── snippetLanguages.ts
│ │ │ │ └── tasks.ts
│ │ │ └── utils/
│ │ │ ├── categoriesUtils.ts
│ │ │ ├── client.ts
│ │ │ ├── dimensions.ts
│ │ │ ├── fileUtils.ts
│ │ │ ├── localRepositoriesUtils.ts
│ │ │ ├── printers.ts
│ │ │ ├── taskUtils.ts
│ │ │ └── versionControlUtils.ts
│ │ ├── tailwind.config.cjs
│ │ ├── tsconfig.json
│ │ └── vite.config.js
│ └── shared/
│ ├── __mocks__/
│ │ └── @podman-desktop/
│ │ └── api.js
│ ├── src/
│ │ ├── InstructlabAPI.ts
│ │ ├── LlamaStackAPI.ts
│ │ ├── Messages.ts
│ │ ├── StudioAPI.ts
│ │ ├── messages/
│ │ │ ├── MessageProxy.spec.ts
│ │ │ └── MessageProxy.ts
│ │ ├── models/
│ │ │ ├── FilterRecipesResult.ts
│ │ │ ├── IApplicationCatalog.ts
│ │ │ ├── IApplicationState.ts
│ │ │ ├── ICategory.ts
│ │ │ ├── IContainerConnectionInfo.ts
│ │ │ ├── IExtensionConfiguration.ts
│ │ │ ├── IGPUInfo.ts
│ │ │ ├── IInference.spec.ts
│ │ │ ├── IInference.ts
│ │ │ ├── ILocalModelInfo.ts
│ │ │ ├── ILocalRepository.ts
│ │ │ ├── IModelInfo.ts
│ │ │ ├── IModelOptions.ts
│ │ │ ├── IModelResponse.ts
│ │ │ ├── IPlaygroundMessage.ts
│ │ │ ├── IPlaygroundV2.ts
│ │ │ ├── IPodman.ts
│ │ │ ├── IRecipe.ts
│ │ │ ├── IRecipeModelIndex.ts
│ │ │ ├── ITask.ts
│ │ │ ├── InferenceServerConfig.ts
│ │ │ ├── McpSettings.ts
│ │ │ ├── RequestOptions.ts
│ │ │ ├── instructlab/
│ │ │ │ ├── IInstructlabContainerConfiguration.ts
│ │ │ │ ├── IInstructlabContainerInfo.ts
│ │ │ │ └── IInstructlabSession.ts
│ │ │ └── llama-stack/
│ │ │ ├── LlamaStackContainerConfiguration.ts
│ │ │ └── LlamaStackContainerInfo.ts
│ │ └── uri/
│ │ ├── Uri.spec.ts
│ │ └── Uri.ts
│ ├── tsconfig.json
│ ├── vite.config.js
│ └── vitest.config.js
├── pnpm-workspace.yaml
├── tests/
│ ├── playwright/
│ │ ├── package.json
│ │ ├── playwright.config.ts
│ │ ├── src/
│ │ │ ├── ai-lab-extension.spec.ts
│ │ │ ├── model/
│ │ │ │ ├── ai-lab-app-details-page.ts
│ │ │ │ ├── ai-lab-base-page.ts
│ │ │ │ ├── ai-lab-creating-model-service-page.ts
│ │ │ │ ├── ai-lab-dashboard-page.ts
│ │ │ │ ├── ai-lab-local-server-page.ts
│ │ │ │ ├── ai-lab-model-catalog-page.ts
│ │ │ │ ├── ai-lab-model-llamastack-page.ts
│ │ │ │ ├── ai-lab-model-service-page.ts
│ │ │ │ ├── ai-lab-navigation-bar.ts
│ │ │ │ ├── ai-lab-playground-details-page.ts
│ │ │ │ ├── ai-lab-playgrounds-page.ts
│ │ │ │ ├── ai-lab-recipes-catalog-page.ts
│ │ │ │ ├── ai-lab-running-apps-page.ts
│ │ │ │ ├── ai-lab-service-details-page.ts
│ │ │ │ ├── ai-lab-start-recipe-page.ts
│ │ │ │ ├── ai-lab-try-instructlab-page.ts
│ │ │ │ ├── podman-extension-ai-lab-details-page.ts
│ │ │ │ └── preferences-extension-ai-lab-page.ts
│ │ │ └── utils/
│ │ │ ├── aiLabHandler.ts
│ │ │ └── webviewHandler.ts
│ │ └── tsconfig.json
│ └── tmt/
│ ├── plans/
│ │ ├── ai-lab-e2e-plan-default.fmf
│ │ └── ai-lab-e2e-plan-gpu.fmf
│ ├── scripts/
│ │ ├── create-results.sh
│ │ └── install-podman.sh
│ └── tests/
│ ├── e2e-test.fmf
│ ├── instructlab-test.fmf
│ └── smoke-test.fmf
├── tools/
│ └── compute-model-sizes.sh
└── types/
├── additional.d.ts
├── mustache.d.ts
├── podman-desktop-api.d.ts
└── postman-code-generators.d.ts
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
node_modules
================================================
FILE: .editorconfig
================================================
# EditorConfig is awesome: http://EditorConfig.org
# https://github.com/jokeyrhyme/standard-editorconfig
# top-most EditorConfig file
root = true
# defaults
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
indent_size = 2
indent_style = space
[*.md]
trim_trailing_whitespace = false
================================================
FILE: .fmf/version
================================================
1
================================================
FILE: .gitattributes
================================================
* text=auto eol=lf
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.yml
================================================
name: Bug 🐞
description: Report a bug
type: bug
body:
- type: markdown
attributes:
value: |
Before opening a bug report, please search for the behaviour in the existing issues.
---
Thank you for taking the time to file a bug report. To address this bug as fast as possible, we need some information.
- type: textarea
id: bug-description
attributes:
label: Bug description
description: What happened?
validations:
required: true
- type: input
id: os
attributes:
label: Operating system
description: "Which operating system are you on? Please provide the version as well. If you are on a Mac, please specify Apple silicon or Intel."
placeholder: "macOS Ventura 13.4 (Arm), Windows 11"
validations:
required: true
- type: dropdown
id: install
attributes:
label: Installation Method
description: "How did you install AI Lab?"
options:
- "from `ghcr.io/containers/podman-desktop-extension-ai-lab` container image"
- "from Podman-Desktop extension page"
- "Other"
- type: dropdown
id: version
attributes:
label: Version
description: What version of the software are you running?
options:
- "next (development version)"
- "1.3.x"
- "1.2.x"
- "1.1.x"
- "1.0.x"
validations:
required: true
- type: textarea
id: steps
attributes:
label: Steps to reproduce
description: What steps do we need to take to reproduce this error?
- type: textarea
id: logs
attributes:
label: Relevant log output
description: If applicable, provide relevant log output.
render: shell
- type: textarea
id: additional-context
attributes:
label: Additional context
description: Add any other context or screenshots here.
================================================
FILE: .github/ISSUE_TEMPLATE/config.yml
================================================
blank_issues_enabled: false
================================================
FILE: .github/ISSUE_TEMPLATE/epic.yml
================================================
name: Epic ⚡
description: A high-level feature
type: epic
body:
- type: markdown
attributes:
value: |
Epics are normally created by the development team, to group a set of related features and plan work across multiple sprints.
The features this epic includes are referenced with the text of the epic.
- type: textarea
id: domain
attributes:
label: Epic domain
description: A clear and concise description of the feature area or domain that this epic will address.
placeholder: AI-Lab should support [...]
validations:
required: true
- type: textarea
id: additional-context
attributes:
label: Additional context
description: Add any other context or screenshots here.
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.yml
================================================
name: Feature 💡
description: A request, idea, or new functionality
type: feature
body:
- type: markdown
attributes:
value: |
Before opening a feature request, please search for potential existing issues.
---
Thank you for taking the time to file a feature request, we appreciate and value your time to help the project!
- type: textarea
id: problem
attributes:
label: Is your feature request related to a problem? Please describe
description: A clear and concise description of what the problem is.
placeholder: I'm always frustrated when [...]
validations:
required: true
- type: textarea
id: solution
attributes:
label: Describe the solution you'd like
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
- type: textarea
id: additional-context
attributes:
label: Additional context
description: Add any other context or screenshots here.
================================================
FILE: .github/ISSUE_TEMPLATE/ux-request.yaml
================================================
name: UX Request
description: UX Request Form
type: UX (design spec)
labels: [UX/UI Issue, Graphic design]
body:
- type: markdown
attributes:
value: |
Before opening a UX request, please search for existing issues.
---
- type: textarea
id: UX-description
attributes:
label: UX Description
description: Describe the request
validations:
required: true
- type: dropdown
id: request-type
attributes:
label: Request type
description: "What type of request is this?"
options:
- "A logo design"
- "An icon"
- "An infographic/chart"
- "a template or design for printed materials"
- "Swag design"
- "Graphic design not covered by the above"
- type: dropdown
id: user-experience
attributes:
label: User Experience Request type
description: "What type of request is this?"
options:
- "UX analysis/suggestions for improvement"
- "User research"
- "User testing"
- "Application mockups/designs"
- "Website mockups/designs"
- "Something else UX-related"
- type: textarea
id: Contacts
attributes:
label: Engineering Contact
description: Who is the primary engineer the design team can speak with about this issue?
- type: textarea
id: Deadline
attributes:
label: Deadline for request
description: When do you need this? If this is for an event, please let us know the date of the event and any lead time you need to get materials produced.
================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
### What does this PR do?
### Screenshot / video of UI
<!-- If this PR is changing UI, please include
screenshots or screencasts showing the difference -->
### What issues does this PR fix or reference?
<!-- Include any related issues from Podman Desktop
repository (or from another issue tracker). -->
### How to test this PR?
<!-- Please explain steps to reproduce -->
================================================
FILE: .github/dependabot.yml
================================================
# Set update schedule for GitHub Actions
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
open-pull-requests-limit: 10
- package-ecosystem: "npm"
directory: "/"
schedule:
interval: daily
open-pull-requests-limit: 10
groups:
fortawesome:
applies-to: version-updates
patterns:
- "@fortawesome/*"
ai-sdk:
applies-to: version-updates
patterns:
- "@ai-sdk/mcp"
- "ai"
================================================
FILE: .github/workflows/ai-lab-e2e-nightly-windows.yaml
================================================
#
# Copyright (C) 2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: Podman Desktop AI Lab E2E Nightly
run-name: Podman Desktop AI Lab E2E Nightly ${{ github.event_name == 'push' && '[Recipe change]' || '' }}
on:
schedule:
- cron: '0 2 * * *'
push:
paths:
- 'packages/backend/src/assets/ai.json'
workflow_dispatch:
inputs:
podman_desktop_repo_args:
default: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'
description: 'Podman Desktop repo fork and branch'
type: string
required: true
ext_repo_options:
default: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'
description: 'Podman Desktop Extension repo, fork and branch'
type: string
required: true
ext_tests_options:
default: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=1,EXT_TEST_GPU_SUPPORT_ENABLED=0'
description: 'E2E tests options in format VAR1=xxx,VAR2=true,VAR3=15 etc.'
type: string
required: true
npm_target:
default: 'test:e2e'
description: 'npm target to run tests'
type: string
required: true
podman_version:
default: 'latest'
description: 'Podman version (use "latest" to auto-fetch latest release, or specify version like "v5.6.1")'
type: string
required: true
podman_options:
default: 'INIT=1,START=1,ROOTFUL=1,NETWORKING=0'
description: 'Podman machine configuration options, no spaces'
type: string
required: true
env_vars:
default: 'TEST_PODMAN_MACHINE=true,ELECTRON_ENABLE_INSPECT=true'
description: 'Env. Variables passed into target machine, ie: VAR1=xxx,VAR2=true... use EXT_TEST_RAG_CHATBOT=1 to run RAG Chatbot test'
type: string
required: true
pde2e_image_version:
default: 'v0.0.3'
description: 'PDE2E runner, builder, podman image versions'
type: string
required: true
mapt_params:
description: |
**Create instance(leave empty to use repo secrets/variables)**
**Format:** IMAGE=xxx;VERSION_TAG=xxx;CPUS=xxx;MEMORY=xxx;EXCLUDED_REGIONS=xxx
**Example:**
IMAGE=quay.io/redhat-developer/mapt;VERSION_TAG=v0.9.8;CPUS=4;MEMORY=32;EXCLUDED_REGIONS="westindia,centralindia,southindia,australiacentral,australiacentral2,australiaeast,australiasoutheast,southafricanorth,southafricawest"
required: false
type: string
jobs:
windows:
timeout-minutes: 180
name: windows-${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
windows-version: ['11']
windows-featurepack: ['25h2-ent']
steps:
- name: Fetch latest Podman version
id: fetch-podman
uses: redhat-actions/podman-install/.github/actions/fetch-latest-podman-version-windows@6b757b792b67ec663765a4f2ca36226e12b2f4cd
with:
version_input: ${{ github.event.inputs.podman_version || 'latest' }}
file_type: 'setup.exe'
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Set the default env. variables
env:
CI: true
DEFAULT_PODMAN_DESKTOP_REPO_ARGS: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'
DEFAULT_NPM_TARGET: 'test:e2e'
DEFAULT_ENV_VARS: 'TEST_PODMAN_MACHINE=true,ELECTRON_ENABLE_INSPECT=true'
DEFAULT_PODMAN_OPTIONS: 'INIT=1,START=1,ROOTFUL=1,NETWORKING=0'
DEFAULT_EXT_TESTS_OPTIONS: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=1,EXT_TEST_GPU_SUPPORT_ENABLED=0'
DEFAULT_EXT_REPO_OPTIONS: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'
DEFAULT_PDE2E_IMAGE_VERSION: 'v0.0.3'
run: |
echo "NPM_TARGET=${{ github.event.inputs.npm_target || env.DEFAULT_NPM_TARGET }}" >> $GITHUB_ENV
echo "ENV_VARS=${{ github.event.inputs.env_vars || env.DEFAULT_ENV_VARS }}" >> $GITHUB_ENV
echo "PODMAN_URL=${{ steps.fetch-podman.outputs.download_url }}" >> $GITHUB_ENV
echo "PDE2E_IMAGE_VERSION=${{ github.event.inputs.pde2e_image_version || env.DEFAULT_PDE2E_IMAGE_VERSION }}" >> $GITHUB_ENV
echo "${{ github.event.inputs.podman_desktop_repo_args || env.DEFAULT_PODMAN_DESKTOP_REPO_ARGS }}" | awk -F ',' \
'{for (i=1; i<=NF; i++) {split($i, kv, "="); print "PD_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
echo "${{ github.event.inputs.ext_tests_options || env.DEFAULT_EXT_TESTS_OPTIONS }}" | awk -F ',' \
'{for (i=1; i<=NF; i++) {split($i, kv, "="); print kv[1]"="kv[2]}}' >> $GITHUB_ENV
echo "${{ github.event.inputs.podman_options || env.DEFAULT_PODMAN_OPTIONS }}" | awk -F ',' \
'{for (i=1; i<=NF; i++) {split($i, kv, "="); print "PODMAN_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
echo "${{ github.event.inputs.ext_repo_options || env.DEFAULT_EXT_REPO_OPTIONS }}" | awk -F ',' \
'{for (i=1; i<=NF; i++) {split($i, kv, "="); print "EXT_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
# For mapt_params, use repo variables directly if input is empty
if [ -n "${{ github.event.inputs.mapt_params }}" ]; then
mapt_params="${{ github.event.inputs.mapt_params }}"
else
mapt_params="IMAGE=${{ vars.MAPT_IMAGE }};VERSION_TAG=${{ vars.MAPT_VERSION_TAG }};CPUS=${{ vars.MAPT_CPUS }};MEMORY=${{ vars.MAPT_MEMORY }};EXCLUDED_REGIONS=\"${{ vars.MAPT_EXCLUDED_REGIONS }}\""
fi
echo "$mapt_params" | awk -F ';' '{for (i=1; i<=NF; i++) {split($i, kv, "="); print "MAPT_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
- name: Create instance
uses: podman-desktop/e2e/.github/actions/create-instance@213a276952d746324895f63cea0b23083013990f
with:
mapt-image: ${{ env.MAPT_IMAGE || '' }}
mapt-version: ${{ env.MAPT_VERSION_TAG || '' }}
windows-version: ${{ matrix.windows-version }}
windows-featurepack: ${{ matrix.windows-featurepack }}
cpus: ${{ env.MAPT_CPUS || '' }}
memory: ${{ env.MAPT_MEMORY || '' }}
excluded-regions: ${{ env.MAPT_EXCLUDED_REGIONS || '' }}
arm-tenant-id: ${{ secrets.ARM_TENANT_ID }}
arm-subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }}
arm-client-id: ${{ secrets.ARM_CLIENT_ID }}
arm-client-secret: ${{ secrets.ARM_CLIENT_SECRET }}
- name: Check instance system info
uses: podman-desktop/e2e/.github/actions/instance-system-info@3548105f45def129d5e3aaa5a3d922e09ac892d9
- name: Emulate X session
uses: podman-desktop/e2e/.github/actions/emulate-x-session@3548105f45def129d5e3aaa5a3d922e09ac892d9
- name: Download Podman, do not initialize
uses: podman-desktop/e2e/.github/actions/download-podman-nightly@952cafee20ca82b1ce48b29c848bac1c31062245
with:
podman-image-tag: ${{ env.PDE2E_IMAGE_VERSION }}
podman-download-url: ${{ env.PODMAN_URL }}
- name: Build Podman Desktop Electron Inspect Enabled binary
uses: podman-desktop/e2e/.github/actions/build-podman-desktop@0c1f0a035e0949941fd6abf959ab556ceec13f03
with:
fork: ${{ env.PD_FORK }}
branch: ${{ env.PD_BRANCH }}
env-vars: ${{ env.ENV_VARS }}
- name: Run Podman Desktop Playwright E2E tests
uses: podman-desktop/e2e/.github/actions/run-playwright-test@15b800edab941d394b32aaaa3f7961bb7db7ec3a
with:
pde2e-runner-tag: ${{ env.PDE2E_IMAGE_VERSION }}
podman-desktop-path: true
fork-repo: ${{ env.PD_FORK }}
branch-name: ${{ env.PD_BRANCH }}
ext-repo: ${{ env.EXT_REPO }}
ext-fork: ${{ env.EXT_FORK }}
ext-branch: ${{ env.EXT_BRANCH }}
ext-tests: ${{ env.EXT_RUN_TESTS_FROM_EXTENSION }}
npm-target: ${{ env.NPM_TARGET }}
podman-init: ${{ env.PODMAN_INIT }}
podman-start: ${{ env.PODMAN_START }}
rootful: ${{ env.PODMAN_ROOTFUL }}
user-networking: ${{ env.PODMAN_NETWORKING }}
podman-provider: 'wsl'
env-vars: ${{ env.ENV_VARS }}
ci-bot-token: ${{ secrets.PODMAN_DESKTOP_BOT_TOKEN }}
- name: Destroy instance
if: always()
uses: podman-desktop/e2e/.github/actions/destroy-instance@36e440f2ac18193214f4ffa8f7f1c4c0cb8c9446
with:
mapt-image: ${{ env.MAPT_IMAGE }}
mapt-version: ${{ env.MAPT_VERSION_TAG }}
arm-tenant-id: ${{ secrets.ARM_TENANT_ID }}
arm-subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }}
arm-client-id: ${{ secrets.ARM_CLIENT_ID }}
arm-client-secret: ${{ secrets.ARM_CLIENT_SECRET }}
- name: Publish Test Report
uses: mikepenz/action-junit-report@v6
if: always()
with:
annotate_only: true
fail_on_failure: true
include_passed: true
detailed_summary: true
require_tests: true
report_paths: '**/*results.xml'
- name: Upload test artifacts
uses: actions/upload-artifact@v7
if: always()
with:
name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}
path: |
results/*
!./**/*.gguf
!./**/*.bin
!./**/output/videos/*
!./**/output/traces/*
- name: Upload test videos
uses: actions/upload-artifact@v7
if: always()
with:
name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}-videos
path: ./**/output/videos/*
- name: Upload test traces
uses: actions/upload-artifact@v7
if: always()
with:
name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}-traces
path: ./**/output/traces/*
================================================
FILE: .github/workflows/build-next.yaml
================================================
#
# Copyright (C) 2023-2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: CI
on:
push:
branches:
- 'main'
jobs:
build:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v6.0.2
- uses: pnpm/action-setup@v5
name: Install pnpm
with:
run_install: false
- uses: actions/setup-node@v6
with:
node-version: 24
cache: 'pnpm'
- name: Execute pnpm
run: pnpm install
- name: Run Build
run: pnpm build
- name: Login to ghcr.io
run: podman login --username ${{ github.repository_owner }} --password ${{ secrets.GITHUB_TOKEN }} ghcr.io
- name: Publish Image
id: publish-image
run: |
IMAGE_NAME=ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab
IMAGE_NIGHTLY=${IMAGE_NAME}:nightly
IMAGE_SHA=${IMAGE_NAME}:${GITHUB_SHA}
podman build -t $IMAGE_NIGHTLY .
podman push $IMAGE_NIGHTLY
podman tag $IMAGE_NIGHTLY $IMAGE_SHA
podman push $IMAGE_SHA
================================================
FILE: .github/workflows/compute-model-sizes.yml
================================================
# This is a basic workflow that is manually triggered
name: Compute model sizes
# Controls when the action will run. Workflow runs when manually triggered using the UI
# or API.
on:
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "compute"
compute:
# The type of runner that the job will run on
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
- uses: actions/checkout@v6.0.2
      # Runs a single command using the runner's shell
- name: Compute model size
run: ./tools/compute-model-sizes.sh
================================================
FILE: .github/workflows/e2e-main-tf.yaml
================================================
# Copyright (C) 2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: PD AI Lab E2E Nightly Testing Farm
on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch:
inputs:
podman_version:
default: 'latest'
description: 'Podman version to install (e.g., "5.5.2", "5.6.0~rc1"). Use "latest" for stable or "nightly" for the latest development build.'
type: string
required: true
npm_target:
description: npm tests target
type: choice
default: 'e2e'
options:
- e2e
- smoke
- instructlab
plan:
description: plans to run
type: choice
default: 'default'
options:
- default
- gpu
jobs:
pd-ai-lab-e2e-testing-farm:
name: pd-e2e-testing-farm-ci
runs-on: ubuntu-latest
timeout-minutes: 180
strategy:
fail-fast: false
matrix:
fedora-version: ['Fedora-42', 'Fedora-43']
plan: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.plan != '' && fromJSON(format('["{0}"]', github.event.inputs.plan)) || fromJSON('["default", "gpu"]') }}
steps:
- name: Set the default env. variables
env:
DEFAULT_NPM_TARGET: 'smoke'
DEFAULT_PODMAN_VERSION: 'latest'
DEFAULT_NODE_VERSION: 'v24.11.1'
run: |
echo "NPM_TARGET=${{ github.event.inputs.npm_target || env.DEFAULT_NPM_TARGET }}" >> $GITHUB_ENV
echo "PLAN=${{ matrix.plan }}" >> $GITHUB_ENV
echo "PODMAN_VERSION=${{ github.event.inputs.podman_version || env.DEFAULT_PODMAN_VERSION }}" >> $GITHUB_ENV
echo "NODE_VERSION=${{ vars.NODE_VERSION || env.DEFAULT_NODE_VERSION }}" >> $GITHUB_ENV
- name: Run Podman Desktop Playwright E2E tests on Testing Farm CI
id: run-e2e-tf
uses: sclorg/testing-farm-as-github-action@b23f0de29ac969d12411215a983da264b4ced149 #v4.2.0
with:
api_key: ${{ secrets.TF_TOKEN }}
create_github_summary: "false"
compose: ${{ matrix.fedora-version }}
tmt_plan_filter: 'name:/tests/tmt/plans/ai-lab-e2e-plan-${{ env.PLAN }}/${{ env.NPM_TARGET }}'
variables: COMPOSE=${{ matrix.fedora-version }};ARCH=x86_64;PODMAN_VERSION=${{ env.PODMAN_VERSION }};NODE_VERSION=${{ env.NODE_VERSION }}
- name: Extract Testing Farm work ID and base URL
if: always()
run: |
TF_ARTIFACTS_URL="${{ steps.run-e2e-tf.outputs.test_log_url }}"
TF_DEFAULT_JUNIT_DEFAULT="${TF_ARTIFACTS_URL}/results-junit.xml"
curl -o results-junit.xml "$TF_DEFAULT_JUNIT_DEFAULT"
TF_WORK_ID=$(grep -o 'work-${{ env.NPM_TARGET }}[^/"]*' results-junit.xml | head -1)
echo "TF_WORK_ID=$TF_WORK_ID" >> $GITHUB_ENV
echo "TF_ARTIFACTS_URL=$TF_ARTIFACTS_URL" >> $GITHUB_ENV
- name: Download Playwright JUnit report from Testing Farm
if: always()
run: |
TF_PLAYWRIGHT_JUNIT_URL="${{ env.TF_ARTIFACTS_URL }}/${{ env.TF_WORK_ID }}/tests/tmt/plans/ai-lab-e2e-plan-${{ env.PLAN }}/${{ env.NPM_TARGET }}/execute/data/guest/default-0/tests/tmt/tests/${{ env.NPM_TARGET }}-test-1/data/junit-results.xml"
curl -o junit-playwright-results.xml "$TF_PLAYWRIGHT_JUNIT_URL"
- name: Publish test report to PR
if: always()
uses: mikepenz/action-junit-report@5b7ee5a21e8674b695313d769f3cbdfd5d4d53a4 #v6.0.0
with:
fail_on_failure: true
include_passed: true
detailed_summary: true
annotate_only: true
require_tests: true
report_paths: '**/junit-playwright-results.xml'
- name: Download test artifacts from Testing Farm
if: failure()
run: |
mkdir -p results
TF_TEST_DATA_URL="${{ env.TF_ARTIFACTS_URL }}/${{ env.TF_WORK_ID }}/tests/tmt/plans/ai-lab-e2e-plan-${{ env.PLAN }}/${{ env.NPM_TARGET }}/execute/data/guest/default-0/tests/tmt/tests/${{ env.NPM_TARGET }}-test-1/data"
TF_TRACES_URL="${TF_TEST_DATA_URL}/traces/"
TF_VIDEOS_URL="${TF_TEST_DATA_URL}/videos/"
echo "Downloading traces"
wget \
--recursive \
--no-parent \
--no-host-directories \
--cut-dirs=10 \
--reject "index.html*" \
--directory-prefix=results \
"$TF_TRACES_URL"
echo "Downloading videos"
wget \
--recursive \
--no-parent \
--no-host-directories \
--cut-dirs=10 \
--reject "index.html*" \
--directory-prefix=results \
"$TF_VIDEOS_URL"
- name: Upload test artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: ai-lab-testing-farm-artifacts-${{ matrix.fedora-version }}-${{ env.PLAN }}
path: |
results/*
**/junit-playwright-results.xml
================================================
FILE: .github/workflows/e2e-main.yaml
================================================
#
# Copyright (C) 2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: e2e-tests-main
on:
push:
branches: [main]
schedule:
- cron: '0 2 * * *'
workflow_dispatch:
inputs:
podman_desktop_repo_args:
default: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'
description: 'Podman Desktop repo fork and branch'
type: string
required: true
ext_repo_options:
default: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'
description: 'Podman Desktop Extension repo, fork and branch'
type: string
required: true
jobs:
e2e-tests:
name: Run E2E tests ${{ github.event_name == 'schedule' && '[nightly]' || '' }}
runs-on: ubuntu-24.04
steps:
- name: Set default env variables
env:
DEFAULT_PODMAN_DESKTOP_REPO_ARGS: 'REPO=podman-desktop,FORK=podman-desktop,BRANCH=main'
DEFAULT_EXT_REPO_OPTIONS: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'
run: |
echo "${{ github.event.inputs.podman_desktop_repo_args || env.DEFAULT_PODMAN_DESKTOP_REPO_ARGS }}" | awk -F ',' \
'{for (i=1; i<=NF; i++) {split($i, kv, "="); print "PD_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
echo "${{ github.event.inputs.ext_repo_options || env.DEFAULT_EXT_REPO_OPTIONS }}" | awk -F ',' \
'{for (i=1; i<=NF; i++) {split($i, kv, "="); print "EXT_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
- uses: actions/checkout@v6.0.2
name: Checkout AI Lab - Workflow Dispatch
if: github.event_name == 'workflow_dispatch'
with:
repository: ${{ env.EXT_FORK }}/${{ env.EXT_REPO }}
ref: ${{ env.EXT_BRANCH }}
path: podman-desktop-extension-ai-lab
- uses: actions/checkout@v6.0.2
name: Checkout AI Lab - Push or Schedule
if: github.event_name == 'push' || github.event_name == 'schedule'
with:
path: podman-desktop-extension-ai-lab
- uses: actions/checkout@v6.0.2
name: Checkout Podman Desktop
with:
repository: ${{ env.PD_FORK }}/${{ env.PD_REPO }}
ref: ${{ env.PD_BRANCH }}
path: podman-desktop
- uses: pnpm/action-setup@v5
name: Install pnpm
with:
run_install: false
package_json_file: ./podman-desktop/package.json
- uses: actions/setup-node@v6
with:
node-version: 24
cache: 'pnpm'
cache-dependency-path: |
./podman-desktop
./podman-desktop-extension-ai-lab
- name: Update podman
run: |
echo "ubuntu version from kubic repository to install podman we need (v5)"
ubuntu_version='23.10'
echo "Add unstable kubic repo into list of available sources and get the repo key"
sudo sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list"
curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" | sudo apt-key add -
echo "Updating database of packages..."
sudo apt-get update -qq
echo "install necessary dependencies for criu package which is not part of ${ubuntu_version}"
sudo apt-get install -qq libprotobuf32t64 python3-protobuf libnet1
echo "install criu manually from static location"
curl -sLO http://archive.ubuntu.com/ubuntu/pool/universe/c/criu/criu_3.16.1-2_amd64.deb && sudo dpkg -i criu_3.16.1-2_amd64.deb
echo "installing/update podman package..."
sudo apt-get -qq -y install podman || { echo "Start fallback steps for podman nightly installation from a static mirror" && \
sudo sh -c "echo 'deb http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list" && \
curl -L "http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" | sudo apt-key add - && \
sudo apt-get update && \
sudo apt-get -y install podman; }
podman version
- name: Revert unprivileged user namespace restrictions in Ubuntu 24.04
run: |
# allow unprivileged user namespace
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
- name: Set cgroup_manager to 'cgroupfs' instead of systemd
run: |
mkdir -p ~/.config/containers
cat <<EOT >> ~/.config/containers/containers.conf
[engine]
cgroup_manager="cgroupfs"
EOT
podman info
- name: Execute pnpm
working-directory: ./podman-desktop
run: pnpm install --frozen-lockfile
- name: Build Podman Desktop for E2E tests
working-directory: ./podman-desktop
run: pnpm test:e2e:build
- name: Ensure getting current HEAD version of the test framework
working-directory: ./podman-desktop-extension-ai-lab/tests/playwright
run: pnpm add -D @podman-desktop/tests-playwright@next
- name: Execute pnpm in AI Lab Extension
working-directory: ./podman-desktop-extension-ai-lab
run: pnpm install
- name: Build Image
working-directory: ./podman-desktop-extension-ai-lab
id: build-image
run: |
pnpm build
podman build -t local_ai_lab_image ./
CONTAINER_ID=$(podman create localhost/local_ai_lab_image --entrypoint "")
mkdir -p tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins
podman export $CONTAINER_ID | tar -x -C tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins/
podman rm -f $CONTAINER_ID
podman rmi -f localhost/local_ai_lab_image:latest
- name: Free up disk space
uses: podman-desktop/e2e/.github/actions/disk-cleanup@6a406f8f24bacffc481553266f9ba8a5293f3077
- name: Run All E2E tests
working-directory: ./podman-desktop-extension-ai-lab
env:
PODMAN_DESKTOP_ARGS: ${{ github.workspace }}/podman-desktop
EXTENSION_PREINSTALLED: true
run: pnpm test:e2e
- name: Publish Test Report
uses: mikepenz/action-junit-report@v6
if: always()
with:
annotate_only: true
fail_on_failure: true
include_passed: true
detailed_summary: true
require_tests: true
report_paths: '**/*results.xml'
- uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-tests
path: |
./**/tests/**/output/
!./**/*.gguf
!./**/*.bin
!./**/output/videos/*
!./**/output/traces/*
- name: Upload test videos
uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-tests-videos
path: ./**/output/videos/*
- name: Upload test traces
uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-tests-traces
path: ./**/output/traces/*
================================================
FILE: .github/workflows/llama-stack-playground.yaml
================================================
#
# Copyright (C) 2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: llama-stack-playground
on:
workflow_dispatch:
inputs:
version:
description: 'llama-stack tag to use (e.g. main, v0.2.8,...)'
type: string
required: true
jobs:
publish:
name: publish
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@0c366fd6a839edf440554fa01a7085ccba70ac98 #v5.0.1
with:
repository: meta-llama/llama-stack
ref: ${{ github.event.inputs.version }}
- name: Install qemu dependency
run: |
sudo apt-get update
sudo apt-get install -y qemu-user-static
- name: Build manifest and images
run: |
podman manifest create quay.io/podman-ai-lab/llama-stack-playground:${{ github.event.inputs.version }}
podman build --platform linux/amd64,linux/arm64 llama_stack/distribution/ui --manifest quay.io/podman-ai-lab/llama-stack-playground:${{ github.event.inputs.version }}
- name: Login to quay.io
run: podman login quay.io --username ${{ secrets.QUAY_USERNAME }} --password ${{ secrets.QUAY_PASSWORD }}
- name: Push manifest and images to quay.io
run: podman manifest push quay.io/podman-ai-lab/llama-stack-playground:${{ github.event.inputs.version }}
================================================
FILE: .github/workflows/pr-check.yaml
================================================
#
# Copyright (C) 2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: pr-check
on: [pull_request]
jobs:
lint-format-unit:
name: linter, formatters and unit tests / ${{ matrix.os }}
runs-on: ${{ matrix.os }}
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
os: [windows-2022, ubuntu-22.04, macos-14]
steps:
- uses: actions/checkout@v6.0.2
- uses: pnpm/action-setup@v5
name: Install pnpm
with:
run_install: false
- uses: actions/setup-node@v6
with:
node-version: 24
cache: 'pnpm'
- name: Execute pnpm
run: pnpm install
- name: Run linter
run: pnpm lint:check
- name: Run formatter
run: pnpm format:check
- name: Run unit tests
run: pnpm test:unit
- name: Run typecheck
run: pnpm typecheck
- name: Run svelte check
run: pnpm svelte:check
# Check we don't have changes in git
- name: Check no changes in git
if: ${{ matrix.os=='ubuntu-22.04'}}
run: |
if ! git diff --exit-code; then
echo "Found changes in git"
exit 1
fi
e2e-pr-check:
name: e2e tests smoke
runs-on: ubuntu-24.04
env:
SKIP_INSTALLATION: true
steps:
- uses: actions/checkout@v6.0.2
with:
path: podman-desktop-extension-ai-lab
# Set up pnpm
- uses: pnpm/action-setup@v5
name: Install pnpm
with:
run_install: false
package_json_file: ./podman-desktop-extension-ai-lab/package.json
# Install Node.js
- uses: actions/setup-node@v6
with:
node-version: 24
# Checkout podman desktop
- uses: actions/checkout@v6.0.2
with:
repository: containers/podman-desktop
ref: main
path: podman-desktop
- name: Update podman
run: |
echo "ubuntu version from kubic repository to install podman we need (v5)"
ubuntu_version='23.10'
echo "Add unstable kubic repo into list of available sources and get the repo key"
sudo sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list"
curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" | sudo apt-key add -
echo "Updating database of packages..."
sudo apt-get update -qq
echo "install necessary dependencies for criu package which is not part of ${ubuntu_version}"
sudo apt-get install -qq libprotobuf32t64 python3-protobuf libnet1
echo "install criu manually from static location"
curl -sLO http://archive.ubuntu.com/ubuntu/pool/universe/c/criu/criu_3.16.1-2_amd64.deb && sudo dpkg -i criu_3.16.1-2_amd64.deb
echo "installing/update podman package..."
sudo apt-get -qq -y install podman || { echo "Start fallback steps for podman nightly installation from a static mirror" && \
sudo sh -c "echo 'deb http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list" && \
curl -L "http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" | sudo apt-key add - && \
sudo apt-get update && \
sudo apt-get -y install podman; }
podman version
- name: Revert unprivileged user namespace restrictions in Ubuntu 24.04
run: |
# allow unprivileged user namespace
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
- name: Set cgroup_manager to 'cgroupfs' instead of systemd
run: |
mkdir -p ~/.config/containers
cat <<EOT >> ~/.config/containers/containers.conf
[engine]
cgroup_manager="cgroupfs"
EOT
podman info
- name: Install pnpm deps and build Podman Desktop
working-directory: ./podman-desktop
run: |
pnpm install --frozen-lockfile
pnpm test:e2e:build
- name: Ensure getting current HEAD version of the test framework
working-directory: ./podman-desktop-extension-ai-lab/tests/playwright
run: |
# workaround for https://github.com/containers/podman-desktop-extension-bootc/issues/712
version=$(npm view @podman-desktop/tests-playwright@next version)
echo "Version of @podman-desktop/tests-playwright to be used: $version"
jq --arg version "$version" '.devDependencies."@podman-desktop/tests-playwright" = $version' package.json > package.json_tmp && mv package.json_tmp package.json
- name: Execute pnpm in AI Lab Extension
working-directory: ./podman-desktop-extension-ai-lab
run: pnpm install --no-frozen-lockfile
- name: Build Image
working-directory: ./podman-desktop-extension-ai-lab
id: build-image
run: |
pnpm build
podman build -t local_ai_lab_image ./
CONTAINER_ID=$(podman create localhost/local_ai_lab_image --entrypoint "")
mkdir -p tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins
podman export $CONTAINER_ID | tar -x -C tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins/
podman rm -f $CONTAINER_ID
podman rmi -f localhost/local_ai_lab_image:latest
- name: Free up disk space
uses: podman-desktop/e2e/.github/actions/disk-cleanup@6a406f8f24bacffc481553266f9ba8a5293f3077
- name: Run E2E Smoke tests
working-directory: ./podman-desktop-extension-ai-lab
env:
PODMAN_DESKTOP_ARGS: ${{ github.workspace }}/podman-desktop
EXTENSION_PREINSTALLED: true
run: pnpm test:e2e:smoke
- name: Publish Test Report
uses: mikepenz/action-junit-report@v6
if: always()
with:
annotate_only: true
fail_on_failure: true
include_passed: true
detailed_summary: true
require_tests: true
report_paths: '**/*results.xml'
- uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-pr-check
path: |
./**/tests/**/output/
!./**/*.gguf
!./**/*.bin
!./**/output/videos/*
!./**/output/traces/*
- name: Upload test videos
uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-pr-check-videos
path: ./**/output/videos/*
- name: Upload test traces
uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-pr-check-traces
path: ./**/output/traces/*
================================================
FILE: .github/workflows/ramalama.yaml
================================================
#
# Copyright (C) 2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: ramalama
on:
schedule:
- cron: '0 2 * * *'
workflow_dispatch:
inputs:
tag:
default: 'latest'
description: 'Ramalama images tag to use'
type: string
required: true
jobs:
e2e-check:
name: e2e tests
runs-on: ubuntu-24.04
env:
SKIP_INSTALLATION: true
steps:
- uses: actions/checkout@v6.0.2
with:
path: podman-desktop-extension-ai-lab
# Set up pnpm
- uses: pnpm/action-setup@v5
name: Install pnpm
with:
run_install: false
package_json_file: ./podman-desktop-extension-ai-lab/package.json
# Install Node.js
- uses: actions/setup-node@v6
with:
node-version: 24
# Checkout podman desktop
- uses: actions/checkout@v6.0.2
with:
repository: podman-desktop/podman-desktop
ref: main
path: podman-desktop
- name: Update podman
run: |
echo "ubuntu version from kubic repository to install podman we need (v5)"
ubuntu_version='23.10'
echo "Add unstable kubic repo into list of available sources and get the repo key"
sudo sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list"
curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" | sudo apt-key add -
echo "Updating database of packages..."
sudo apt-get update -qq
echo "install necessary dependencies for criu package which is not part of ${ubuntu_version}"
sudo apt-get install -qq libprotobuf32t64 python3-protobuf libnet1
echo "install criu manually from static location"
curl -sLO http://archive.ubuntu.com/ubuntu/pool/universe/c/criu/criu_3.16.1-2_amd64.deb && sudo dpkg -i criu_3.16.1-2_amd64.deb
echo "installing/update podman package..."
sudo apt-get -qq -y install podman || { echo "Start fallback steps for podman nightly installation from a static mirror" && \
sudo sh -c "echo 'deb http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list" && \
curl -L "http://ftp.lysator.liu.se/pub/opensuse/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" | sudo apt-key add - && \
sudo apt-get update && \
sudo apt-get -y install podman; }
podman version
- name: Revert unprivileged user namespace restrictions in Ubuntu 24.04
run: |
# allow unprivileged user namespace
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
- name: Set cgroup_manager to 'cgroupfs' instead of systemd
run: |
mkdir -p ~/.config/containers
cat <<EOT >> ~/.config/containers/containers.conf
[engine]
cgroup_manager="cgroupfs"
EOT
podman info
- name: Install pnpm deps and build Podman Desktop
working-directory: ./podman-desktop
run: |
pnpm install --frozen-lockfile
pnpm test:e2e:build
- name: Ensure getting current HEAD version of the test framework
working-directory: ./podman-desktop-extension-ai-lab/tests/playwright
run: |
# workaround for https://github.com/podman-desktop/podman-desktop-extension-bootc/issues/712
version=$(npm view @podman-desktop/tests-playwright@next version)
echo "Version of @podman-desktop/tests-playwright to be used: $version"
jq --arg version "$version" '.devDependencies."@podman-desktop/tests-playwright" = $version' package.json > package.json_tmp && mv package.json_tmp package.json
- name: Execute pnpm in AI Lab Extension
working-directory: ./podman-desktop-extension-ai-lab
run: pnpm install --no-frozen-lockfile
- name: Update ramalama image references in AI Lab Extension
working-directory: ./podman-desktop-extension-ai-lab
run: sed -i -E "s/(@sha256:[0-9a-f]+)/:${{ github.event_name != 'workflow_dispatch' && 'latest' || github.event.inputs.tag }}/g" packages/backend/src/assets/inference-images.json
- name: Build Image
working-directory: ./podman-desktop-extension-ai-lab
id: build-image
run: |
pnpm build
podman build -t local_ai_lab_image ./
CONTAINER_ID=$(podman create localhost/local_ai_lab_image --entrypoint "")
mkdir -p tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins
podman export $CONTAINER_ID | tar -x -C tests/playwright/tests/playwright/output/ai-lab-tests-pd/plugins/
podman rm -f $CONTAINER_ID
podman rmi -f localhost/local_ai_lab_image:latest
- name: Free up disk space
uses: podman-desktop/e2e/.github/actions/disk-cleanup@6a406f8f24bacffc481553266f9ba8a5293f3077
- name: Run E2E tests
working-directory: ./podman-desktop-extension-ai-lab
env:
PODMAN_DESKTOP_ARGS: ${{ github.workspace }}/podman-desktop
EXTENSION_PREINSTALLED: true
run: pnpm test:e2e
- name: Publish Test Report
uses: mikepenz/action-junit-report@v6
if: always()
with:
annotate_only: true
fail_on_failure: true
include_passed: true
detailed_summary: true
require_tests: true
report_paths: '**/*results.xml'
- uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-check
path: |
./**/tests/**/output/
!./**/*.gguf
!./**/*.bin
!./**/output/videos/*
!./**/output/traces/*
- name: Upload test videos
uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-check-videos
path: ./**/output/videos/*
- name: Upload test traces
uses: actions/upload-artifact@v7
if: always()
with:
name: e2e-check-traces
path: ./**/output/traces/*
================================================
FILE: .github/workflows/recipe-catalog-change-cleanup.yaml
================================================
name: recipe-catalog-change-cleanup
on:
workflow_run:
workflows: ["recipe-catalog-change-windows-trigger"]
types:
- completed
jobs:
extract-context:
runs-on: ubuntu-24.04
outputs:
extract-context: ${{ steps.prepare-context.outputs.extract-context }}
trigger-template: ${{ steps.prepare-context.outputs.trigger-template }}
steps:
- name: Prepare context
id: prepare-context
env:
WORKFLOW_RUN: ${{ toJson(github.event.workflow_run) }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "Workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}"
echo "Fork owner: ${{ fromJson(env.WORKFLOW_RUN).head_repository.owner.login }}"
echo "Fork repo: ${{ fromJson(env.WORKFLOW_RUN).head_repository.name }}"
echo "Fork branch: ${{ fromJson(env.WORKFLOW_RUN).head_branch }}"
echo "Commit SHA: ${{ fromJson(env.WORKFLOW_RUN).head_sha }}"
echo "Base repo: ${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}"
echo "Conclusion: ${{ fromJson(env.WORKFLOW_RUN).conclusion }}"
# Fetch job conclusions using the GitHub CLI
echo "Fetching jobs for workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}"
gh api \
repos/${{ github.repository }}/actions/runs/${{ fromJson(env.WORKFLOW_RUN).id }}/jobs \
--jq '.jobs[] | "\(.name)=\(.conclusion)"' | while read -r line; do
echo "$line" >> $GITHUB_OUTPUT
done
cat $GITHUB_OUTPUT
cleanup:
runs-on: ubuntu-24.04
needs: extract-context
if: ${{ github.event.workflow_run.conclusion == 'skipped' || (github.event.workflow_run.conclusion == 'success' && needs.extract-context.outputs.trigger-template == 'skipped') }}
steps:
- name: Remove skipped or cancelled workflow run
env:
WORKFLOW_RUN: ${{ toJson(github.event.workflow_run) }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "Cleaning up workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}"
gh run delete ${{ fromJson(env.WORKFLOW_RUN).id }} --repo ${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}
echo "Workflow run ID ${{ fromJson(env.WORKFLOW_RUN).id }} has been cleaned up."
================================================
FILE: .github/workflows/recipe-catalog-change-template.yaml
================================================
name: Run recipe tests on catalog change
on:
workflow_call:
inputs:
trigger-workflow-run-id:
required: true
type: string
trigger-workflow-fork:
required: true
type: string
trigger-workflow-repo-name:
required: true
type: string
trigger-workflow-branch:
required: true
type: string
trigger-workflow-commit-sha:
required: true
type: string
trigger-workflow-base-repo:
required: true
type: string
pd-fork:
required: false
type: string
pd-branch:
required: false
type: string
pd-env-vars:
required: false
type: string
podman-options:
required: false
type: string
podman-download-url:
required: false
type: string
ext_tests_options:
required: false
type: string
npm-target:
required: false
type: string
pde2e-image-version:
required: false
type: string
mapt_params:
required: false
type: string
jobs:
# Provisions a Windows VM on Azure via mapt, builds Podman Desktop on it and
# runs the AI Lab recipe e2e tests, reporting commit statuses to the PR.
windows:
name: recipe-catalog-windows-${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}
runs-on: ubuntu-24.04
strategy:
fail-fast: false
matrix:
windows-version: ['11']
windows-featurepack: ['25h2-ent']
steps:
# Publishes a "pending" commit status on the triggering PR so this run is
# visible there; the context name is reused by the final status update step.
- name: Add PR check status
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
status_context="catalog-change-windows-matrix-${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}"
echo "status_context=${status_context}" >> "$GITHUB_ENV"
# NOTE(review): 'set -xuo' leaves '-o' without an option name (it merely
# prints the current settings) — presumably 'set -xuo pipefail' was
# intended; confirm.
set -xuo
# Status msg
data="{\"state\":\"pending\""
data="${data},\"description\":\"Running recipe tests on catalog change on Windows ${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}\""
data="${data},\"context\":\"$status_context\""
data="${data},\"target_url\":\"https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\"}"
# Create status by API call
curl -L -v -X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ env.GH_TOKEN }}" \
https://api.github.com/repos/${{ inputs.trigger-workflow-base-repo }}/statuses/${{ inputs.trigger-workflow-commit-sha }} \
-d "${data}"
# Resolves the Podman version bundled with Podman Desktop; exported as
# PD_PODMAN_VERSION for the default-env step below.
- name: Get Podman version used by Podman Desktop
run: |
version=$(curl https://raw.githubusercontent.com/containers/podman-desktop/main/extensions/podman/packages/extension/src/podman5.json | jq -r '.version')
echo "Default Podman Version from Podman Desktop: ${version}"
echo "PD_PODMAN_VERSION=${version}" >> $GITHUB_ENV
# Computes the effective configuration for the run: explicit workflow inputs
# win over the DEFAULT_* fallbacks below. Scalar values are exported directly
# to $GITHUB_ENV; the comma/semicolon-separated option strings are split into
# individual env vars (PODMAN_*, EXT_*, MAPT_*).
- name: Set the default env. variables
  env:
    DEFAULT_FORK: 'containers'
    DEFAULT_BRANCH: 'main'
    DEFAULT_NPM_TARGET: 'test:e2e'
    DEFAULT_ENV_VARS: 'TEST_PODMAN_MACHINE=true,ELECTRON_ENABLE_INSPECT=true'
    DEFAULT_PODMAN_OPTIONS: 'INIT=1,START=1,ROOTFUL=1,NETWORKING=0'
    DEFAULT_EXT_TESTS_OPTIONS: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=1,EXT_TEST_GPU_SUPPORT_ENABLED=0'
    DEFAULT_EXT_REPO_OPTIONS: 'REPO=podman-desktop-extension-ai-lab,FORK=containers,BRANCH=main'
    DEFAULT_PODMAN_VERSION: "${{ env.PD_PODMAN_VERSION || '5.3.2' }}"
    # $DEFAULT_PODMAN_VERSION is expanded by the shell when the value is echoed below.
    DEFAULT_URL: "https://github.com/containers/podman/releases/download/v$DEFAULT_PODMAN_VERSION/podman-$DEFAULT_PODMAN_VERSION-setup.exe"
    DEFAULT_PDE2E_IMAGE_VERSION: 'v0.0.3-windows'
    DEFAULT_MAPT_PARAMS: "IMAGE=${{ vars.MAPT_IMAGE || 'quay.io/redhat-developer/mapt' }};VERSION_TAG=${{ vars.MAPT_VERSION_TAG || 'v0.9.7' }};CPUS=${{ vars.MAPT_CPUS || '4' }};MEMORY=${{ vars.MAPT_MEMORY || '32' }};EXCLUDED_REGIONS=\"${{ vars.MAPT_EXCLUDED_REGIONS || 'westindia,centralindia,southindia,australiacentral,australiacentral2,australiaeast,australiasoutheast,southafricanorth,southafricawest' }}\""
  run: |
    echo "FORK=${{ inputs.pd-fork || env.DEFAULT_FORK }}" >> $GITHUB_ENV
    echo "BRANCH=${{ inputs.pd-branch || env.DEFAULT_BRANCH }}" >> $GITHUB_ENV
    echo "NPM_TARGET=${{ inputs.npm-target || env.DEFAULT_NPM_TARGET }}" >> $GITHUB_ENV
    echo "ENV_VARS=${{ inputs.pd-env-vars || env.DEFAULT_ENV_VARS }}" >> $GITHUB_ENV
    echo "PODMAN_URL=${{ inputs.podman-download-url || env.DEFAULT_URL }}" >> $GITHUB_ENV
    echo "PDE2E_IMAGE_VERSION=${{ inputs.pde2e-image-version || env.DEFAULT_PDE2E_IMAGE_VERSION }}" >> $GITHUB_ENV
    # Use the extension repo coordinates from the triggering workflow when ALL
    # of them are provided. Fixed: the condition previously tested '-z'
    # (empty), and the override was written to $GITHUB_ENV, which cannot
    # change '${{ env.* }}' expansion within the same step — so the trigger
    # coordinates were never used. Track the effective value in a shell
    # variable instead.
    ext_repo_options="${{ env.DEFAULT_EXT_REPO_OPTIONS }}"
    if [[ -n "${{ inputs.trigger-workflow-repo-name }}" ]] && [[ -n "${{ inputs.trigger-workflow-fork }}" ]] && [[ -n "${{ inputs.trigger-workflow-branch }}" ]]; then
      ext_repo_options="REPO=${{ inputs.trigger-workflow-repo-name }},FORK=${{ inputs.trigger-workflow-fork }},BRANCH=${{ inputs.trigger-workflow-branch }}"
    fi
    # This workflow runs via workflow_call: its inputs live in the 'inputs'
    # context, not 'github.event.inputs' (which belongs to the caller's
    # event and is empty here) — fixed below for ext_tests_options and
    # mapt_params.
    echo "${{ inputs.ext_tests_options || env.DEFAULT_EXT_TESTS_OPTIONS }}" | awk -F ',' \
      '{for (i=1; i<=NF; i++) {split($i, kv, "="); print kv[1]"="kv[2]}}' >> $GITHUB_ENV
    # Fixed: 'inputs.podman-options' belongs with the PODMAN_* split (it was
    # previously attached to the extension-repo split below, leaving the
    # input ignored here).
    echo "${{ inputs.podman-options || env.DEFAULT_PODMAN_OPTIONS }}" | awk -F ',' \
      '{for (i=1; i<=NF; i++) {split($i, kv, "="); print "PODMAN_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
    echo "${ext_repo_options}" | awk -F ',' \
      '{for (i=1; i<=NF; i++) {split($i, kv, "="); print "EXT_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
    echo "${{ inputs.mapt_params || env.DEFAULT_MAPT_PARAMS }}" | awk -F ';' \
      '{for (i=1; i<=NF; i++) {split($i, kv, "="); print "MAPT_"kv[1]"="kv[2]}}' >> $GITHUB_ENV
# Provisions the Windows VM on Azure spot capacity via mapt. Connection
# details (host, username, userpassword, id_rsa) are written into the
# workspace and consumed by the later ssh/pde2e steps.
- name: Create instance
run: |
# Create instance
podman run -d --name windows-create --rm \
-v ${PWD}:/workspace:z \
-e ARM_TENANT_ID=${{ secrets.ARM_TENANT_ID }} \
-e ARM_SUBSCRIPTION_ID=${{ secrets.ARM_SUBSCRIPTION_ID }} \
-e ARM_CLIENT_ID=${{ secrets.ARM_CLIENT_ID }} \
-e ARM_CLIENT_SECRET='${{ secrets.ARM_CLIENT_SECRET }}' \
--user 0 \
${{ env.MAPT_IMAGE }}:${{ env.MAPT_VERSION_TAG }} azure \
windows create \
--project-name 'windows-desktop' \
--backed-url 'file:///workspace' \
--conn-details-output '/workspace' \
--windows-version '${{ matrix.windows-version }}' \
--windows-featurepack '${{ matrix.windows-featurepack }}' \
--cpus ${{ env.MAPT_CPUS }} \
--memory ${{ env.MAPT_MEMORY }} \
--nested-virt \
--tags project=podman-desktop \
--spot-excluded-regions ${{ env.MAPT_EXCLUDED_REGIONS }} \
--spot
# Check logs
podman logs -f windows-create
# Smoke-checks SSH connectivity to the new VM using the connection files
# produced by mapt above.
- name: Check instance system info
run: |
ssh -i id_rsa \
-o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o ServerAliveInterval=30 \
-o ServerAliveCountMax=1200 \
$(cat username)@$(cat host) "systeminfo"
# Keeps an interactive desktop session alive so GUI tests can run on the VM.
- name: Emulate X session
run: |
# use fake rdp to emulate an active x session
podman run -d --name x-session \
-e RDP_HOST=$(cat host) \
-e RDP_USER=$(cat username) \
-e RDP_PASSWORD=$(cat userpassword) \
quay.io/rhqp/frdp:v0.0.1
# Wait until the x session has been created
podman wait --condition running x-session
# Check logs for the x session
podman logs x-session
# Installs Podman on the VM (download only: init/start are deferred to the
# test runner, see the -initialize/-start 0 flags).
- name: Download Podman, do not initialize
run: |
podman run --rm -d --name pde2e-podman-run \
-e TARGET_HOST=$(cat host) \
-e TARGET_HOST_USERNAME=$(cat username) \
-e TARGET_HOST_KEY_PATH=/data/id_rsa \
-e TARGET_FOLDER=pd-e2e \
-e TARGET_CLEANUP=false \
-e TARGET_RESULTS=results \
-e OUTPUT_FOLDER=/data \
-e DEBUG=true \
-v $PWD:/data:z \
quay.io/odockal/pde2e-podman:${{ env.PDE2E_IMAGE_VERSION }} \
pd-e2e/podman.ps1 \
-downloadUrl ${{ env.PODMAN_URL }} \
-targetFolder pd-e2e \
-resultsFolder results \
-initialize 0 \
-rootful 0 \
-start 0 \
-installWSL 0
# check logs
podman logs -f pde2e-podman-run
# Builds the Podman Desktop binary (FORK/BRANCH/ENV_VARS come from the
# "Set the default env. variables" step) on the Windows VM; the resulting
# binary path is written to results/pde2e-binary-path.log.
- name: Build Podman Desktop Electron Inspect Enabled binary
run: |
podman run --rm -d --name pde2e-builder-run \
-e TARGET_HOST=$(cat host) \
-e TARGET_HOST_USERNAME=$(cat username) \
-e TARGET_HOST_KEY_PATH=/data/id_rsa \
-e TARGET_FOLDER=pd-e2e \
-e TARGET_CLEANUP=false \
-e TARGET_RESULTS=results \
-e OUTPUT_FOLDER=/data \
-e DEBUG=true \
-v $PWD:/data:z \
quay.io/odockal/pde2e-builder:${{ env.PDE2E_IMAGE_VERSION }} \
pd-e2e/builder.ps1 \
-targetFolder pd-e2e \
-resultsFolder results \
-fork ${{ env.FORK }} \
-branch ${{ env.BRANCH }} \
-envVars ${{ env.ENV_VARS }}
# check logs
podman logs -f pde2e-builder-run
# Runs the Playwright e2e tests on the VM; EXT_*/PODMAN_*/NPM_TARGET env vars
# were prepared by the "Set the default env. variables" step, and the podman
# and Podman Desktop binary locations come from the previous steps' logs.
- name: Run Podman Desktop Playwright E2E tests
run: |
podman run -d --name pde2e-runner-run \
-e TARGET_HOST=$(cat host) \
-e TARGET_HOST_USERNAME=$(cat username) \
-e TARGET_HOST_KEY_PATH=/data/id_rsa \
-e TARGET_FOLDER=pd-e2e \
-e TARGET_RESULTS=results \
-e OUTPUT_FOLDER=/data \
-e DEBUG=true \
-v $PWD:/data:z \
quay.io/odockal/pde2e-runner:${{ env.PDE2E_IMAGE_VERSION }} \
pd-e2e/runner.ps1 \
-targetFolder pd-e2e \
-resultsFolder results \
-podmanPath $(cat results/podman-location.log) \
-pdPath "$(cat results/pde2e-binary-path.log | tr '\n' " ")" \
-fork ${{ env.FORK }} \
-branch ${{ env.BRANCH }} \
-extRepo ${{ env.EXT_REPO }} \
-extFork ${{ env.EXT_FORK }} \
-extBranch ${{ env.EXT_BRANCH }} \
-extTests ${{ env.EXT_RUN_TESTS_FROM_EXTENSION }} \
-npmTarget ${{ env.NPM_TARGET }} \
-initialize ${{ env.PODMAN_INIT }} \
-rootful ${{ env.PODMAN_ROOTFUL }} \
-start ${{ env.PODMAN_START }} \
-userNetworking ${{ env.PODMAN_NETWORKING }} \
-envVars ${{ env.ENV_VARS }} \
-runAsAdmin ${{ env.EXT_RUN_TESTS_AS_ADMIN }}
# check logs
podman logs -f pde2e-runner-run
# Annotates the run with the junit results; 'fail_on_failure' makes this
# step's outcome reflect the test result, which the next step reads.
- name: Publish Test Report
id: test-report
uses: mikepenz/action-junit-report@v6
if: always() # always run even if the previous step fails
with:
annotate_only: true
fail_on_failure: true
include_passed: true
detailed_summary: true
require_tests: true
report_paths: '**/*results.xml'
# Flips the "pending" commit status created at the start of the job to
# success/failure based on the test-report outcome.
- name: Update status of the PR check
if: always()
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# NOTE(review): 'set -xuo' leaves '-o' without an option name — presumably
# 'set -xuo pipefail' was intended; confirm.
set -xuo
# Status msg
data="{\"state\":\"success\""
if [[ ${{ steps.test-report.outcome }} != "success" ]]; then
data="{\"state\":\"failure\""
fi
data="${data},\"description\":\"Finished recipe tests on catalog change on Windows ${{ matrix.windows-version }}-${{ matrix.windows-featurepack }}\""
data="${data},\"context\":\"${{ env.status_context }}\""
data="${data},\"target_url\":\"https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\"}"
# Create status by API call
curl -L -v -X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ env.GH_TOKEN }}" \
https://api.github.com/repos/${{ inputs.trigger-workflow-base-repo }}/statuses/${{ inputs.trigger-workflow-commit-sha }} \
-d "${data}"
# Tears down the Azure VM even when earlier steps failed, to avoid leaking
# paid spot instances.
- name: Destroy instance
if: always()
run: |
# Destroy instance
podman run -d --name windows-destroy --rm \
-v ${PWD}:/workspace:z \
-e ARM_TENANT_ID=${{ secrets.ARM_TENANT_ID }} \
-e ARM_SUBSCRIPTION_ID=${{ secrets.ARM_SUBSCRIPTION_ID }} \
-e ARM_CLIENT_ID=${{ secrets.ARM_CLIENT_ID }} \
-e ARM_CLIENT_SECRET='${{ secrets.ARM_CLIENT_SECRET }}' \
--user 0 \
${{ env.MAPT_IMAGE }}:${{ env.MAPT_VERSION_TAG }} azure \
windows destroy \
--project-name 'windows-desktop' \
--backed-url 'file:///workspace'
# Check logs
podman logs -f windows-destroy
# Results archive, excluding downloaded models (*.gguf, *.bin) and the
# videos/traces which are uploaded as separate artifacts below.
- name: Upload test artifacts
uses: actions/upload-artifact@v7
if: always()
with:
name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}
path: |
results/*
!./**/*.gguf
!./**/*.bin
!./**/output/videos/*
!./**/output/traces/*
- name: Upload test videos
uses: actions/upload-artifact@v7
if: always()
with:
name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}-videos
path: ./**/output/videos/*
- name: Upload test traces
uses: actions/upload-artifact@v7
if: always()
with:
name: results-e2e-${{ matrix.windows-version }}${{ matrix.windows-featurepack }}-traces
path: ./**/output/traces/*
================================================
FILE: .github/workflows/recipe-catalog-change-trigger.yaml
================================================
# Fires after the "pr-check" workflow completes; when the PR modified the
# recipe catalog (packages/backend/src/assets/ai.json) it calls the reusable
# recipe-catalog-change-template workflow to run the recipe e2e tests.
name: recipe-catalog-change-windows-trigger
on:
workflow_run:
workflows: ["pr-check"]
types:
- completed
jobs:
# Extracts fork/branch/commit details from the triggering workflow_run event
# and detects whether ai.json changed between the PR head and the base repo.
extract-context:
runs-on: ubuntu-24.04
if: ${{ github.event.workflow_run.conclusion == 'success' }}
outputs:
workflow-run-id: ${{ steps.parse-event.outputs.workflow-run-id }}
fork-owner: ${{ steps.parse-event.outputs.fork-owner }}
fork-repo: ${{ steps.parse-event.outputs.fork-repo }}
fork-branch: ${{ steps.parse-event.outputs.fork-branch }}
commit-sha: ${{ steps.parse-event.outputs.commit-sha }}
base-repo: ${{ steps.parse-event.outputs.base-repo }}
changes-detected: ${{ steps.parse-event.outputs.changes-detected }}
steps:
- name: Parse event data
id: parse-event
env:
WORKFLOW_RUN: ${{ toJson(github.event.workflow_run) }}
GH_TOKEN: ${{ github.token }}
run: |
echo "Workflow run ID: ${{ fromJson(env.WORKFLOW_RUN).id }}"
echo "workflow-run-id=${{ fromJson(env.WORKFLOW_RUN).id }}" >> $GITHUB_OUTPUT
echo "Fork owner: ${{ fromJson(env.WORKFLOW_RUN).head_repository.owner.login }}"
echo "fork-owner=${{ fromJson(env.WORKFLOW_RUN).head_repository.owner.login }}" >> $GITHUB_OUTPUT
echo "Fork repo: ${{ fromJson(env.WORKFLOW_RUN).head_repository.name }}"
echo "fork-repo=${{ fromJson(env.WORKFLOW_RUN).head_repository.name }}" >> $GITHUB_OUTPUT
echo "Fork branch: ${{ fromJson(env.WORKFLOW_RUN).head_branch }}"
echo "fork-branch=${{ fromJson(env.WORKFLOW_RUN).head_branch }}" >> $GITHUB_OUTPUT
echo "Commit SHA: ${{ fromJson(env.WORKFLOW_RUN).head_sha }}"
echo "commit-sha=${{ fromJson(env.WORKFLOW_RUN).head_sha }}" >> $GITHUB_OUTPUT
echo "Base repo: ${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}"
echo "base-repo=${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}" >> $GITHUB_OUTPUT
# Clone the base repo, add the PR head repository as a remote and diff the
# head branch against the base HEAD to list the files the PR touches.
# NOTE(review): 'www.github.com' only works via redirect — presumably
# 'github.com' was intended; confirm.
git clone "https://www.github.com/${{ fromJson(env.WORKFLOW_RUN).repository.full_name }}" "${{ fromJson(env.WORKFLOW_RUN).repository.name }}" --depth 1
cd "${{ fromJson(env.WORKFLOW_RUN).repository.name }}"
git remote add upstream "https://www.github.com/${{ fromJson(env.WORKFLOW_RUN).head_repository.full_name }}"
git fetch upstream
git diff --name-only upstream/${{ fromJson(env.WORKFLOW_RUN).head_branch }} HEAD > changes.txt
if grep -qe 'packages/backend/src/assets/ai.json' changes.txt; then
echo "Changes detected in ai.json"
echo "changes-detected=true" >> $GITHUB_OUTPUT
else
echo "No changes detected in ai.json"
echo "changes-detected=false" >> $GITHUB_OUTPUT
fi
# Calls the reusable test workflow only when the recipe catalog changed.
trigger-template:
needs: extract-context
uses: containers/podman-desktop-extension-ai-lab/.github/workflows/recipe-catalog-change-template.yaml@main
if: ${{ needs.extract-context.outputs.changes-detected == 'true' }}
strategy:
fail-fast: false
with:
trigger-workflow-run-id: ${{ needs.extract-context.outputs.workflow-run-id }}
trigger-workflow-fork: ${{ needs.extract-context.outputs.fork-owner }}
trigger-workflow-repo-name: ${{ needs.extract-context.outputs.fork-repo }}
trigger-workflow-branch: ${{ needs.extract-context.outputs.fork-branch }}
trigger-workflow-commit-sha: ${{ needs.extract-context.outputs.commit-sha }}
trigger-workflow-base-repo: ${{ needs.extract-context.outputs.base-repo }}
ext_tests_options: 'EXT_RUN_TESTS_FROM_EXTENSION=1,EXT_RUN_TESTS_AS_ADMIN=0,EXT_TEST_GPU_SUPPORT_ENABLED=0'
secrets: inherit
================================================
FILE: .github/workflows/release.yaml
================================================
#
# Copyright (C) 2024-2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
name: release
on:
workflow_dispatch:
inputs:
version:
description: 'Version to release'
required: true
branch:
description: 'Branch to use for the release'
required: true
default: main
env:
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
jobs:
# Bumps the version in every package.json, creates and pushes the git tag,
# opens a draft GitHub release, and (when releasing from main) opens a
# follow-up PR bumping main to the next "-next" development version.
tag:
name: Tagging
runs-on: ubuntu-24.04
outputs:
githubTag: ${{ steps.TAG_UTIL.outputs.githubTag}}
extVersion: ${{ steps.TAG_UTIL.outputs.extVersion}}
releaseId: ${{ steps.create_release.outputs.id}}
steps:
- uses: actions/checkout@v6.0.2
with:
ref: ${{ github.event.inputs.branch }}
# Derives the v-prefixed git tag and the plain extension version from the
# dispatch input.
- name: Generate tag utilities
id: TAG_UTIL
run: |
TAG_PATTERN=${{ github.event.inputs.version }}
echo "githubTag=v$TAG_PATTERN" >> ${GITHUB_OUTPUT}
echo "extVersion=$TAG_PATTERN" >> ${GITHUB_OUTPUT}
- name: tag
run: |
# NOTE(review): only user.name is configured here — git normally also
# requires user.email to create a commit; confirm the runner provides one.
git config --local user.name ${{ github.actor }}
# Add the new version in package.json file
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${{ steps.TAG_UTIL.outputs.extVersion }}\",#g" package.json
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${{ steps.TAG_UTIL.outputs.extVersion }}\",#g" packages/backend/package.json
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${{ steps.TAG_UTIL.outputs.extVersion }}\",#g" packages/frontend/package.json
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${{ steps.TAG_UTIL.outputs.extVersion }}\",#g" tests/playwright/package.json
git add package.json
git add packages/backend/package.json
git add packages/frontend/package.json
git add tests/playwright/package.json
# commit the changes
git commit -m "chore: 🥁 tagging ${{ steps.TAG_UTIL.outputs.githubTag }} 🥳"
echo "Tagging with ${{ steps.TAG_UTIL.outputs.githubTag }}"
git tag ${{ steps.TAG_UTIL.outputs.githubTag }}
git push origin ${{ steps.TAG_UTIL.outputs.githubTag }}
# Creates a draft release for the tag; it is published by the release job
# once the build job has pushed the image.
- name: Create Release
id: create_release
uses: ncipollo/release-action@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag: ${{ steps.TAG_UTIL.outputs.githubTag }}
name: ${{ steps.TAG_UTIL.outputs.githubTag }}
draft: true
prerelease: false
- name: Create the PR to bump the version in the main branch (only if we're tagging from main branch)
if: ${{ github.event.inputs.branch == 'main' }}
run: |
git config --local user.name ${{ github.actor }}
# Compute the next minor version x.(y+1).0 from the released version.
CURRENT_VERSION=$(echo "${{ steps.TAG_UTIL.outputs.extVersion }}")
tmp=${CURRENT_VERSION%.*}
minor=${tmp#*.}
bumpedVersion=${CURRENT_VERSION%%.*}.$((minor + 1)).0
bumpedBranchName="bump-to-${bumpedVersion}"
git checkout -b "${bumpedBranchName}"
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${bumpedVersion}-next\",#g" package.json
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${bumpedVersion}-next\",#g" packages/backend/package.json
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${bumpedVersion}-next\",#g" packages/frontend/package.json
sed -i "s#version\":\ \"\(.*\)\",#version\":\ \"${bumpedVersion}-next\",#g" tests/playwright/package.json
git add package.json
git add packages/backend/package.json
git add packages/frontend/package.json
git add tests/playwright/package.json
# NOTE(review): '--amend' folds this change into the tagging commit created
# by the 'tag' step above (this branch starts from it) — confirm intended.
git commit -s --amend -m "chore: bump version to ${bumpedVersion}"
git push origin "${bumpedBranchName}"
echo -e "📢 Bump version to ${bumpedVersion}\n\n${{ steps.TAG_UTIL.outputs.extVersion }} has been released.\n\n Time to switch to the new ${bumpedVersion} version 🥳" > /tmp/pr-title
pullRequestUrl=$(gh pr create --title "chore: 📢 Bump version to ${bumpedVersion}" --body-file /tmp/pr-title --head "${bumpedBranchName}" --base "main")
echo "📢 Pull request created: ${pullRequestUrl}"
echo "➡️ Flag the PR as being ready for review"
gh pr ready "${pullRequestUrl}"
echo "🔅 Mark the PR as being ok to be merged automatically"
gh pr merge "${pullRequestUrl}" --auto --rebase
git checkout ${{ steps.TAG_UTIL.outputs.githubTag }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Builds the extension from the freshly created tag and publishes the OCI
# image (version tag + latest) to ghcr.io.
build:
  needs: [tag]
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v6.0.2
      with:
        ref: ${{ needs.tag.outputs.githubTag }}
    - uses: pnpm/action-setup@v5
      name: Install pnpm
      with:
        run_install: false
    - uses: actions/setup-node@v6
      with:
        node-version: 24
        cache: 'pnpm'
    # Renamed from "Execute yarn": this project uses pnpm, not yarn.
    - name: Install dependencies
      run: pnpm install
    - name: Run Build
      run: pnpm build
    - name: Login to ghcr.io
      run: podman login --username ${{ github.repository_owner }} --password ${{ secrets.GITHUB_TOKEN }} ghcr.io
    # Pushes the versioned image first, then re-tags and pushes "latest".
    - name: Build Image
      id: build-image
      run: |
        podman build -t ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:${{ needs.tag.outputs.extVersion }} .
        podman push ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:${{ needs.tag.outputs.extVersion }}
        podman tag ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:${{ needs.tag.outputs.extVersion }} ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:latest
        podman push ghcr.io/${{ github.repository_owner }}/podman-desktop-extension-ai-lab:latest
# Publishes the draft GitHub release created by the tag job, but only after
# the image has been pushed successfully by the build job.
release:
needs: [tag, build]
name: Release
runs-on: ubuntu-24.04
steps:
- name: id
run: echo the release id is ${{ needs.tag.outputs.releaseId}}
- name: Publish release
uses: StuYarrow/publish-release@v1.1.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
id: ${{ needs.tag.outputs.releaseId}}
================================================
FILE: .github/workflows/update-ramalama-references.sh
================================================
#!/usr/bin/env bash
#
# Copyright (C) 2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0

# Script to update ramalama image references in inference-images.json.
#
# Usage: update-ramalama-references.sh <tag>
#
# For each known ramalama image, resolves the manifest digest of <tag> on
# quay.io and pins "quay.io/<image>@<digest>" into the JSON file.
#
# Exit codes (consumed by update-ramalama-references.yaml, which uses
# continue-on-error and treats a non-zero outcome as "changes available"):
#   0  - file already up to date
#   10 - file updated with new digests
set -euo pipefail

# Fail with a clear usage message instead of an opaque 'set -u' error.
if [ $# -ne 1 ] || [ -z "$1" ]; then
  echo "Usage: $0 <ramalama-tag>" >&2
  exit 1
fi
TAG=$1

JSON_PATH="packages/backend/src/assets/inference-images.json"
TMP_JSON="${JSON_PATH}.tmp"

# Entries are "<json top-level key>:<quay repository>:<json sub key>".
IMAGES=(
  "whispercpp:ramalama/ramalama-whisper-server:default"
  "llamacpp:ramalama/ramalama-llama-server:default"
  "llamacpp:ramalama/cuda-llama-server:cuda"
  "openvino:ramalama/openvino:default"
)

cp "$JSON_PATH" "$TMP_JSON"
for entry in "${IMAGES[@]}"; do
  IFS=":" read -r key image jsonkey <<< "$entry"
  # HEAD request: quay returns the manifest digest in the
  # Docker-Content-Digest response header. '|| true' keeps a failed lookup
  # from aborting silently under set -e/pipefail so we can report it below.
  # (Also fixed: 'awk -e' is a GNU/gawk extension rejected by mawk/BSD awk.)
  digest=$(curl -s "https://quay.io/v2/$image/manifests/$TAG" \
    -H 'Accept: application/vnd.oci.image.index.v1+json' --head \
    | grep -i Docker-Content-Digest | awk '{ print $2 }' | tr -d '\r' || true)
  # Fail loudly on an unresolved digest instead of writing a broken
  # "<image>@" reference into the JSON file.
  if [[ "$digest" != sha256:* ]]; then
    echo "Could not resolve digest for $image:$TAG (got: '$digest')" >&2
    rm -f "$TMP_JSON"
    exit 1
  fi
  # Update the JSON file with the new digest
  jq --arg img "quay.io/$image" --arg dig "$digest" --arg key "$key" --arg jsonkey "$jsonkey" \
    '(.[$key][$jsonkey]) = ($img + "@" + $dig)' \
    "$TMP_JSON" > "$TMP_JSON.new" && mv "$TMP_JSON.new" "$TMP_JSON"
done

# Compare and update only if something changed.
if cmp -s "$JSON_PATH" "$TMP_JSON"; then
  echo "No update needed: digests are up to date."
  rm "$TMP_JSON"
  exit 0
else
  mv "$TMP_JSON" "$JSON_PATH"
  echo "Updated inference-images.json with latest digests."
  exit 10
fi
================================================
FILE: .github/workflows/update-ramalama-references.yaml
================================================
#
# Copyright (C) 2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# This workflow automatically updates ramalama image digests in inference-images.json
# and creates a pull request with the changes.
name: update-ramalama-references
on:
schedule:
- cron: '0 3 * * *' # Runs daily at 03:00 UTC
workflow_dispatch:
permissions:
contents: write
jobs:
update-references:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@0c366fd6a839edf440554fa01a7085ccba70ac98 # v5.0.1
# Picks the highest x.y.z tag of the ramalama-llama-server image on quay.io.
- name: Get latest ramalama version
id: get_ramalama_version
run: |
RAMALAMA_VERSION=$(curl -s https://quay.io/v2/ramalama/ramalama-llama-server/tags/list -s | jq .tags[] | grep -E '^"[0-9]+\.[0-9]+\.[0-9]+"$' | sort -V | tail -n 1 | tr -d '"')
echo "RAMALAMA_VERSION=${RAMALAMA_VERSION}" >> $GITHUB_OUTPUT
# Skips everything below when an open PR for this exact version already exists.
- name: Check if PR already exists
id: pr_exists
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
with:
script: |
const branch = `update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}`;
const { data: pulls } = await github.rest.pulls.list({
owner: context.repo.owner,
repo: context.repo.repo,
head: `${context.repo.owner}:${branch}`,
state: 'open',
});
if (pulls.length > 0) {
core.setOutput('exists', 'true');
} else {
core.setOutput('exists', 'false');
}
# The script exits 10 when it rewrote the file, so with continue-on-error an
# outcome of 'failure' below means "new digests were written" — that is why
# the commit/PR steps check for outcome == 'failure'.
- name: Update ramalama image references in inference-images.json
id: update_digests
if: steps.pr_exists.outputs.exists == 'false'
run: |
bash .github/workflows/update-ramalama-references.sh "${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}"
continue-on-error: true
- name: Commit changes
if: steps.pr_exists.outputs.exists == 'false' && steps.update_digests.outcome == 'failure'
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
git checkout -b "update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}"
git add packages/backend/src/assets/inference-images.json
git commit -m "chore: update ramalama image references ${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}"
git push origin "update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}"
- name: Create Pull Request
if: steps.pr_exists.outputs.exists == 'false' && steps.update_digests.outcome == 'failure'
run: |
echo -e "update ramalama image references to ${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}" > /tmp/pr-title
pullRequestUrl=$(gh pr create --title "chore: update ramalama image references to ${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}" --body-file /tmp/pr-title --head "update-ramalama-references-${{ steps.get_ramalama_version.outputs.RAMALAMA_VERSION }}" --base "main")
echo "📢 Pull request created: ${pullRequestUrl}"
echo "➡️ Flag the PR as being ready for review"
gh pr ready "${pullRequestUrl}"
env:
GITHUB_TOKEN: ${{ secrets.PODMAN_DESKTOP_BOT_TOKEN }}
================================================
FILE: .gitignore
================================================
node_modules
.DS_Store
dist
.eslintcache
**/coverage
.idea
output
================================================
FILE: .husky/commit-msg
================================================
#!/bin/sh
#
# Copyright (C) 2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0

# Husky commit-msg hook: lints the commit message with commitlint and makes
# sure it carries exactly one Signed-off-by trailer for the author.
#
# '-e' added: without it the hook's exit status was only that of the final
# duplicate-trailer check, so a commitlint failure did not block the commit.
set -eu
# avoid [[ which is not POSIX sh.
if test "$#" != 1; then
echo "$0 requires an argument."
exit 1
fi
if test ! -f "$1"; then
echo "file does not exist: $1"
exit 1
fi
# $1 is the path of the file holding the commit message.
pnpm commitlint --edit "$1"
# Expected trailer built from the author ident: "Signed-off-by: Name <email>".
SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
# Append the trailer unless the message already contains it.
grep -qs "^$SOB" "$1" || echo "$SOB" >>"$1"
# Catches duplicate Signed-off-by lines.
test "" = "$(grep '^Signed-off-by: ' "$1" |
sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || {
echo >&2 Duplicate Signed-off-by lines.
exit 1
}
================================================
FILE: .husky/pre-commit
================================================
#
# Copyright (C) 2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Husky pre-commit hook: run lint-staged (lint/format) on the staged files only.
pnpm lint-staged
================================================
FILE: .npmrc
================================================
node-linker=hoisted
================================================
FILE: .prettierrc
================================================
{
"svelteSortOrder" : "options-styles-scripts-markup",
"svelteStrictMode": true,
"svelteAllowShorthand": false,
"svelteIndentScriptAndStyle": false,
"bracketSameLine": true,
"singleQuote": true,
"arrowParens": "avoid",
"printWidth": 120,
"trailingComma": "all",
"plugins": ["prettier-plugin-svelte"]
}
================================================
FILE: .vscode/settings.json
================================================
{
"typescript.preferences.importModuleSpecifier": "non-relative"
}
================================================
FILE: CODE-OF-CONDUCT.md
================================================
Podman Desktop Extension AI Lab Project Community Code of Conduct
The Podman Desktop Extension AI Lab Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
================================================
FILE: Containerfile
================================================
#
# Copyright (C) 2024 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Stage 1: collect the pre-built extension files into /extension.
# Fixed: 'as' -> 'AS' (lowercase stage keyword triggers the FromAsCasing
# build-check warning; keyword casing should match 'FROM').
FROM scratch AS builder
COPY packages/backend/dist/ /extension/dist
COPY packages/backend/package.json /extension/
COPY packages/backend/media/ /extension/media
COPY LICENSE /extension/
COPY packages/backend/icon.png /extension/
COPY packages/backend/brain.woff2 /extension/
COPY README.md /extension/
COPY api/openapi.yaml /extension/api/
# Final image: only the /extension payload plus the OCI/Podman Desktop labels.
FROM scratch
LABEL org.opencontainers.image.title="AI Lab" \
      org.opencontainers.image.description="AI Lab" \
      org.opencontainers.image.vendor="Red Hat" \
      io.podman-desktop.api.version=">= 1.8.0"
COPY --from=builder /extension /extension
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: MIGRATION.md
================================================
# Migration guide
## ℹ️ ApplicationCatalog
Before **Podman AI Lab** `v1.2.0` the [user-catalog](./PACKAGING-GUIDE.md#applicationcatalog) was not versioned.
Starting from `v1.2.0`, the user-catalog is required to have a `version` property.
> [!NOTE]
> The `user-catalog.json` file can be found in `~/.local/share/containers/podman-desktop/extensions-storage/redhat.ai-lab`.
The list of catalog versions can be found in [packages/backend/src/utils/catalogUtils.ts](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/utils/catalogUtils.ts)
The catalog has its own version number, as it may not need to be updated with every extension release. It follows the semantic versioning convention.
## `None` to Catalog `1.0`
`None` represents any catalog version prior to the first versioning.
Version `1.0` of the catalog adds an important property to models `backend`, defining the type of framework required by the model to run (E.g. LLamaCPP, WhisperCPP).
### 🛠️ How to migrate
You can either delete any existing `user-catalog` by deleting the `~/.local/share/containers/podman-desktop/extensions-storage/redhat.ai-lab/user-catalog.json`.
> [!WARNING]
> This will remove the models you have imported from the catalog. You will be able to import them again afterward.
If you want to keep the data, you can migrate it by updating certain properties within the recipes and models fields.
### Recipes
The recipe object has a new property `backend` which defines which framework is required.
Accepted values are `llama-cpp`, `whisper-cpp` and `none`.
Moreover, the `models` property has been changed to `recommended`.
> [!TIP]
> Before Podman AI Lab version v1.2, recipes used the `models` property to list the compatible models. Now all models using the same `backend` can be used. We introduced `recommended` to highlight certain models.
**Example**
```diff
{
"version": "1.0",
"recipes": [{
"id": "chatbot",
"description" : "This is a Streamlit chat demo application.",
"name" : "ChatBot",
"repository": "https://github.com/containers/ai-lab-recipes",
- "models": [
+ "recommended": [
"hf.instructlab.granite-7b-lab-GGUF",
"hf.instructlab.merlinite-7b-lab-GGUF"
]
+ "backend": "llama-cpp"
}],
"models": [],
"categories": []
}
```
### Models
The model object also has the new property `backend`, which defines which framework is required.
Additionally, we have enhanced security by introducing a new optional `sha256` property.
> [!TIP]
> To get the sha256 of a model, you can use the `sha256sum [model-file]` command in a terminal.
**Example**
```diff
{
"version": "1.0",
"recipes": [],
"models": [{
"id": "hf.instructlab.granite-7b-lab-GGUF",
"name": "instructlab/granite-7b-lab-GGUF",
"description": "# InstructLab Granite 7B",
"hw": "CPU",
"registry": "Hugging Face",
"license": "Apache-2.0",
"url": "https://huggingface.co/instructlab/granite-7b-lab-GGUF/resolve/main/granite-7b-lab-Q4_K_M.gguf",
"memory": 4080218931,
"properties": {
"chatFormat": "openchat"
},
+ "sha256": "6adeaad8c048b35ea54562c55e454cc32c63118a32c7b8152cf706b290611487",
+ "backend": "llama-cpp"
}],
"categories": []
}
```
================================================
FILE: PACKAGING-GUIDE.md
================================================
# Packaging guide
## ApplicationCatalog
AI Lab uses an internal catalog embedded within the application. This catalog is loaded
by AI Lab and displayed when you access the catalog page.
The format of the catalog is JSON. It is possible for users to have a custom version of
the catalog. In order to do so, copy the file located at https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/assets/ai.json to $HOME/podman-desktop/ai-lab/catalog.json and AI Lab will use it instead of the embedded one.
Any change done to this file will also be automatically loaded by AI Lab.
### Format of the catalog file
The catalog file has three main elements: categories, models and recipes. Each of these elements is
represented in the JSON file as an array.
The catalog is `versioned`. Current version can be found in [ai.json](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/assets/ai.json#L2).
> :warning: when the version of the catalog is undefined or different from the current one, the user-catalog will be ignored.
#### Categories
This is the top level construct of the catalog UI. Recipes are grouped into categories. A category
represents the kind of AI application. Although the list of categories provided by default by
AI Lab represents the AI landscape, it is possible to add new categories.
A category has three main attributes: an id (which should be unique among categories), a description
and a name. The category id attribute will then be used to attach a recipe to one or several categories.
#### Models
The catalog also lists the models that may be associated to recipes. A model is also a first class
citizen in AI Lab as they will be listed in the Models page and can be tested through the playground.
A model has the following attributes:
- ```id```: a unique identifier for the model
- ```name```: the model name
- ```description```: a detailed description about the model
- ```registry```: the model registry where the model is stored
- ```popularity```: an integer field giving the rating of the model. Can be thought as the number of stars
- ```license```: the license under which the model is available
- ```url```: the URL used to download the model
- ```memory```: the memory footprint of the model in bytes, as computed by the workflow `.github/workflows/compute-model-sizes.yml`
- ```sha256```: the SHA-256 checksum to be used to verify the downloaded model is identical to the original. It is optional and it must be HEX encoded
#### Recipes
A recipe is a sample AI application that is packaged as one or several containers. It is built by AI Lab when the user chooses to download and run it on their workstation. It is provided as
source code and AI Lab will make sure the container images are built prior to launching the containers.
A recipe has the following attributes:
- ```id```: a unique identifier to the recipe
- ```name```: the recipe name
- ```description```: a detailed description about the recipe
- ```repository```: the URL where the recipe code can be retrieved
- ```ref```: an optional ref in the repository to checkout (a branch name, tag name, or commit full id - short commit id won't be recognized). If not defined, the default branch will be used
- ```categories```: an array of category id to be associated by this recipe
- ```basedir```: an optional path within the repository where the ai-lab.yaml file is located. If not provided, the ai-lab.yaml is assumed to be located at the root of the repository
- ```readme```: a markdown description of the recipe
- ```models```: an array of model id to be associated with this recipe
#### Recipe configuration file
The configuration file is called ```ai-lab.yaml``` and follows the following syntax.
The root elements are called ```version``` and ```application```.
```version``` represents the version of the specifications that ai-lab adheres to (so far, the only accepted value here is `v1.0`).
```application``` contains an attribute called ```containers``` whose syntax is an array of objects containing the following attributes:
- ```name```: the name of the container
- ```contextdir```: the context directory used to build the container.
- ```containerfile```: the containerfile used to build the image
- ```model-service```: a boolean flag used to indicate if the container is running the model or not
- ```arch```: an optional array of architectures with which this image is compatible. The values follow the
[GOARCH specification](https://go.dev/src/go/build/syslist.go)
- ```gpu-env```: an optional array of GPU environments with which this image is compatible. The only accepted value here is cuda.
- ```ports```: an optional array of ports on which the application listens.
- `image`: an optional image name to be used when building the container image.
The container that is running the service (having the ```model-service``` flag equal to ```true```) can use at runtime
the model managed by AI Lab through an environment variable ```MODEL_PATH``` whose value is the full path name of the
model file.
Below is given an example of such a configuration file:
```yaml
application:
containers:
- name: chatbot-inference-app
contextdir: ai_applications
containerfile: builds/Containerfile
- name: chatbot-model-service
contextdir: model_services
containerfile: base/Containerfile
model-service: true
arch:
- arm64
- amd64
ports:
- 8001
image: quay.io/redhat-et/chatbot-model-service:latest
- name: chatbot-model-servicecuda
contextdir: model_services
containerfile: cuda/Containerfile
model-service: true
gpu-env:
- cuda
arch:
- amd64
ports:
- 8501
image: quay.io/redhat-et/model_services:latest
```
================================================
FILE: README.md
================================================
# Podman AI Lab
Podman AI Lab is an open source extension for Podman Desktop to work with LLMs (Large Language Models) on a local environment. Featuring a recipe catalog with common AI use cases, a curated set of open source models, and a playground for learning, prototyping and experimentation, Podman AI Lab helps you to quickly and easily get started bringing AI into your applications, without depending on infrastructure beyond your laptop, ensuring data privacy and security.
## Topics
- [Technology](#technology)
- [Extension features](#extension-features)
- [Requirements](#requirements)
- [Installation](#installation)
- [Usage](#usage)
- [Contributing](#contributing)
- [Feedback](#feedback)
## Technology
Podman AI Lab uses [Podman](https://podman.io) machines to run inference servers for LLM models and AI applications.
The AI models can be downloaded, and common formats like [GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md), [Pytorch](https://pytorch.org) or [Tensorflow](https://www.tensorflow.org) are supported.
## Extension features
### AI models
Podman AI Lab provides a curated list of open source AI models and LLMs. Once downloaded, the models are available to be used for AI applications, model services and playgrounds.
#### Model services
Once a model is downloaded, a model service can be started. A model service is an inference server that is running in a container and exposing the model through the well-known chat API common to many providers.
#### Playgrounds
The integrated Playground environments allow for experimenting with available models in a local environment. An intuitive user prompt helps in exploring the capabilities and accuracy of various models and aids in finding the best model for the use case at hand. The Playground interface further allows for parameterizing models to further optimize the settings and attributes of each model.
### AI applications
Once an AI model is available through a well-known endpoint, it's easy to imagine a new world of applications that will connect and use the AI model. Podman AI Lab supports AI applications as a set of containers that are connected together.
Podman AI Lab ships with a so-called Recipes Catalog that helps you navigate a number of core AI use cases and problem domains such as Chat Bots, Code Generators and Text Summarizers. Each recipe comes with detailed explanations and sample applications that can be run with various large language models (LLMs). Experimenting with multiple models allows finding the optimal one for your use case.
## Requirements
### Software
- [Podman Desktop 1.8.0+](https://github.com/containers/podman-desktop)
- [Podman 4.9.0+](https://github.com/containers/podman)
- Compatible with Windows, macOS & Linux
### Hardware
LLMs AI models are heavy resource consumers both in terms of memory and CPU. Each of the provided models consumes about 4GiB of memory and requires at least 4 CPUs to run.
We recommend a minimum of 12GB of memory and at least 4 CPUs for the Podman machine. On Windows, the podman machine shares memory and CPU with all the Windows Subsystem for Linux (WSL) machines. By default, WSL is set to 50% of total memory and all logical processors. This can be changed in the WSL Settings (See [WSL Config](https://learn.microsoft.com/en-us/windows/wsl/wsl-config#wslconfig)).
As an additional recommended practice, do not run more than 3 models simultaneously.
## Installation
You can install the Podman AI Lab extension directly inside Podman Desktop.
Go to Extensions > Catalog > Install Podman AI Lab.

To install a development version, use the `Install custom...` action as shown in the recording below.
The name of the image to use is `ghcr.io/containers/podman-desktop-extension-ai-lab`. You can get released tags for the image at https://github.com/containers/podman-desktop-extension-ai-lab/pkgs/container/podman-desktop-extension-ai-lab.

## Usage
1. **Download a model**
Let's select a model from the catalog and download it locally to our workstation.

2. **Start an inference server**
Once a model is available locally, let's start an inference server

3. **Start a playground to have a chat conversation with model**

4. **Start an AI application and use it from the browser**

## Contributing
Want to help develop and contribute to Podman AI Lab?
You can use `pnpm watch --extension-folder` from the Podman Desktop directory to automatically rebuild and test the AI Lab extension:
> **_Note_**: make sure you have the appropriate [prerequisites](https://github.com/containers/podman-desktop/blob/main/CONTRIBUTING.md#prerequisites-prepare-your-environment)
installed.
```sh
git clone https://github.com/containers/podman-desktop
git clone https://github.com/containers/podman-desktop-extension-ai-lab
cd podman-desktop-extension-ai-lab
corepack enable pnpm
pnpm install
pnpm build
cd ../podman-desktop
pnpm watch --extension-folder ../podman-desktop-extension-ai-lab/packages/backend
```
If you are live editing the frontend package, from packages/frontend folder:
```
$ pnpm watch
```
### Cleaning up resources
We'll be adding a way to let a user cleanup their environment: see issue https://github.com/containers/podman-desktop-extension-ai-lab/issues/469.
For the time being, please consider the following actions:
1. Remove the extension from Podman Desktop, from the Settings > Extensions
2. Remove the running playground environments from the list of Pods
3. Remove the images built by the recipes
4. Remove the containers related to AI
5. Cleanup your local clone of the recipes: `$HOME/podman-desktop/ai-lab`
### 📖 Providing a custom catalog
The extension provides by default a curated list of recipes, models and categories. However, this system is extensible and you can define your own.
To enhance the existing catalog, you can create a file located in the extension storage folder `$HOME/.local/share/containers/podman-desktop/extensions-storage/redhat.ai-lab/user-catalog.json`.
It must follow the same format as the default catalog [in the sources of the extension](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/packages/backend/src/assets/ai.json).
> :information_source: The default behaviour is to append the items of the user's catalog to the default one.
> :warning: Each item (recipes, models or categories) has a unique id; when conflicts between the default catalog and the user's catalog are found, the user's items overwrite the defaults.
### Packaging sample applications
Sample applications may be added to the catalog. See [packaging guide](https://github.com/containers/podman-desktop-extension-ai-lab/blob/main/PACKAGING-GUIDE.md) for detailed information.
## Roadmap
The roadmap is always open and we are looking for your feedback. Please create new issues and upvote the issues that feel most important to you.
We will be working on the following items:
- **Expanded Recipes**: Discover new use cases and samples to inspire and accelerate your applications.
- **GPU Acceleration**: Speeding up processing times by leveraging GPU acceleration.
- **API/CLI**: Interact with Podman AI Lab from CLI and APIs.
- **Enhanced Playgrounds**: Streamlined workflows and UX giving a better space to experiment with LLMs and quickly iterate.
- **Fine Tuning with [InstructLab](https://instructlab.ai/)**: Re-train LLMs with a set of taxonomy knowledges. Learn more about [the InstructLab project](https://github.com/instructlab).
- **Enable Function Calling**: Use LLMs to retrieve or interact with external tools by doing API calls.
- **Local RAG**: Explore RAG pattern, load your document and test behavior of the model.
- **Bridge with AI Platforms (incl. K8s)**: Connect to remote models and ease deployment of applications.
## Feedback
You can provide your feedback on the extension with [this form](https://forms.gle/tctQ4RtZSiMyQr3R8) or create [an issue on this repository](https://github.com/containers/podman-desktop-extension-ai-lab/issues).
================================================
FILE: RELEASE.md
================================================
# Release process for Podman AI Lab
## Pre-requisites
- Create Enhancement Issue `Release vX.X.X` for current sprint, then update the label to `kind/release` and assign it to yourself.
- Confirm with Podman Desktop maintainers that pending / need-to-go-in PR's have been merged.
- Notify main contributors on Discord / Slack.
In the below example, we will pretend that we're upgrading from `1.1.0` to `1.2.0`. Please use the CORRECT release numbers as these are just example numbers.
## Release timeline
Below is what a typical release week may look like:
- **Monday (Notify):** 48-hour notification. Communicate to maintainers and public channels a release will be cut on Wednesday and to merge any pending PRs. Inform QE team. Start work on blog post as it is usually the longest part of the release process.
- **Tuesday (Staging, Testing & Blog):** Stage the release (see instructions below) to create a new cut of the release to test. Test the pre-release (master branch) build briefly. Get feedback from committers (if applicable). Push the blog post for review (as it usually takes a few back-and-forth reviews on documentation).
- **Wednesday (Release):** Publish the new release on the catalog using the below release process.
- **Thursday (Post-release Testing & Blog):** Test the post-release build briefly for any critical bugs. Confirm that new release has been pushed to the catalog. Push the blog post live. Get a known issues list together from QE and publish to the Podman Desktop Discussions, link to this from the release notes.
- **Friday (Communicate):** Friday is statistically the best day for new announcements. Post on internal channels. Post on reddit, hackernews, twitter, etc.
## Releasing on GitHub
1. Go to https://github.com/containers/podman-desktop-extension-ai-lab/actions/workflows/release.yaml
1. Click on the top right drop-down menu `Run workflow`
1. Enter the name of the release. Example: `1.2.0` (DO NOT use the v prefix like v1.2.0)
1. Specify the branch to use for the new release. It's main for all major releases. For a bugfix release, you'll select a different branch.
1. Click on the `Run workflow` button.
1. Note: `Run workflow` takes approximately 2-3 minutes.
1. Close the milestone for the respective release, make sure that all tasks within the milestone are completed / updated before closing. https://github.com/containers/podman-desktop-extension-ai-lab/milestones
1. If not already created, click on `New Milestone` and create a new milestone for the NEXT release.
1. Check that https://github.com/containers/podman-desktop-extension-ai-lab/actions/workflows/release.yaml has been completed.
1. There should be an automated PR that has been created. This will be automatically merged in after all tests have run (takes 5-10 minutes). The title looks like `chore: 📢 Bump version to 1.3.0`. Rerun the workflow manually if some of the e2e tests are failing.
1. Above PR MUST be merged before continuing with the steps.
1. Edit the new release https://github.com/containers/podman-desktop-extension-ai-lab/releases/edit/v1.2.0
1. Select the previous tag (v1.1.0), click on `Generate release notes`, and then click on `Update release`
## Test release before it is rolling out.
The release is a pre-release, it means it is not yet the latest version, so no clients will automatically update to this version.
It allows QE (and everyone else) to test the release before it goes live on the catalog.
## Next phase
- ❌ All severe bugs and regressions are investigated and discussed. If we agree any should block the release, we need to fix the bugs and do a respin of the release with a new .z release such as 1.2.1 instead of 1.2.0.
Create a branch if it does not exist. For example 1.2.x if 1.2.0 failed. Then, cherry-pick bugfixes in that branch.
- ✅ If committers agree we have a green light, proceed. **Do not forget to change the release from 'pre-release' to 'latest release' before proceeding**.
## Updating catalog
Pre-requisites:
- Ensure the release is OK (green workflow, image has been published https://github.com/containers/podman-desktop-extension-ai-lab/releases https://github.com/containers/podman-desktop-extension-ai-lab/pkgs/container/podman-desktop-extension-ai-lab).
#### Catalog
Create and submit a PR to the catalog (https://github.com/containers/podman-desktop-catalog on branch gh-pages). This is manual and will be automated in the future.
================================================
FILE: SECURITY.md
================================================
## Security and Disclosure Information Policy for the Podman Desktop Extension AI Lab Project
The Podman Desktop Extension AI Lab Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
================================================
FILE: USAGE_DATA.md
================================================
# Data Collection
The AI Lab extension uses telemetry to collect anonymous usage data in order to identify issues and improve our user experience. You can read our privacy statement
[here](https://developers.redhat.com/article/tool-data-collection).
Telemetry for the extension is based on the Podman Desktop telemetry.
Users are prompted during Podman Desktop first startup to accept or decline telemetry. This setting can be
changed at any time in Settings > Preferences > Telemetry.
On disk the setting is stored in the `"telemetry.*"` keys within the settings file,
at `$HOME/.local/share/containers/podman-desktop/configuration/settings.json`. A generated anonymous id
is stored at `$HOME/.redhat/anonymousId`.
## What's included in the telemetry data
- General information, including operating system, machine architecture, and country.
- When the extension starts and stops.
- When the icon to enter the extension zone is clicked.
- When a recipe page is opened (with recipe Id and name).
- When a sample application is pulled (with recipe Id and name).
- When a playground is started or stopped (with model Id).
- When a request is sent to a model in the playground (with model Id, **without** request content).
- When a model is downloaded or deleted from disk.
No personally identifiable information is captured. An anonymous id is used so that we can correlate the actions of a user even if we can't tell who they are.
================================================
FILE: api/openapi.yaml
================================================
openapi: 3.0.0
info:
title: Podman Desktop AI Lab API
description: API for interacting with the Podman Desktop AI Lab service.
version: 0.0.1
servers:
- url: http://{host}:{port}
description: Podman Desktop AI Lab API server
variables:
host:
default: 127.0.0.1
port:
default: '10434'
tags:
- name: server
description: Server information
paths:
/api/version:
get:
operationId: getServerVersion
tags:
- server
description: Return the Podman Desktop AI Lab API server version
summary: Return the Podman Desktop AI Lab API server version
responses:
'200':
description: The Podman Desktop AI Lab API server version was successfully fetched
content:
application/json:
schema:
type: object
additionalProperties: false
properties:
version:
type: string
required:
- version
/api/tags:
get:
operationId: getModels
tags:
- models
description: List models that are available locally
summary: List models that are available locally
responses:
'200':
description: The models were successfully fetched
content:
application/json:
schema:
$ref: '#/components/schemas/ListResponse'
/api/pull:
post:
operationId: pullModel
tags:
- models
description: |
Download a model from the Podman AI Lab catalog.
summary: |
Download a model from the Podman AI Lab Catalog.
requestBody:
required: true
description: Request to pull a model
content:
application/json:
schema:
$ref: '#/components/schemas/PullRequest'
responses:
'200':
description: Model was successfully pulled
content:
application/x-ndjson:
schema:
$ref: '#/components/schemas/ProgressResponse'
/api/show:
post:
operationId: showModel
tags:
- models
description: |
Not implemented, returns an empty object - Show information about a model including details, modelfile, template,
parameters, license, and system prompt.
summary: |
Show information about a model including details, modelfile, template,
parameters, license, and system prompt.
requestBody:
required: true
description: Request to show a model
content:
application/json:
schema:
$ref: '#/components/schemas/ShowRequest'
responses:
'200':
description: The model's information was successfully fetched
content:
application/json:
schema:
$ref: '#/components/schemas/ShowResponse'
/api/generate:
post:
operationId: generateResponse
tags:
- generate
description: |
Generate a response for a given prompt with a provided model. This is
a streaming endpoint, so there will be a series of responses. The
final response object will include statistics and additional data from
the request.
summary: |
Generate a response for a given prompt with a provided model. This is
a streaming endpoint, so there will be a series of responses. The final
response object will include statistics and additional data from the
request.
requestBody:
required: true
description: Request to generate a response
content:
application/json:
schema:
$ref: '#/components/schemas/GenerateRequest'
responses:
'200':
description: A response was successfully generated for the prompt
content:
application/json:
schema:
$ref: '#/components/schemas/GenerateResponse'
/api/chat:
post:
operationId: generateChat
tags:
- chat
- generate
description: |
Generate the next message in a chat with a provided model. This is a
streaming endpoint, so there will be a series of responses. Streaming
can be disabled using "stream": false. The final response object will
include statistics and additional data from the request.
summary: |
Generate the next message in a chat with a provided model. This is a
streaming endpoint, so there will be a series of responses. Streaming
can be disabled using "stream": false. The final response object will
include statistics and additional data from the request.
requestBody:
required: true
description: Request to generate a response in a chat
content:
application/json:
schema:
$ref: '#/components/schemas/ChatRequest'
responses:
'200':
description: The next message was successfully generated for the chat
content:
application/json:
schema:
$ref: '#/components/schemas/ChatResponse'
/api/ps:
get:
operationId: getRunningModels
tags:
- models
description: List running models
summary: List running models
responses:
'200':
description: The list of running models was successfully fetched
content:
application/json:
schema:
$ref: '#/components/schemas/ProcessResponse'
components:
schemas:
ListResponse:
type: object
description: Response from a list request
properties:
models:
type: array
items:
$ref: '#/components/schemas/ListModelResponse'
ListModelResponse:
type: object
description: Response from a list request
properties:
name:
type: string
model:
type: string
modified_at:
type: string
format: date-time
size:
type: integer
digest:
type: string
details:
$ref: '#/components/schemas/ModelDetails'
ProcessResponse:
type: object
description: Response with a list of running models
properties:
models:
type: array
items:
$ref: '#/components/schemas/ProcessModelResponse'
ProcessModelResponse:
type: object
description: Running model description
properties:
name:
type: string
model:
type: string
size:
type: integer
digest:
type: string
details:
$ref: '#/components/schemas/ModelDetails'
expires_at:
type: string
format: date-time
size_vram:
type: integer
ModelDetails:
type: object
description: Details about a model
properties:
parent_model:
type: string
format:
type: string
family:
type: string
families:
type: array
items:
type: string
parameter_size:
type: string
quantization_level:
type: string
PullRequest:
type: object
description: Request to pull a model
properties:
model:
type: string
description: The name of the model to pull
example: instructlab/granite-7b-lab-GGUF
insecure:
type: boolean
description: |
allow insecure connections to the catalog.
stream:
type: boolean
description: |
If false the response will be returned as a single response object,
rather than a stream of objects
required:
- model
ProgressResponse:
type: object
description: The response returned from various streaming endpoints
properties:
status:
type: string
description: The status of the request
digest:
type: string
description: The SHA256 digest of the blob
total:
type: integer
description: The total size of the task
completed:
type: integer
description: The completed size of the task
ShowRequest:
type: object
description: Request to show a model
properties:
model:
type: string
description: The name of the model to show
required:
- model
ShowResponse:
type: object
description: Response from a show request
properties:
license:
type: string
description: The model license
modelfile:
type: string
description: The modelfile content
parameters:
type: string
description: The model parameters
template:
type: string
description: The model template
system:
type: string
description: The model system message/prompt
details:
$ref: '#/components/schemas/ModelDetails'
messages:
type: array
items:
$ref: '#/components/schemas/Message'
GenerateRequest:
type: object
description: Request to generate a response
properties:
model:
type: string
description: The model name
prompt:
type: string
description: The prompt to generate a response for
suffix:
type: string
images:
type: array
items:
type: string
format: byte
description: |
A list of base64-encoded images (for multimodal models such as
llava)
format:
type: string
description: |
The format to return a response in. Currently the only accepted
value is json
system:
type: string
description: |
System message (overrides what is defined in the Modelfile)
template:
type: string
description: |
The prompt template to use (overrides what is defined in the
Modelfile)
context:
type: array
items:
type: integer
description: |
The context parameter returned from a previous request to generate,
this can be used to keep a short conversational memory
example: []
stream:
type: boolean
description: |
If false the response will be returned as a single response object,
rather than a stream of objects
raw:
type: boolean
description: |
If true no formatting will be applied to the prompt. You may choose
to use the raw parameter if you are specifying a full templated
prompt in your request to the API
keep_alive:
$ref: '#/components/schemas/Duration'
required:
- model
GenerateResponse:
type: object
description: Response from a generate request
properties:
model:
type: string
description: The model name that generated the response
created_at:
type: string
format: date-time
description: Timestamp of the response
response:
type: string
description: |
The textual response itself. When done, empty if the response was
streamed, if not streamed, this will contain the full response
done:
type: boolean
description: Specifies if the response is complete
context:
type: array
items:
type: integer
description: |
When done, encoding of the conversation used in this response
total_duration:
type: number
description: When done, time spent generating the response
load_duration:
type: number
description: When done, time spent in nanoseconds loading the model
prompt_eval_count:
type: integer
description: When done, number of tokens in the prompt
prompt_eval_duration:
type: number
description: |
When done, time spent in nanoseconds evaluating the prompt
eval_count:
type: integer
description: When done, number of tokens in the response
eval_duration:
type: number
description: |
When done, time in nanoseconds spent generating the response
ChatRequest:
type: object
description: Request to generate a response in a chat
properties:
model:
type: string
description: The model name
messages:
type: array
items:
$ref: '#/components/schemas/Message'
description: Messages of the chat - can be used to keep a chat memory
stream:
type: boolean
description: Enable streaming of returned response
format:
type: string
description: Format to return the response in (e.g. "json")
keep_alive:
$ref: '#/components/schemas/Duration'
options:
$ref: '#/components/schemas/Options'
ChatResponse:
type: object
description: Response from a chat request
properties:
model:
type: string
description: The model name
created_at:
type: string
format: date-time
description: Timestamp of the response
message:
$ref: '#/components/schemas/Message'
done_reason:
type: string
description: Reason the model stopped generating text
done:
type: boolean
description: Specifies if the response is complete
total_duration:
type: number
description: Total duration of the request
load_duration:
type: number
description: Load duration of the request
prompt_eval_count:
type: integer
description: Count of prompt evaluations
prompt_eval_duration:
type: number
description: Duration of prompt evaluations
eval_count:
type: integer
description: Count of evaluations
eval_duration:
type: number
description: Duration of evaluations
Message:
type: object
description: A message in a chat
properties:
role:
type: string
content:
type: string
images:
type: array
items:
type: string
format: byte
Duration:
type: string
description: A string representing the duration
example: "5m"
Options:
type: object
description: |
Advanced model and runner options for generation and chat requests
properties:
num_keep:
type: integer
description: |
Specifies the number of tokens from the beginning of
the context to retain when the context limit is reached.
(Default: 4)
example: 4
seed:
type: integer
description: |
Sets the random number seed to use for generation. Setting this to
a specific number will make the model generate the same text for
the same prompt.
(Default: 0)
example: -1
num_predict:
type: integer
description: |
Maximum number of tokens to predict when generating text.
(Default: 128, -1 = infinite generation, -2 = fill context)
example: -1
top_k:
type: integer
description: |
Reduces the probability of generating nonsense. A higher value
(e.g. 100) will give more diverse answers, while a lower value
(e.g. 10) will be more conservative.
(Default: 40)
example: 40
top_p:
type: number
format: float
description: |
Works together with top-k. A higher value (e.g., 0.95) will lead to
more diverse text, while a lower value (e.g., 0.5) will generate
more focused and conservative text.
(Default: 0.9)
example: 0.9
tfs_z:
type: number
format: float
description: |
Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the
impact more, while a value of 1.0 disables this setting.
(default: 1)
example: 1.0
typical_p:
type: number
format: float
description: |
Controls the selection of typical words based on their probability
distribution. A higher value (e.g., 0.95) focuses on more typical
words, reducing the chance of unusual words being selected.
(Default: 1.0)
example: 1.0
repeat_last_n:
type: integer
description: |
Sets how far back for the model to look back to prevent repetition.
(Default: 64, 0 = disabled, -1 = num_ctx)
example: 64
temperature:
type: number
format: float
description: |
The temperature of the model. Increasing the temperature will make
the model answer more creatively.
(Default: 0.8)
example: 0.8
repeat_penalty:
type: number
format: float
description: |
Sets how strongly to penalize repetitions. A higher value
(e.g., 1.5) will penalize repetitions more strongly, while a lower
value (e.g., 0.9) will be more lenient.
(Default: 1.1)
example: 1.1
presence_penalty:
type: number
format: float
description: |
Applies a penalty to tokens that have already appeared in the
generated text, encouraging the model to introduce new tokens. A
higher value increases this penalty, promoting more varied and less
repetitive output.
(Default: 0.8)
example: 0.8
frequency_penalty:
type: number
format: float
description: |
Penalizes tokens based on their frequency in the generated text so
far. A higher value reduces the likelihood of frequent tokens being
generated again, promoting more diverse outputs.
(Default: 0.8)
example: 0.8
mirostat:
type: number
format: float
description: |
Enable Mirostat sampling for controlling perplexity.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)
example: 0
mirostat_tau:
type: number
format: float
description: |
Controls the balance between coherence and diversity of the output.
A lower value will result in more focused and coherent text.
(Default: 5.0)
example: 5.0
mirostat_eta:
type: number
format: float
description: |
Influences how quickly the algorithm responds to feedback from the
generated text. A lower learning rate will result in slower
adjustments, while a higher learning rate will make the algorithm
more responsive.
(Default: 0.1)
example: 0.1
penalize_newline:
type: boolean
description: |
Determines whether the model should penalize the generation of
newlines, which can help control the structure and formatting of
the output.
(Default: true)
example: true
stop:
type: array
items:
type: string
description: |
Sets the stop sequences to use. When this pattern is encountered
the LLM will stop generating text and return. Multiple stop patterns
may be set by specifying multiple separate stop parameters in a
modelfile.
example: ['AI assistant.']
numa:
type: boolean
description: |
Indicates whether to use Non-Uniform Memory Access (NUMA) for
optimizing memory usage and performance on multi-processor systems.
(Default: false)
example: false
num_ctx:
type: integer
description: |
Sets the size of the context window used to generate the next token.
(Default: 2048)
example: 2048
num_batch:
type: integer
description: |
Specifies the number of batches for processing.
(Default: 512)
example: 512
num_gpu:
type: integer
description: |
Specifies the number of GPUs to use. A value of -1 uses all
available GPUs.
(Default: -1)
example: -1
main_gpu:
type: integer
description: |
Specifies the primary GPU to use for processing.
(Default: 0)
low_vram:
type: boolean
description: |
Indicates whether to optimize the model for low VRAM usage.
(Default: false)
example: false
f16_kv:
type: boolean
description: |
Indicates whether to use 16-bit floating point precision for
key-value pairs, reducing memory usage.
(Default: false)
example: true
logits_all:
type: boolean
description: |
Specifies whether to output logits for all tokens.
(Default: false)
example: false
vocab_only:
type: boolean
description: |
Indicates whether to only load the vocabulary without the full model.
(Default: false)
example: false
use_mmap:
type: boolean
description: |
Determines whether to use memory-mapped files for loading the model,
improving performance on large models.
(Default: true)
example: true
use_mlock:
type: boolean
description: |
Determines whether to use memory locking to prevent swapping the
model out of RAM.
(Default: false)
example: false
num_thread:
type: integer
description: |
Specifies the number of threads to use for processing. A value of
0 uses all available threads.
(Default: 0)
example: 0
================================================
FILE: clean.sh
================================================
rm -rf node_modules packages/backend/node_modules packages/frontend/node_modules
================================================
FILE: commitlint.config.js
================================================
module.exports = { extends: ['@commitlint/config-conventional'] };
================================================
FILE: docs/proposals/ai-studio.md
================================================
# Motivation
Today, there is no notion of ordering between the containers. But we know that we have a dependency between
the client application and the container that is running the model.
The second issue is that there is no concept of starting point for a container so today we rely only on the
container being started by the container engine and we know that this is not adequate for the model service container
So this is handled by a kind of dirty fix: the containers are all started in parallel, but the client application
fails because the model service is not yet started (as it takes a while), so we keep restarting the client application
until the model service is properly started.
The purpose of this change is to propose an update to the ai-lab.yaml so that it is as generic as
possible and inspired by the Compose specification.
## Proposed changes
Define a condition for the container to be properly started: this would be based on the readinessProbe that can already
be defined in a Kubernetes container. In the first iteration, we would support only the ```exec``` field. If
```readinessProbe``` is defined, then we would check for the healthcheck status field to be ```healthy```
So the current chatbot file would be updated from:
```yaml
application:
type: language
name: chatbot
description: This is a LLM chatbot application that can interact with a llamacpp model-service
containers:
- name: chatbot-inference-app
contextdir: ai_applications
containerfile: builds/Containerfile
- name: chatbot-model-service
contextdir: model_services
containerfile: base/Containerfile
model-service: true
backend:
- llama
arch:
- arm64
- amd64
- name: chatbot-model-servicecuda
contextdir: model_services
containerfile: cuda/Containerfile
model-service: true
backend:
- llama
gpu-env:
- cuda
arch:
- amd64
```
to
```yaml
application:
type: language
name: chatbot
description: This is a LLM chatbot application that can interact with a llamacpp model-service
containers:
- name: chatbot-inference-app
contextdir: ai_applications
containerfile: builds/Containerfile
readinessProbe: # added
exec: # added
command: # added
- curl -f localhost:8080 || exit 1 # added
- name: chatbot-model-service
contextdir: model_services
containerfile: base/Containerfile
model-service: true
readinessProbe: # added
exec: # added
command: # added
- curl -f localhost:7860 || exit 1 # added
backend:
- llama
arch:
- arm64
- amd64
- name: chatbot-model-service
contextdir: model_services
containerfile: cuda/Containerfile
model-service: true
readinessProbe: # added
exec: # added
command: # added
- curl -f localhost:7860 || exit 1 # added
backend:
- llama
gpu-env:
- cuda
arch:
- amd64
```
From the Podman Desktop API point of view, this would require extending the
[ContainerCreateOptions](https://podman-desktop.io/api/interfaces/ContainerCreateOptions) structure to support the
HealthCheck option.
================================================
FILE: docs/proposals/state-management.md
================================================
# State management
The backend manages and persists the State. The backend pushes new state to the front-end
when changes happen, and the front-end can ask for the current value of the state.
The front-end uses `readable` stores to expose the state to the different pages. The store
listens for new states pushed by the backend (`onMessage`), and asks for the current state
at initial time.
The pages of the front-end subscribe to the store to get the value of the state in a reactive manner.
## Catalog
The catalog is persisted as a file in the user's filesystem. The backend reads the file at startup,
and watches the file for changes. The backend updates the state as soon as it detects changes.
The front-end uses a `readable` store, which waits for changes on the Catalog state
(using `onMessage('new-catalog-state', data)`),
and asks for the current state at startup (with `postMessage('ask-catalog-state')`).
The interested pages of the front-end subscribe to the store to get the value
of the Catalog state in a reactive manner.
## Pulled applications
The front-end initiates the pulling of an application (using `postMessage('pull-application', app-id)`).
The backend manages and persists the state of the pulled applications and pushes every update
on the state (progression, etc.) (using `postMessage('new-pulled-application-state', app-id, data)`).
The front-end uses a `readable` store, which waits for changes on the Pulled Applications state
(using `onMessage('new-pulled-application-state')`), and asks for the current state at startup
(with `postMessage('ask-pulled-applications-state')`).
The interested pages of the front-end subscribe to the store to get the value of the Pulled Applications state
in a reactive manner.
## Errors
The front-end initiates operations (pull application, etc). When an error happens during an operation,
the backend manages and persists the error in a centralized way.
The backend pushes new errors (using `postMessage('new-error-state', data)`).
Optionally, it can push errors to the core Podman Desktop, to display errors in the notifications system.
The front-end uses a `readable` store, which waits for changes on the Errors state (using `onMessage('new-error-state')`),
and asks for the current state at startup (using `postMessage('ask-error-state')`).
The interested pages of the front-end subscribe to the store to display the errors related to the page.
The user can acknowledge an error (using a `postMessage('ack-error', id)`).
================================================
FILE: eslint.config.mjs
================================================
/**********************************************************************
* Copyright (C) 2024 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
***********************************************************************/
import globals from 'globals';
import js from '@eslint/js';
import typescriptLint from 'typescript-eslint';
import tsParser from '@typescript-eslint/parser';
import svelteParser from 'svelte-eslint-parser';
import importPlugin from 'eslint-plugin-import';
import { fixupConfigRules, fixupPluginRules } from '@eslint/compat';
import { fileURLToPath } from 'node:url';
import path from 'node:path';
import { FlatCompat } from '@eslint/eslintrc';
import unicorn from 'eslint-plugin-unicorn';
import noNull from 'eslint-plugin-no-null';
import sonarjs from 'eslint-plugin-sonarjs';
import etc from 'eslint-plugin-etc';
import svelte from 'eslint-plugin-svelte';
import redundantUndefined from 'eslint-plugin-redundant-undefined';
import simpleImportSort from 'eslint-plugin-simple-import-sort';
// Recreate the CommonJS __filename/__dirname values inside this ES module,
// so paths below can be resolved relative to the repository root.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// FlatCompat bridges legacy eslintrc-style shareable configs ("extends")
// into the flat-config format used by this file.
const compat = new FlatCompat({
  baseDirectory: __dirname,
  recommendedConfig: js.configs.recommended,
  allConfig: js.configs.all,
});
// tsconfig locations handed to the typed-linting parser via parserOptions.project.
const TYPESCRIPT_PROJECTS = ['packages/*/tsconfig.json', 'tests/*/tsconfig.json'];
// Flat ESLint configuration for the whole monorepo (backend, frontend, shared, tests).
// NOTE: the rules object previously declared several keys twice; in a JS object
// literal the LAST entry silently wins, so the duplicates have been removed
// while keeping the value that was actually in effect.
export default [
  {
    ignores: [
      '*.config.*js',
      '**/*.config.*js',
      '**/dist/**/*',
      '**/test-resources',
      '**/__mocks__/',
      '**/coverage/',
      'packages/backend/media/**',
      '**/.svelte-kit/',
      'scripts/**',
      '**/src-generated/',
      'tests/playwright/tests/playwright/output/**',
    ],
  },
  js.configs.recommended,
  ...typescriptLint.configs.recommended,
  sonarjs.configs.recommended,
  ...svelte.configs['flat/recommended'],
  ...fixupConfigRules(
    compat.extends('plugin:import/recommended', 'plugin:import/typescript', 'plugin:etc/recommended'),
  ),
  {
    plugins: {
      // compliant v9 plug-ins
      unicorn,
      // non-compliant v9 plug-ins
      etc: fixupPluginRules(etc),
      import: fixupPluginRules(importPlugin),
      'no-null': fixupPluginRules(noNull),
      'redundant-undefined': fixupPluginRules(redundantUndefined),
      'simple-import-sort': fixupPluginRules(simpleImportSort),
    },
    settings: {
      'import/resolver': {
        typescript: true,
        node: true,
        'eslint-import-resolver-custom-alias': {
          alias: {
            '/@': './src',
            '/@gen': './src-generated',
          },
          extensions: ['.ts'],
          packages: ['packages/*'],
        },
      },
    },
  },
  {
    linterOptions: {
      reportUnusedDisableDirectives: 'off',
    },
    languageOptions: {
      globals: {
        ...globals.node,
      },
      // parser: tsParser,
      sourceType: 'module',
      parserOptions: {
        extraFileExtensions: ['.svelte'],
        warnOnUnsupportedTypeScriptVersion: false,
        project: TYPESCRIPT_PROJECTS,
      },
    },
  },
  {
    rules: {
      eqeqeq: 'error',
      'prefer-promise-reject-errors': 'error',
      '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_', caughtErrors: 'none' }],
      '@typescript-eslint/no-var-requires': 'off',
      '@typescript-eslint/consistent-type-imports': 'error',
      '@typescript-eslint/no-explicit-any': 'error',
      '@typescript-eslint/await-thenable': 'error',
      '@typescript-eslint/no-floating-promises': ['error', { ignoreVoid: false }],
      '@typescript-eslint/no-misused-promises': 'error',
      '@typescript-eslint/prefer-optional-chain': 'error',
      // was declared twice ('off' first, then 'error'); 'error' was in effect
      '@typescript-eslint/explicit-function-return-type': 'error',
      '@typescript-eslint/prefer-nullish-coalescing': [
        'error',
        {
          ignoreConditionalTests: true,
        },
      ],
      '@typescript-eslint/no-require-imports': 'off',
      // unicorn custom rules
      'unicorn/prefer-node-protocol': 'error',
      'no-null/no-null': 'error',
      'sonarjs/no-empty-function': 'off',
      'sonarjs/deprecation': 'off',
      'sonarjs/todo-tag': 'off',
      'sonarjs/sonar-no-fallthrough': 'off',
      /**
       * Having a semicolon helps the optimizer interpret your code correctly.
       * This avoids rare errors in optimized code.
       * @see https://twitter.com/alex_kozack/status/1364210394328408066
       */
      semi: ['error', 'always'],
      /**
       * This will make the history of changes in the hit a little cleaner
       */
      'comma-dangle': ['warn', 'always-multiline'],
      /**
       * Just for beauty
       */
      quotes: ['error', 'single', { allowTemplateLiterals: true }],
      // disabled import/namespace rule as the plug-in is not fully compatible using the compat mode
      'import/namespace': 'off',
      'import/no-duplicates': 'error',
      'import/first': 'error',
      'import/newline-after-import': 'error',
      'import/no-extraneous-dependencies': 'error',
      'import/no-unresolved': 'off',
      'import/default': 'off',
      'import/no-named-as-default-member': 'off',
      'import/no-named-as-default': 'off',
      'sonarjs/cognitive-complexity': 'off',
      'sonarjs/no-duplicate-string': 'off',
      'sonarjs/no-empty-collection': 'off',
      'sonarjs/no-small-switch': 'off',
      'sonarjs/no-unused-expressions': 'off',
      'etc/no-deprecated': 'off',
      // was declared twice ('error' first, then 'off'); 'off' was in effect
      'etc/no-commented-out-code': 'off',
      'redundant-undefined/redundant-undefined': 'error',
      'import/no-restricted-paths': [
        'error',
        {
          zones: [
            {
              target: './packages/backend/**/*',
              from: ['./packages/frontend/**/*'],
            },
            {
              target: './packages/frontend/**/*',
              from: ['./packages/backend/**/*'],
            },
          ],
        },
      ],
      // disabled as code in this project is not yet compliant:
      'svelte/valid-compile': 'off',
      'no-undef': 'off',
    },
  },
  {
    files: ['**/*.svelte'],
    languageOptions: {
      parser: svelteParser,
      ecmaVersion: 5,
      sourceType: 'script',
      parserOptions: {
        parser: tsParser,
      },
    },
    rules: {
      eqeqeq: 'off',
      'etc/no-implicit-any-catch': 'off',
      'no-inner-declarations': 'off',
      'sonarjs/code-eval': 'off',
      'sonarjs/different-types-comparison': 'off',
      'sonarjs/prefer-nullish-coalescing': 'off',
      'sonarjs/no-nested-template-literals': 'off',
      'sonarjs/no-nested-conditional': 'off',
      '@typescript-eslint/no-unused-vars': 'off',
      '@typescript-eslint/ban-types': 'off',
      '@typescript-eslint/no-unused-expressions': 'off',
    },
  },
  {
    // Frontend code runs in a browser: disable node globals, enable browser ones.
    files: ['packages/frontend/**'],
    languageOptions: {
      globals: {
        ...Object.fromEntries(Object.entries(globals.node).map(([key]) => [key, 'off'])),
        ...globals.browser,
      },
    },
  },
  {
    // Shared code must be environment-agnostic: no node and no browser globals.
    files: ['packages/shared/**'],
    languageOptions: {
      globals: {
        ...Object.fromEntries(Object.entries(globals.node).map(([key]) => [key, 'off'])),
        ...Object.fromEntries(Object.entries(globals.browser).map(([key]) => [key, 'off'])),
      },
    },
  },
];
================================================
FILE: package.json
================================================
{
"name": "ai-lab-monorepo",
"displayName": "ai-lab-monorepo",
"description": "ai-lab-monorepo",
"publisher": "redhat",
"version": "1.10.0-next",
"license": "Apache-2.0",
"private": true,
"engines": {
"node": ">=24.0.0",
"npm": ">=10.2.3"
},
"scripts": {
"build": "concurrently \"cd packages/frontend && pnpm run build\" \"cd packages/backend && pnpm run build\"",
"watch": "concurrently \"cd packages/frontend && pnpm run watch\" \"cd packages/backend && pnpm run watch\"",
"format:check": "prettier --check \"**/src/**/*.{ts,svelte}\"",
"format:fix": "prettier --write \"**/src/**/*.{ts,svelte}\"",
"lint:check": "eslint . --cache",
"lint:fix": "eslint . --cache --fix",
"svelte:check": "svelte-check",
"test:backend": "vitest run -r packages/backend --passWithNoTests --coverage",
"test:frontend": "vitest -c packages/frontend/vite.config.js run packages/frontend --passWithNoTests --coverage",
"test:shared": "vitest run -r packages/shared --passWithNoTests --coverage",
"test:unit": "pnpm run test:backend && pnpm run test:shared && pnpm run test:frontend",
"test:e2e": "cd tests/playwright && pnpm run test:e2e",
"test:e2e:smoke": "cd tests/playwright && pnpm run test:e2e:smoke",
"test:e2e:instructlab": "cd tests/playwright && pnpm run test:e2e:instructlab",
"typecheck:shared": "tsc --noEmit --project packages/shared",
"typecheck:frontend": "tsc --noEmit --project packages/frontend",
"typecheck:backend": "cd packages/backend && pnpm run typecheck",
"typecheck": "pnpm run typecheck:shared && pnpm run typecheck:frontend && pnpm run typecheck:backend",
"prepare": "husky"
},
"resolutions": {
"string-width": "^4.2.0",
"wrap-ansi": "^7.0.0",
"postman-code-generators": "1.10.1"
},
"lint-staged": {
"*.{js,ts,tsx,svelte}": [
"eslint --cache --fix",
"prettier --cache --write"
],
"*.{md,css,json}": "prettier --write"
},
"devDependencies": {
"@commitlint/cli": "^20.5.2",
"@commitlint/config-conventional": "^20.5.0",
"@eslint/compat": "^2.0.5",
"@typescript-eslint/eslint-plugin": "^8.59.1",
"@typescript-eslint/parser": "^8.59.1",
"@vitest/coverage-v8": "^3.2.3",
"autoprefixer": "^10.5.0",
"commitlint": "^20.5.2",
"concurrently": "^9.2.1",
"eslint": "^9.39.2",
"eslint-import-resolver-custom-alias": "^1.3.2",
"eslint-import-resolver-typescript": "^4.3.5",
"eslint-plugin-etc": "^2.0.3",
"eslint-plugin-import": "^2.31.0",
"eslint-plugin-no-null": "^1.0.2",
"eslint-plugin-redundant-undefined": "^1.0.0",
"eslint-plugin-simple-import-sort": "^13.0.0",
"eslint-plugin-sonarjs": "^4.0.3",
"eslint-plugin-svelte": "^3.17.1",
"eslint-plugin-unicorn": "^64.0.0",
"globals": "^17.5.0",
"husky": "^9.1.7",
"lint-staged": "^16.4.0",
"msw": "^2.14.2",
"prettier": "^3.8.3",
"prettier-plugin-svelte": "^3.5.1",
"svelte-check": "^4.4.6",
"svelte-eslint-parser": "^1.6.0",
"typescript": "5.9.3",
"typescript-eslint": "^8.59.1",
"vite": "^7.3.1",
"vitest": "^3.0.5"
},
"workspaces": {
"packages": [
"packages/*",
"tests/*"
]
},
"dependencies": {
"js-yaml": "^4.1.1",
"zod": "^4.3.6"
},
"scarfSettings": {
"enabled": false
},
"pnpm": {
"overrides": {
"postman-collection>semver": "^7.5.2"
},
"ignoredBuiltDependencies": [
"@scarf/scarf",
"@tailwindcss/oxide",
"esbuild",
"postman-code-generators",
"svelte-preprocess",
"unrs-resolver"
]
},
"packageManager": "pnpm@10.12.4+sha512.5ea8b0deed94ed68691c9bad4c955492705c5eeb8a87ef86bc62c74a26b037b08ff9570f108b2e4dbd1dd1a9186fea925e527f141c648e85af45631074680184"
}
================================================
FILE: packages/backend/.gitignore
================================================
media
/src-generated
================================================
FILE: packages/backend/__mocks__/@podman-desktop/api.js
================================================
/**********************************************************************
* Copyright (C) 2024 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
***********************************************************************/
/**
 * Stub of the `@podman-desktop/api` module used when running unit tests.
 *
 * vitest resolves `@podman-desktop/api` to this file (wired up in
 * vitest.config.js); individual tests then spy on / mock the specific
 * API members they need, so an empty object is all that is required here.
 */
const plugin = {};

module.exports = plugin;
================================================
FILE: packages/backend/package.json
================================================
{
"name": "ai-lab",
"displayName": "Podman AI Lab",
"description": "Podman AI Lab lets you work with LLMs locally, exploring AI fundamentals, experimenting with models and prompts, and serving models while maintaining data security and privacy.",
"version": "1.10.0-next",
"icon": "icon.png",
"type": "module",
"publisher": "redhat",
"license": "Apache-2.0",
"engines": {
"podman-desktop": ">=1.8.0"
},
"main": "./dist/extension.cjs",
"contributes": {
"commands": [
{
"command": "ai-lab.navigation.inference.start",
"title": "AI Lab: navigate to inference start page",
"hidden": true
},
{
"command": "ai-lab.navigation.recipe.start",
"title": "AI Lab: navigate to recipe start page",
"hidden": true
}
],
"configuration": {
"title": "AI Lab",
"properties": {
"ai-lab.models.path": {
"type": "string",
"format": "folder",
"default": "",
"description": "Custom path where to download models. Note: The extension must be restarted for changes to take effect. (Default is blank)"
},
"ai-lab.modelUploadDisabled": {
"type": "boolean",
"default": false,
"description": "Disable the model upload to the podman machine",
"hidden": true
},
"ai-lab.experimentalGPU": {
"type": "boolean",
"default": false,
"description": "Experimental GPU support for inference servers"
},
"ai-lab.apiPort": {
"type": "number",
"default": 10434,
"minimum": 1024,
"maximum": 65535,
"description": "Port on which the API is listening (requires restart of extension)"
},
"ai-lab.inferenceRuntime": {
"type": "string",
"enum": [
"all",
"llama-cpp",
"whisper-cpp",
"none"
],
"description": "Choose the default inferencing runtime for AI Lab"
},
"ai-lab.experimentalTuning": {
"type": "boolean",
"default": false,
"description": "Display InstructLab Tuning screens (experimental)",
"hidden": true
},
"ai-lab.showGPUPromotion": {
"type": "boolean",
"default": true,
"description": "Display GPU promotion banner",
"hidden": true
}
}
},
"icons": {
"brain-icon": {
"description": "Brain icon",
"default": {
"fontPath": "brain.woff2",
"fontCharacter": "\\E001"
}
}
},
"views": {
"icons/containersList": [
{
"when": "ai-lab-model-id in containerLabelKeys",
"icon": "${brain-icon}"
}
],
"icons/image": [
{
"when": "ai-lab-recipe-id in imageLabelKeys",
"icon": "${brain-icon}"
}
]
}
},
"scripts": {
"generate": "npx openapi-typescript ../../api/openapi.yaml -o src-generated/openapi.ts",
"build": "pnpm run generate && vite build",
"test": "vitest run --coverage",
"test:watch": "vitest watch --coverage",
"format:check": "prettier --check \"src/**/*.ts\"",
"format:fix": "prettier --write \"src/**/*.ts\"",
"watch": "pnpm run generate && npx vite --mode development build -w",
"typecheck": "pnpm run generate && tsc --noEmit"
},
"dependencies": {
"@ai-sdk/mcp": "^1.0.36",
"@ai-sdk/openai-compatible": "^2.0.42",
"@huggingface/gguf": "^0.4.2",
"@huggingface/hub": "^2.11.0",
"ai": "^6.0.168",
"express": "^5.2.1",
"express-openapi-validator": "^5.6.2",
"isomorphic-git": "^1.37.6",
"js-yaml": "^4.1.1",
"mustache": "^4.2.0",
"openai": "^6.35.0",
"postman-code-generators": "^1.14.1",
"postman-collection": "^5.3.0",
"semver": "^7.7.4",
"swagger-ui-dist": "^5.32.5",
"swagger-ui-express": "^5.0.1",
"systeminformation": "^5.31.5",
"xml-js": "^1.6.11"
},
"devDependencies": {
"@podman-desktop/api": "1.13.0-202409181313-78725a6565",
"@ai-sdk/provider": "^3.0.8",
"@ai-sdk/provider-utils": "^4.0.24",
"@rollup/plugin-replace": "^6.0.3",
"@types/express": "^5.0.6",
"@types/js-yaml": "^4.0.9",
"@types/mustache": "^4.2.6",
"@types/node": "^24",
"@types/postman-collection": "^3.5.11",
"@types/supertest": "^7.2.0",
"@types/swagger-ui-dist": "^3.30.5",
"@types/swagger-ui-express": "^4.1.8",
"openapi-typescript": "^7.13.0",
"supertest": "^7.2.2",
"vitest": "^3.0.5"
}
}
================================================
FILE: packages/backend/src/assets/ai.json
================================================
{
"version": "1.0",
"recipes": [
{
"id": "chatbot",
"description": "This recipe provides a blueprint for developers to create their own AI-powered chat applications using Streamlit.",
"name": "ChatBot",
"repository": "https://github.com/containers/ai-lab-recipes",
"ref": "v1.8.0",
"icon": "natural-language-processing",
"categories": ["natural-language-processing"],
"basedir": "recipes/natural_language_processing/chatbot",
"readme": "# Chat Application\n\n This recipe helps developers start building their own custom LLM enabled chat applications. It consists of two main components: the Model Service and the AI Application.\n\n There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\n\n The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the chat application below.\n\n \n\n\n## Try the Chat Application\n\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Chatbot` and follow the instructions to start the application.\n\n# Build the Application\n\nThe rest of this document will explain how to build and run the application from the terminal, and will\ngo into greater detail on how each container in the Pod above is built, run, and \nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\n\n\nThis application requires a model, a model service and an AI inferencing application.\n\n* [Quickstart](#quickstart)\n* [Download a model](#download-a-model)\n* [Build the Model Service](#build-the-model-service)\n* [Deploy the Model Service](#deploy-the-model-service)\n* [Build the AI Application](#build-the-ai-application)\n* [Deploy the AI Application](#deploy-the-ai-application)\n* [Interact with the AI Application](#interact-with-the-ai-application)\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\n\n\n## Quickstart\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\nbuilds the application's metadata and generates Kubernetes YAML at `./build/chatbot.yaml` to spin up a Pod that can then be launched locally.\nTry it with:\n\n```\nmake quadlet\npodman kube play build/chatbot.yaml\n```\n\nThis will take a few minutes if the model and model-server container images need to be downloaded. \nThe Pod is named `chatbot`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\n\n```\npodman pod list\npodman ps\n```\n\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. \nPlease refer to the section below for more details about [interacting with the chatbot application](#interact-with-the-ai-application).\n\nTo stop and remove the Pod, run:\n\n```\npodman pod stop chatbot\npodman pod rm chatbot\n```\n\n## Download a model\n\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\nperformant mid-sized model with an apache-2.0 license. 
In order to use it with our Model Service we need it converted\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\n\nThe recommended model can be downloaded using the code snippet below:\n\n```bash\ncd ../../../models\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\ncd ../recipes/natural_language_processing/chatbot\n```\n\n_A full list of supported open models is forthcoming._ \n\n\n## Build the Model Service\n\nThe complete instructions for building and deploying the Model Service can be found in the\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\n\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\n\n```bash\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\nmake build\n```\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\n\n## Deploy the Model Service\n\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\n\n```bash\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\nmake run\n```\n\n## Build the AI Application\n\nThe AI Application can be built from the make command:\n\n```bash\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\nmake build\n```\n\n## Deploy the AI Application\n\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\n\n```bash\n# Run this from the current directory (path recipes/natural_language_processing/chatbot from repo containers/ai-lab-recipes)\nmake run \n```\n\n## Interact with the AI Application\n\nEverything should now be up an running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled chatbot applications. \n\n## Embed the AI Application in a Bootable Container Image\n\nTo build a bootable container image that includes this sample chatbot workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\n\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\n\n```bash\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\n```\n\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\n\n```\nmake ARCH=x86_64 bootc\n```\n\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\nwith the chatbot application, it's as simple as ssh-ing into the bootc system and running:\n\n```bash\nbootc switch quay.io/ai-lab/chatbot-bootc:latest\n```\n\nUpon a reboot, you'll see that the chatbot service is running on the system. Check on the service with:\n\n```bash\nssh user@bootc-system-ip\nsudo systemctl status chatbot\n```\n\n### What are bootable containers?\n\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\n\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\ntools. Might I suggest [podman](https://podman.io/)?\n\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\n\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\n\n#### Creating bootable disk images\n\nYou can convert a bootc image to a bootable disk image using the\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\n\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\n\nDefault image types can be set via the DISK_TYPE Makefile variable.\n\n`make bootc-image-builder DISK_TYPE=ami`\n",
"recommended": [
"hf.ibm-granite.granite-4.0-micro-GGUF",
"hf.ibm-granite.granite-4.0-tiny-GGUF",
"hf.ibm-granite.granite-3.3-8b-instruct-GGUF",
"hf.ibm-research.granite-3.2-8b-instruct-GGUF"
],
"backend": "llama-cpp",
"languages": ["python"],
"frameworks": ["streamlit", "langchain"]
},
{
"id": "chatbot-pydantic-ai",
"description": "This recipe provides a blueprint for developers to create their own AI-powered chat applications with the pydantic framework using Streamlit",
"name": "Chatbot PydanticAI",
"repository": "https://github.com/containers/ai-lab-recipes",
"ref": "v1.8.0",
"icon": "natural-language-processing",
"categories": ["natural-language-processing"],
"basedir": "recipes/natural_language_processing/chatbot-pydantic-ai",
"readme": "# Chatbot Pydantic Application\n\n This recipe helps developers start building their own custom LLM enabled chat applications. It consists of two main components: the Model Service and the AI Application.\n\n There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).\n\n The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the chat application below.\n\n \n\n\n## Try the Chat Application\n\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Chatbot Pydantic AI` and follow the instructions to start the application.\n\n# Build the Application\n\nThe rest of this document will explain how to build and run the application from the terminal, and will\ngo into greater detail on how each container in the Pod above is built, run, and \nwhat purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. 
Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\n\n\nThis application requires a model, a model service and an AI inferencing application.\n\n* [Quickstart](#quickstart)\n* [Download a model](#download-a-model)\n* [Build the Model Service](#build-the-model-service)\n* [Deploy the Model Service](#deploy-the-model-service)\n* [Build the AI Application](#build-the-ai-application)\n* [Deploy the AI Application](#deploy-the-ai-application)\n* [Interact with the AI Application](#interact-with-the-ai-application)\n* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)\n\n\n## Quickstart\nTo run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command\nbuilds the application's metadata and generates Kubernetes YAML at `./build/chatbot-pydantic-ai.yaml` to spin up a Pod that can then be launched locally.\nTry it with:\n\n```\nmake quadlet\npodman kube play build/chatbot-pydantic-ai.yaml\n```\n\nThis will take a few minutes if the model and model-server container images need to be downloaded. \nThe Pod is named `chatbot-pydantic-ai`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:\n\n```\npodman pod list\npodman ps\n```\n\nOnce the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. \nPlease refer to the section below for more details about [interacting with the chatbot-pydantic-ai application](#interact-with-the-ai-application).\n\nTo stop and remove the Pod, run:\n\n```\npodman pod stop chatbot-pydantic-ai\npodman pod rm chatbot-pydantic-ai\n```\n\n## Download a model\n\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well\nperformant mid-sized model with an apache-2.0 license. 
In order to use it with our Model Service we need it converted\nand quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of\nways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from\n[huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\n\nThe recommended model can be downloaded using the code snippet below:\n\n```bash\ncd ../../../models\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\ncd ../recipes/natural_language_processing/chatbot-pydantic-ai\n```\n\n_A full list of supported open models is forthcoming._ \n\n\n## Build the Model Service\n\nThe complete instructions for building and deploying the Model Service can be found in the\n[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\n\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\n\n```bash\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\nmake build\n```\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\n\n## Deploy the Model Service\n\nThe local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where its served. 
You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\n\n```bash\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\nmake run\n```\n\n## Build the AI Application\n\nThe AI Application can be built from the make command:\n\n```bash\n# Run this from the current directory (path recipes/natural_language_processing/chatbot-pydantic-ai from repo containers/ai-lab-recipes)\nmake build\n```\n\n## Deploy the AI Application\n\nMake sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:\n\n```bash\n# Run this from the current directory (path recipes/natural_language_processing/chatbot-pydantic-ai from repo containers/ai-lab-recipes)\nmake run \n```\n\n## Interact with the AI Application\n\nEverything should now be up an running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled chatbot-pydantic-ai applications. \n\n## Embed the AI Application in a Bootable Container Image\n\nTo build a bootable container image that includes this sample chatbot-pydantic-ai workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. 
You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image> bootc`.\n\nSubstituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.\n\n```bash\nmake FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc\n```\n\nSelecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.\n\n```\nmake ARCH=x86_64 bootc\n```\n\nThe magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built\nwith the chatbot-pydantic-ai application, it's as simple as ssh-ing into the bootc system and running:\n\n```bash\nbootc switch quay.io/ai-lab/chatbot-pydantic-ai-bootc:latest\n```\n\nUpon a reboot, you'll see that the chatbot-pydantic-ai service is running on the system. Check on the service with:\n\n```bash\nssh user@bootc-system-ip\nsudo systemctl status chatbot-pydantic-ai\n```\n\n### What are bootable containers?\n\nWhat's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI?\n\nThat's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than\nat _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system.\nBootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization\ntools. Might I suggest [podman](https://podman.io/)?\n\nOnce installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI\nimage registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think\nfactories or appliances. 
Who doesn't want to add a little AI to their appliance, am I right?\n\nBootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime!\n\n#### Creating bootable disk images\n\nYou can convert a bootc image to a bootable disk image using the\n[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image.\n\nThis container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images.\n\nDefault image types can be set via the DISK_TYPE Makefile variable.\n\n`make bootc-image-builder DISK_TYPE=ami`\n",
"recommended": [],
"backend": "llama-cpp",
"languages": ["python"],
"frameworks": ["streamlit", "PydanticAI"]
},
{
"id": "agents",
"description": "This recipe shows how ReAct can be used to create an intelligent music discovery assistant with Spotify API.",
"name": "ReAct Agent Application",
"repository": "https://github.com/containers/ai-lab-recipes",
"ref": "v1.8.0",
"icon": "natural-language-processing",
"categories": ["natural-language-processing"],
"basedir": "recipes/natural_language_processing/agents",
"readme": "# ReAct Agent Application\n\n This recipe demonstrates the ReAct (Reasoning and Acting) framework in action through a music exploration application. ReAct enables AI to think step-by-step about tasks, take appropriate actions, and provide reasoned responses. The application shows how ReAct can be used to create an intelligent music discovery assistant that combines reasoning with Spotify API interactions.\nThe application utilizes [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) for the Model Service and integrates with Spotify's API for music data. The recipe uses [Langchain](https://python.langchain.com/docs/get_started/introduction) for the ReAct implementation and [Streamlit](https://streamlit.io/) for the UI layer.\n\n## Spotify API Access\nTo use this application, you'll need Spotify API credentials (follow the link here for documentation https://developer.spotify.com/documentation/web-api):\n- Create a Spotify Developer account\n- Create an application in the Spotify Developer Dashboard (https://developer.spotify.com/documentation/web-api/concepts/apps dont worry about adding web/redirect url use the defaults)\n- Get your Client ID and Client Secret once the app is created (https://developer.spotify.com/dashboard)\n\nThese can be provided through environment variables or the application's UI.\n\n## Try the ReAct Agent Application\nThe [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `ReAct Agent` and follow the instructions to start the application.\n\n# Build the Application\nThe rest of this document will explain how to build and run the application from the terminal, and will go into greater detail on how each container in the Pod above is built, run, and what purpose it serves in the overall application. 
All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. Please review the [Makefile docs](../../common/README.md), to learn about further customizing your application.\n\n## Download a model\nIf you are just getting started, we recommend using [granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct). This is a well performant mid-sized model with an apache-2.0 license. In order to use it with our Model Service we need it converted and quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md). There are a number of ways to get a GGUF version of granite-3.3-8b-instruct, but the simplest is to download a pre-converted one from [huggingface.co](https://huggingface.co) here: https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF.\nThe recommended model can be downloaded using the code snippet below:\n```bash\ncd ../../../models\ncurl -sLO https://huggingface.co/ibm-granite/granite-3.3-8b-instruct-GGUF/resolve/main/granite-3.3-8b-instruct-Q4_K_M.gguf\ncd ../recipes/natural_language_processing/agents\n```\n_A full list of supported open models is forthcoming._ \n\n## Build the Model Service\nThe complete instructions for building and deploying the Model Service can be found in the [llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).\nThe Model Service can be built from make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).\n```bash\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\nmake build\n```\nCheckout the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.\n\n## Deploy the Model Service\nThe local Model Service relies on a volume mount to the localhost to access the model files. 
It also employs environment variables to dictate the model used and where its served. You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:\n```bash\n# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes\nmake run\n```\n\n## Build the AI Application\nThe AI Application can be built from the make command:\n```bash\n# Run this from the current directory (path recipes/natural_language_processing/agents from repo containers/ai-lab-recipes)\nmake build\n```\n\n## Deploy the AI Application\nMake sure the Model Service is up and running before startin
gitextract_r3viv2ck/
├── .dockerignore
├── .editorconfig
├── .fmf/
│ └── version
├── .gitattributes
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.yml
│ │ ├── config.yml
│ │ ├── epic.yml
│ │ ├── feature_request.yml
│ │ └── ux-request.yaml
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── dependabot.yml
│ └── workflows/
│ ├── ai-lab-e2e-nightly-windows.yaml
│ ├── build-next.yaml
│ ├── compute-model-sizes.yml
│ ├── e2e-main-tf.yaml
│ ├── e2e-main.yaml
│ ├── llama-stack-playground.yaml
│ ├── pr-check.yaml
│ ├── ramalama.yaml
│ ├── recipe-catalog-change-cleanup.yaml
│ ├── recipe-catalog-change-template.yaml
│ ├── recipe-catalog-change-trigger.yaml
│ ├── release.yaml
│ ├── update-ramalama-references.sh
│ └── update-ramalama-references.yaml
├── .gitignore
├── .husky/
│ ├── commit-msg
│ └── pre-commit
├── .npmrc
├── .prettierrc
├── .vscode/
│ └── settings.json
├── CODE-OF-CONDUCT.md
├── Containerfile
├── LICENSE
├── MIGRATION.md
├── PACKAGING-GUIDE.md
├── README.md
├── RELEASE.md
├── SECURITY.md
├── USAGE_DATA.md
├── api/
│ └── openapi.yaml
├── clean.sh
├── commitlint.config.js
├── docs/
│ └── proposals/
│ ├── ai-studio.md
│ └── state-management.md
├── eslint.config.mjs
├── package.json
├── packages/
│ ├── backend/
│ │ ├── .gitignore
│ │ ├── __mocks__/
│ │ │ └── @podman-desktop/
│ │ │ └── api.js
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── assets/
│ │ │ │ ├── ai.json
│ │ │ │ ├── inference-images.json
│ │ │ │ ├── instructlab-images.json
│ │ │ │ ├── llama-stack-images.json
│ │ │ │ ├── llama-stack-playground-images.json
│ │ │ │ └── openai.json
│ │ │ ├── extension.spec.ts
│ │ │ ├── extension.ts
│ │ │ ├── instructlab-api-impl.ts
│ │ │ ├── llama-stack-api-impl.ts
│ │ │ ├── managers/
│ │ │ │ ├── GPUManager.spec.ts
│ │ │ │ ├── GPUManager.ts
│ │ │ │ ├── SnippetManager.spec.ts
│ │ │ │ ├── SnippetManager.ts
│ │ │ │ ├── TaskRunner.spec.ts
│ │ │ │ ├── TaskRunner.ts
│ │ │ │ ├── apiServer.spec.ts
│ │ │ │ ├── apiServer.ts
│ │ │ │ ├── application/
│ │ │ │ │ ├── applicationManager.spec.ts
│ │ │ │ │ └── applicationManager.ts
│ │ │ │ ├── catalogManager.spec.ts
│ │ │ │ ├── catalogManager.ts
│ │ │ │ ├── gitManager.spec.ts
│ │ │ │ ├── gitManager.ts
│ │ │ │ ├── inference/
│ │ │ │ │ ├── inferenceManager.spec.ts
│ │ │ │ │ └── inferenceManager.ts
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── instructlabManager.spec.ts
│ │ │ │ │ └── instructlabManager.ts
│ │ │ │ ├── llama-stack/
│ │ │ │ │ ├── llamaStackManager.spec.ts
│ │ │ │ │ └── llamaStackManager.ts
│ │ │ │ ├── modelsManager.spec.ts
│ │ │ │ ├── modelsManager.ts
│ │ │ │ ├── monitoringManager.spec.ts
│ │ │ │ ├── monitoringManager.ts
│ │ │ │ ├── playground/
│ │ │ │ │ ├── McpServerManager.spec.ts
│ │ │ │ │ ├── McpServerManager.ts
│ │ │ │ │ ├── aiSdk.spec.ts
│ │ │ │ │ └── aiSdk.ts
│ │ │ │ ├── playgroundV2Manager.spec.ts
│ │ │ │ ├── playgroundV2Manager.ts
│ │ │ │ ├── podmanConnection.spec.ts
│ │ │ │ ├── podmanConnection.ts
│ │ │ │ ├── recipes/
│ │ │ │ │ ├── BuilderManager.spec.ts
│ │ │ │ │ ├── BuilderManager.ts
│ │ │ │ │ ├── PodManager.spec.ts
│ │ │ │ │ ├── PodManager.ts
│ │ │ │ │ ├── RecipeManager.spec.ts
│ │ │ │ │ └── RecipeManager.ts
│ │ │ │ └── snippets/
│ │ │ │ ├── java-okhttp-snippet.spec.ts
│ │ │ │ ├── java-okhttp-snippet.ts
│ │ │ │ ├── python-langchain-snippet.spec.ts
│ │ │ │ ├── python-langchain-snippet.ts
│ │ │ │ ├── quarkus-snippet.spec.ts
│ │ │ │ └── quarkus-snippet.ts
│ │ │ ├── models/
│ │ │ │ ├── AIConfig.spec.ts
│ │ │ │ ├── AIConfig.ts
│ │ │ │ ├── ApplicationOptions.ts
│ │ │ │ ├── HuggingFaceModelHandler.spec.ts
│ │ │ │ ├── HuggingFaceModelHandler.ts
│ │ │ │ ├── ModelHandler.ts
│ │ │ │ ├── TaskRunner.ts
│ │ │ │ ├── URLModelHandler.ts
│ │ │ │ └── baseEvent.ts
│ │ │ ├── registries/
│ │ │ │ ├── ApplicationRegistry.ts
│ │ │ │ ├── CancellationTokenRegistry.spec.ts
│ │ │ │ ├── CancellationTokenRegistry.ts
│ │ │ │ ├── ConfigurationRegistry.spec.ts
│ │ │ │ ├── ConfigurationRegistry.ts
│ │ │ │ ├── ContainerRegistry.spec.ts
│ │ │ │ ├── ContainerRegistry.ts
│ │ │ │ ├── ConversationRegistry.ts
│ │ │ │ ├── InferenceProviderRegistry.ts
│ │ │ │ ├── LocalRepositoryRegistry.spec.ts
│ │ │ │ ├── LocalRepositoryRegistry.ts
│ │ │ │ ├── ModelHandlerRegistry.ts
│ │ │ │ ├── NavigationRegistry.spec.ts
│ │ │ │ ├── NavigationRegistry.ts
│ │ │ │ ├── TaskRegistry.spec.ts
│ │ │ │ └── TaskRegistry.ts
│ │ │ ├── studio-api-impl.spec.ts
│ │ │ ├── studio-api-impl.ts
│ │ │ ├── studio.spec.ts
│ │ │ ├── studio.ts
│ │ │ ├── templates/
│ │ │ │ ├── java-okhttp.mustache
│ │ │ │ ├── python-langchain.mustache
│ │ │ │ └── quarkus-langchain4j.mustache
│ │ │ ├── tests/
│ │ │ │ ├── ai-test.json
│ │ │ │ ├── ai-user-test.json
│ │ │ │ └── utils.ts
│ │ │ ├── utils/
│ │ │ │ ├── JsonWatcher.spec.ts
│ │ │ │ ├── JsonWatcher.ts
│ │ │ │ ├── Publisher.spec.ts
│ │ │ │ ├── Publisher.ts
│ │ │ │ ├── RecipeConstants.ts
│ │ │ │ ├── arch.ts
│ │ │ │ ├── catalogUtils.spec.ts
│ │ │ │ ├── catalogUtils.ts
│ │ │ │ ├── downloader.ts
│ │ │ │ ├── imagesUtils.spec.ts
│ │ │ │ ├── imagesUtils.ts
│ │ │ │ ├── inferenceUtils.spec.ts
│ │ │ │ ├── inferenceUtils.ts
│ │ │ │ ├── mcpUtils.ts
│ │ │ │ ├── modelsUtils.spec.ts
│ │ │ │ ├── modelsUtils.ts
│ │ │ │ ├── pathUtils.ts
│ │ │ │ ├── podman.spec.ts
│ │ │ │ ├── podman.ts
│ │ │ │ ├── podsUtils.ts
│ │ │ │ ├── ports.ts
│ │ │ │ ├── randomUtils.ts
│ │ │ │ ├── sha.spec.ts
│ │ │ │ ├── sha.ts
│ │ │ │ ├── uploader.spec.ts
│ │ │ │ ├── uploader.ts
│ │ │ │ ├── urldownloader.spec.ts
│ │ │ │ ├── urldownloader.ts
│ │ │ │ └── utils.ts
│ │ │ ├── webviewUtils.spec.ts
│ │ │ ├── webviewUtils.ts
│ │ │ └── workers/
│ │ │ ├── IWorker.ts
│ │ │ ├── WindowsWorker.ts
│ │ │ ├── provider/
│ │ │ │ ├── InferenceProvider.spec.ts
│ │ │ │ ├── InferenceProvider.ts
│ │ │ │ ├── LlamaCppPython.spec.ts
│ │ │ │ ├── LlamaCppPython.ts
│ │ │ │ ├── OpenVINO.spec.ts
│ │ │ │ ├── OpenVINO.ts
│ │ │ │ ├── WhisperCpp.spec.ts
│ │ │ │ └── WhisperCpp.ts
│ │ │ └── uploader/
│ │ │ ├── UploaderOptions.ts
│ │ │ ├── WSLUploader.spec.ts
│ │ │ └── WSLUploader.ts
│ │ ├── tsconfig.json
│ │ ├── vite.config.js
│ │ └── vitest.config.js
│ ├── frontend/
│ │ ├── index.html
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── App.spec.ts
│ │ │ ├── App.svelte
│ │ │ ├── Route.svelte
│ │ │ ├── app.css
│ │ │ ├── index.html
│ │ │ ├── lib/
│ │ │ │ ├── ApplicationActions.spec.ts
│ │ │ │ ├── ApplicationActions.svelte
│ │ │ │ ├── Badge.spec.ts
│ │ │ │ ├── Badge.svelte
│ │ │ │ ├── Card.svelte
│ │ │ │ ├── ContentDetailsLayout.spec.ts
│ │ │ │ ├── ContentDetailsLayout.svelte
│ │ │ │ ├── ContentDetailsLayoutTest.svelte
│ │ │ │ ├── ExpandableMessage.svelte
│ │ │ │ ├── FlatMenu.svelte
│ │ │ │ ├── Navigation.spec.ts
│ │ │ │ ├── Navigation.svelte
│ │ │ │ ├── RangeInput.svelte
│ │ │ │ ├── RecipeCard.spec.ts
│ │ │ │ ├── RecipeCard.svelte
│ │ │ │ ├── RecipeCardTags.spec.ts
│ │ │ │ ├── RecipeCardTags.svelte
│ │ │ │ ├── RecipeCardTags.ts
│ │ │ │ ├── RecipeDetails.spec.ts
│ │ │ │ ├── RecipeDetails.svelte
│ │ │ │ ├── RecipeStatus.spec.ts
│ │ │ │ ├── RecipeStatus.svelte
│ │ │ │ ├── RecipesCard.spec.ts
│ │ │ │ ├── RecipesCard.svelte
│ │ │ │ ├── button/
│ │ │ │ │ ├── CopyButton.spec.ts
│ │ │ │ │ ├── CopyButton.svelte
│ │ │ │ │ └── ListItemButtonIcon.svelte
│ │ │ │ ├── conversation/
│ │ │ │ │ ├── ChatMessage.svelte
│ │ │ │ │ ├── ConversationActions.svelte
│ │ │ │ │ ├── ElapsedTime.svelte
│ │ │ │ │ ├── SystemPromptBanner.spec.ts
│ │ │ │ │ ├── SystemPromptBanner.svelte
│ │ │ │ │ ├── ToolCallMessage.spec.ts
│ │ │ │ │ └── ToolCallMessage.svelte
│ │ │ │ ├── icons/
│ │ │ │ │ ├── InstructLabIcon.svelte
│ │ │ │ │ ├── ModelStatusIcon.spec.ts
│ │ │ │ │ ├── ModelStatusIcon.svelte
│ │ │ │ │ ├── ModelWhite.svelte
│ │ │ │ │ ├── PlaygroundWhite.svelte
│ │ │ │ │ └── RemoteModel.svelte
│ │ │ │ ├── images/
│ │ │ │ │ ├── DashboardBanner.svelte
│ │ │ │ │ ├── PodIcon.svelte
│ │ │ │ │ └── VSCodeIcon.svelte
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── AboutInstructLabDiscoverCard.svelte
│ │ │ │ │ └── AboutInstructLabExploreCard.svelte
│ │ │ │ ├── markdown/
│ │ │ │ │ ├── LinkComponent.svelte
│ │ │ │ │ └── MarkdownRenderer.svelte
│ │ │ │ ├── monaco-editor/
│ │ │ │ │ ├── MonacoEditor.svelte
│ │ │ │ │ └── monaco.ts
│ │ │ │ ├── notification/
│ │ │ │ │ ├── ContainerConnectionStatusInfo.spec.ts
│ │ │ │ │ ├── ContainerConnectionStatusInfo.svelte
│ │ │ │ │ ├── ContainerConnectionWrapper.spec.ts
│ │ │ │ │ ├── ContainerConnectionWrapper.svelte
│ │ │ │ │ ├── GPUEnabledMachine.spec.ts
│ │ │ │ │ ├── GPUEnabledMachine.svelte
│ │ │ │ │ ├── GPUPromotion.spec.ts
│ │ │ │ │ └── GPUPromotion.svelte
│ │ │ │ ├── progress/
│ │ │ │ │ ├── TaskItem.spec.ts
│ │ │ │ │ ├── TaskItem.svelte
│ │ │ │ │ ├── TasksBanner.spec.ts
│ │ │ │ │ ├── TasksBanner.svelte
│ │ │ │ │ ├── TasksProgress.spec.ts
│ │ │ │ │ ├── TasksProgress.svelte
│ │ │ │ │ ├── TrackedTasks.spec.ts
│ │ │ │ │ └── TrackedTasks.svelte
│ │ │ │ ├── select/
│ │ │ │ │ ├── ContainerProviderConnectionSelect.spec.ts
│ │ │ │ │ ├── ContainerProviderConnectionSelect.svelte
│ │ │ │ │ ├── InferenceRuntimeSelect.spec.ts
│ │ │ │ │ ├── InferenceRuntimeSelect.svelte
│ │ │ │ │ ├── ModelSelect.spec.ts
│ │ │ │ │ ├── ModelSelect.svelte
│ │ │ │ │ ├── Select.spec.ts
│ │ │ │ │ └── Select.svelte
│ │ │ │ └── table/
│ │ │ │ ├── application/
│ │ │ │ │ ├── ApplicationTable.spec.ts
│ │ │ │ │ ├── ApplicationTable.svelte
│ │ │ │ │ ├── ColumnActions.svelte
│ │ │ │ │ ├── ColumnAge.svelte
│ │ │ │ │ ├── ColumnModel.spec.ts
│ │ │ │ │ ├── ColumnModel.svelte
│ │ │ │ │ ├── ColumnPod.svelte
│ │ │ │ │ ├── ColumnRecipe.spec.ts
│ │ │ │ │ ├── ColumnRecipe.svelte
│ │ │ │ │ ├── ColumnRuntime.spec.ts
│ │ │ │ │ ├── ColumnRuntime.svelte
│ │ │ │ │ └── ColumnStatus.svelte
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── InstructlabColumnAge.svelte
│ │ │ │ │ ├── InstructlabColumnModelName.spec.ts
│ │ │ │ │ ├── InstructlabColumnModelName.svelte
│ │ │ │ │ ├── InstructlabColumnName.svelte
│ │ │ │ │ ├── InstructlabColumnRepository.svelte
│ │ │ │ │ ├── InstructlabColumnStatus.svelte
│ │ │ │ │ └── InstructlabColumnTargetModelName.svelte
│ │ │ │ ├── model/
│ │ │ │ │ ├── ModelColumnAction.spec.ts
│ │ │ │ │ ├── ModelColumnActions.svelte
│ │ │ │ │ ├── ModelColumnAge.spec.ts
│ │ │ │ │ ├── ModelColumnAge.svelte
│ │ │ │ │ ├── ModelColumnLabels.svelte
│ │ │ │ │ ├── ModelColumnName.spec.ts
│ │ │ │ │ ├── ModelColumnName.svelte
│ │ │ │ │ ├── ModelColumnRecipeSelection.svelte
│ │ │ │ │ ├── ModelColumnSize.spec.ts
│ │ │ │ │ └── ModelColumnSize.svelte
│ │ │ │ ├── playground/
│ │ │ │ │ ├── ConversationColumnAction.spec.ts
│ │ │ │ │ ├── ConversationColumnAction.svelte
│ │ │ │ │ ├── PlaygroundColumnIcon.svelte
│ │ │ │ │ ├── PlaygroundColumnModel.svelte
│ │ │ │ │ ├── PlaygroundColumnName.svelte
│ │ │ │ │ ├── PlaygroundColumnRuntime.spec.ts
│ │ │ │ │ └── PlaygroundColumnRuntime.svelte
│ │ │ │ └── service/
│ │ │ │ ├── ServiceAction.spec.ts
│ │ │ │ ├── ServiceAction.svelte
│ │ │ │ ├── ServiceColumnModelName.spec.ts
│ │ │ │ ├── ServiceColumnModelName.svelte
│ │ │ │ ├── ServiceColumnName.spec.ts
│ │ │ │ ├── ServiceColumnName.svelte
│ │ │ │ ├── ServiceColumnRuntime.spec.ts
│ │ │ │ ├── ServiceColumnRuntime.svelte
│ │ │ │ ├── ServiceStatus.spec.ts
│ │ │ │ └── ServiceStatus.svelte
│ │ │ ├── main.ts
│ │ │ ├── models/
│ │ │ │ └── IRouterState.ts
│ │ │ ├── pages/
│ │ │ │ ├── Applications.svelte
│ │ │ │ ├── CreateService.spec.ts
│ │ │ │ ├── CreateService.svelte
│ │ │ │ ├── Dashboard.spec.ts
│ │ │ │ ├── Dashboard.svelte
│ │ │ │ ├── ImportModel.spec.ts
│ │ │ │ ├── ImportModel.svelte
│ │ │ │ ├── InferenceServerDetails.spec.ts
│ │ │ │ ├── InferenceServerDetails.svelte
│ │ │ │ ├── InferenceServers.spec.ts
│ │ │ │ ├── InferenceServers.svelte
│ │ │ │ ├── Model.spec.ts
│ │ │ │ ├── Model.svelte
│ │ │ │ ├── Models.spec.ts
│ │ │ │ ├── Models.svelte
│ │ │ │ ├── NewInstructLabSession.spec.ts
│ │ │ │ ├── NewInstructLabSession.svelte
│ │ │ │ ├── Playground.spec.ts
│ │ │ │ ├── Playground.svelte
│ │ │ │ ├── PlaygroundCreate.spec.ts
│ │ │ │ ├── PlaygroundCreate.svelte
│ │ │ │ ├── Playgrounds.spec.ts
│ │ │ │ ├── Playgrounds.svelte
│ │ │ │ ├── Preferences.svelte
│ │ │ │ ├── Recipe.spec.ts
│ │ │ │ ├── Recipe.svelte
│ │ │ │ ├── Recipes.spec.ts
│ │ │ │ ├── Recipes.svelte
│ │ │ │ ├── StartRecipe.spec.ts
│ │ │ │ ├── StartRecipe.svelte
│ │ │ │ ├── TuneSessions.spec.ts
│ │ │ │ ├── TuneSessions.svelte
│ │ │ │ ├── applications.ts
│ │ │ │ ├── instructlab/
│ │ │ │ │ ├── AboutInstructLab.spec.ts
│ │ │ │ │ ├── AboutInstructLab.svelte
│ │ │ │ │ ├── StartInstructLabContainer.spec.ts
│ │ │ │ │ └── StartInstructLabContainer.svelte
│ │ │ │ ├── llama-stack/
│ │ │ │ │ ├── StartLlamaStackContainer.spec.ts
│ │ │ │ │ └── StartLlamaStackContainer.svelte
│ │ │ │ └── server-information/
│ │ │ │ ├── LocalServer.spec.ts
│ │ │ │ └── LocalServer.svelte
│ │ │ ├── stores/
│ │ │ │ ├── application-states.ts
│ │ │ │ ├── catalog.ts
│ │ │ │ ├── containerProviderConnections.ts
│ │ │ │ ├── conversations.ts
│ │ │ │ ├── extensionConfiguration.ts
│ │ │ │ ├── inferenceServers.ts
│ │ │ │ ├── instructlabSessions.ts
│ │ │ │ ├── localRepositories.ts
│ │ │ │ ├── modelsInfo.spec.ts
│ │ │ │ ├── modelsInfo.ts
│ │ │ │ ├── rpcReadable.spec.ts
│ │ │ │ ├── rpcReadable.ts
│ │ │ │ ├── snippetLanguages.ts
│ │ │ │ └── tasks.ts
│ │ │ └── utils/
│ │ │ ├── categoriesUtils.ts
│ │ │ ├── client.ts
│ │ │ ├── dimensions.ts
│ │ │ ├── fileUtils.ts
│ │ │ ├── localRepositoriesUtils.ts
│ │ │ ├── printers.ts
│ │ │ ├── taskUtils.ts
│ │ │ └── versionControlUtils.ts
│ │ ├── tailwind.config.cjs
│ │ ├── tsconfig.json
│ │ └── vite.config.js
│ └── shared/
│ ├── __mocks__/
│ │ └── @podman-desktop/
│ │ └── api.js
│ ├── src/
│ │ ├── InstructlabAPI.ts
│ │ ├── LlamaStackAPI.ts
│ │ ├── Messages.ts
│ │ ├── StudioAPI.ts
│ │ ├── messages/
│ │ │ ├── MessageProxy.spec.ts
│ │ │ └── MessageProxy.ts
│ │ ├── models/
│ │ │ ├── FilterRecipesResult.ts
│ │ │ ├── IApplicationCatalog.ts
│ │ │ ├── IApplicationState.ts
│ │ │ ├── ICategory.ts
│ │ │ ├── IContainerConnectionInfo.ts
│ │ │ ├── IExtensionConfiguration.ts
│ │ │ ├── IGPUInfo.ts
│ │ │ ├── IInference.spec.ts
│ │ │ ├── IInference.ts
│ │ │ ├── ILocalModelInfo.ts
│ │ │ ├── ILocalRepository.ts
│ │ │ ├── IModelInfo.ts
│ │ │ ├── IModelOptions.ts
│ │ │ ├── IModelResponse.ts
│ │ │ ├── IPlaygroundMessage.ts
│ │ │ ├── IPlaygroundV2.ts
│ │ │ ├── IPodman.ts
│ │ │ ├── IRecipe.ts
│ │ │ ├── IRecipeModelIndex.ts
│ │ │ ├── ITask.ts
│ │ │ ├── InferenceServerConfig.ts
│ │ │ ├── McpSettings.ts
│ │ │ ├── RequestOptions.ts
│ │ │ ├── instructlab/
│ │ │ │ ├── IInstructlabContainerConfiguration.ts
│ │ │ │ ├── IInstructlabContainerInfo.ts
│ │ │ │ └── IInstructlabSession.ts
│ │ │ └── llama-stack/
│ │ │ ├── LlamaStackContainerConfiguration.ts
│ │ │ └── LlamaStackContainerInfo.ts
│ │ └── uri/
│ │ ├── Uri.spec.ts
│ │ └── Uri.ts
│ ├── tsconfig.json
│ ├── vite.config.js
│ └── vitest.config.js
├── pnpm-workspace.yaml
├── tests/
│ ├── playwright/
│ │ ├── package.json
│ │ ├── playwright.config.ts
│ │ ├── src/
│ │ │ ├── ai-lab-extension.spec.ts
│ │ │ ├── model/
│ │ │ │ ├── ai-lab-app-details-page.ts
│ │ │ │ ├── ai-lab-base-page.ts
│ │ │ │ ├── ai-lab-creating-model-service-page.ts
│ │ │ │ ├── ai-lab-dashboard-page.ts
│ │ │ │ ├── ai-lab-local-server-page.ts
│ │ │ │ ├── ai-lab-model-catalog-page.ts
│ │ │ │ ├── ai-lab-model-llamastack-page.ts
│ │ │ │ ├── ai-lab-model-service-page.ts
│ │ │ │ ├── ai-lab-navigation-bar.ts
│ │ │ │ ├── ai-lab-playground-details-page.ts
│ │ │ │ ├── ai-lab-playgrounds-page.ts
│ │ │ │ ├── ai-lab-recipes-catalog-page.ts
│ │ │ │ ├── ai-lab-running-apps-page.ts
│ │ │ │ ├── ai-lab-service-details-page.ts
│ │ │ │ ├── ai-lab-start-recipe-page.ts
│ │ │ │ ├── ai-lab-try-instructlab-page.ts
│ │ │ │ ├── podman-extension-ai-lab-details-page.ts
│ │ │ │ └── preferences-extension-ai-lab-page.ts
│ │ │ └── utils/
│ │ │ ├── aiLabHandler.ts
│ │ │ └── webviewHandler.ts
│ │ └── tsconfig.json
│ └── tmt/
│ ├── plans/
│ │ ├── ai-lab-e2e-plan-default.fmf
│ │ └── ai-lab-e2e-plan-gpu.fmf
│ ├── scripts/
│ │ ├── create-results.sh
│ │ └── install-podman.sh
│ └── tests/
│ ├── e2e-test.fmf
│ ├── instructlab-test.fmf
│ └── smoke-test.fmf
├── tools/
│ └── compute-model-sizes.sh
└── types/
├── additional.d.ts
├── mustache.d.ts
├── podman-desktop-api.d.ts
└── postman-code-generators.d.ts
SYMBOL INDEX (1020 symbols across 170 files)
FILE: eslint.config.mjs
constant TYPESCRIPT_PROJECTS (line 45) | const TYPESCRIPT_PROJECTS = ['packages/*/tsconfig.json', 'tests/*/tsconf...
FILE: packages/backend/src/extension.ts
function activate (line 24) | async function activate(extensionContext: ExtensionContext): Promise<voi...
function deactivate (line 29) | async function deactivate(): Promise<void> {
FILE: packages/backend/src/instructlab-api-impl.ts
class InstructlabApiImpl (line 25) | class InstructlabApiImpl implements InstructlabAPI {
method constructor (line 26) | constructor(private instructlabManager: InstructlabManager) {}
method getIsntructlabSessions (line 28) | async getIsntructlabSessions(): Promise<InstructlabSession[]> {
method requestCreateInstructlabContainer (line 32) | requestCreateInstructlabContainer(config: InstructlabContainerConfigur...
method routeToInstructLabContainerTerminal (line 36) | routeToInstructLabContainerTerminal(containerId: string): Promise<void> {
method getInstructlabContainerId (line 40) | getInstructlabContainerId(): Promise<string | undefined> {
FILE: packages/backend/src/llama-stack-api-impl.ts
class LlamaStackApiImpl (line 25) | class LlamaStackApiImpl implements LlamaStackAPI {
method constructor (line 26) | constructor(private llamaStackManager: LlamaStackManager) {}
method requestcreateLlamaStackContainerss (line 28) | requestcreateLlamaStackContainerss(config: LlamaStackContainerConfigur...
method routeToLlamaStackContainerTerminal (line 32) | routeToLlamaStackContainerTerminal(containerId: string): Promise<void> {
method getLlamaStackContainersInfo (line 36) | getLlamaStackContainersInfo(): Promise<LlamaStackContainers | undefine...
FILE: packages/backend/src/managers/GPUManager.ts
class GPUManager (line 28) | class GPUManager extends Publisher<IGPUInfo[]> implements Disposable {
method constructor (line 31) | constructor(rpcExtension: RpcExtension) {
method dispose (line 37) | dispose(): void {}
method getAll (line 39) | getAll(): IGPUInfo[] {
method collectGPUs (line 43) | async collectGPUs(): Promise<IGPUInfo[]> {
method getVendor (line 52) | protected getVendor(raw: string): GPUVendor {
FILE: packages/backend/src/managers/SnippetManager.ts
type Generator (line 29) | type Generator = (requestOptions: RequestOptions) => Promise<string>;
class SnippetManager (line 31) | class SnippetManager extends Publisher<Language[]> implements Disposable {
method constructor (line 35) | constructor(
method addVariant (line 45) | addVariant(key: string, variant: string, generator: Generator): void {
method getLanguageList (line 56) | getLanguageList(): Language[] {
method generate (line 60) | async generate(requestOptions: RequestOptions, language: string, varia...
method init (line 81) | init(): void {
method dispose (line 90) | dispose(): void {}
FILE: packages/backend/src/managers/TaskRunner.ts
class TaskRunner (line 22) | class TaskRunner {
method constructor (line 23) | constructor(private taskRegistry: TaskRegistry) {}
method runAsTask (line 25) | async runAsTask<T>(
method failFastSubtasks (line 61) | private failFastSubtasks(labels: Record<string, string>): void {
FILE: packages/backend/src/managers/apiServer.spec.ts
class TestApiServer (line 54) | class TestApiServer extends ApiServer {
method getListener (line 55) | public override getListener(): Server | undefined {
FILE: packages/backend/src/managers/apiServer.ts
constant SHOW_API_ERROR_COMMAND (line 49) | const SHOW_API_ERROR_COMMAND = 'ai-lab.show-api-error';
constant PREFERENCE_RANDOM_PORT (line 51) | const PREFERENCE_RANDOM_PORT = 0;
type ListModelResponse (line 53) | type ListModelResponse = components['schemas']['ListModelResponse'];
type Message (line 54) | type Message = components['schemas']['Message'];
type ProcessModelResponse (line 55) | type ProcessModelResponse = components['schemas']['ProcessModelResponse'];
type SwaggerRequest (line 57) | interface SwaggerRequest extends Request {
function asListModelResponse (line 61) | function asListModelResponse(model: ModelInfo): ListModelResponse {
function toDigest (line 73) | function toDigest(name: string, sha256?: string): string {
function asProcessModelResponse (line 77) | function asProcessModelResponse(model: ModelInfo): ProcessModelResponse {
constant LISTENING_ADDRESS (line 86) | const LISTENING_ADDRESS = '0.0.0.0';
type ChatCompletionOptions (line 88) | interface ChatCompletionOptions {
class ApiServer (line 97) | class ApiServer implements Disposable {
method constructor (line 100) | constructor(
method getListener (line 109) | protected getListener(): Server | undefined {
method init (line 113) | async init(): Promise<void> {
method displayApiError (line 196) | displayApiError(port: number): void {
method getFile (line 213) | private getFile(filepath: string): string {
method getSpecFile (line 223) | getSpecFile(): string {
method getPackageFile (line 227) | getPackageFile(): string {
method dispose (line 231) | dispose(): void {
method doErr (line 235) | private doErr(res: Response, message: string, err: unknown): void {
method getSpec (line 242) | getSpec(_req: Request, res: Response): void {
method getVersion (line 255) | getVersion(_req: Request, res: Response): void {
method getModels (line 269) | getModels(_req: Request, res: Response): void {
method streamLine (line 281) | private streamLine(res: Response, obj: unknown): void {
method sendResult (line 285) | private sendResult(res: Response, obj: unknown, code: number, stream: ...
method pullModel (line 294) | pullModel(req: Request, res: Response): void {
method show (line 372) | show(req: Request, res: Response): void {
method makeServerAvailable (line 379) | private async makeServerAvailable(modelInfo: ModelInfo): Promise<Infer...
method openAIChatCompletions (line 415) | private async openAIChatCompletions(options: ChatCompletionOptions): P...
method checkModelAvailability (line 439) | private checkModelAvailability(modelName: string): ModelInfo {
method generate (line 455) | generate(req: Request, res: Response): void {
method chat (line 534) | chat(req: Request, res: Response): void {
method ps (line 602) | ps(_req: Request, res: Response): void {
FILE: packages/backend/src/managers/application/applicationManager.spec.ts
function getInitializedApplicationManager (line 154) | function getInitializedApplicationManager(): ApplicationManager {
FILE: packages/backend/src/managers/application/applicationManager.ts
class ApplicationManager (line 60) | class ApplicationManager extends Publisher<ApplicationState[]> implement...
method constructor (line 66) | constructor(
method requestPullApplication (line 83) | async requestPullApplication(options: ApplicationOptions): Promise<str...
method pullApplication (line 119) | async pullApplication(options: ApplicationOptions, labels: Record<stri...
method initApplication (line 180) | private async initApplication(options: ApplicationOptions, labels: Rec...
method runApplication (line 236) | protected async runApplication(podInfo: PodInfo, labels?: { [key: stri...
method waitContainerIsRunning (line 256) | protected async waitContainerIsRunning(engineId: string, container: Po...
method createApplicationPod (line 269) | protected async createApplicationPod(
method createContainerAndAttachToPod (line 293) | protected async createContainerAndAttachToPod(
method createPod (line 368) | protected async createPod(options: ApplicationOptions, images: RecipeI...
method stopApplication (line 434) | async stopApplication(recipeId: string, modelId: string): Promise<PodI...
method startApplication (line 466) | async startApplication(recipeId: string, modelId: string): Promise<voi...
method refresh (line 476) | protected refresh(): void {
method init (line 492) | init(): void {
method adoptPod (line 525) | protected adoptPod(pod: PodInfo): void {
method forgetPodById (line 551) | protected forgetPodById(podId: string): void {
method checkPodsHealth (line 581) | protected async checkPodsHealth(): Promise<void> {
method updateApplicationState (line 611) | protected updateApplicationState(recipeId: string, modelId: string, st...
method getApplicationsState (line 616) | getApplicationsState(): ApplicationState[] {
method clearTasks (line 620) | protected clearTasks(recipeId: string, modelId: string): void {
method removeApplication (line 633) | async removeApplication(recipeId: string, modelId: string): Promise<vo...
method restartApplication (line 653) | async restartApplication(connection: ContainerProviderConnection, reci...
method getApplicationPorts (line 682) | async getApplicationPorts(recipeId: string, modelId: string): Promise<...
method getApplicationPod (line 690) | protected async getApplicationPod(recipeId: string, modelId: string): ...
method hasApplicationPod (line 698) | protected async hasApplicationPod(recipeId: string, modelId: string): ...
method findPod (line 706) | protected async findPod(recipeId: string, modelId: string): Promise<Po...
method dispose (line 713) | dispose(): void {
FILE: packages/backend/src/managers/catalogManager.ts
constant USER_CATALOG (line 36) | const USER_CATALOG = 'user-catalog.json';
class CatalogManager (line 38) | class CatalogManager extends Publisher<ApplicationCatalog> implements Di...
method constructor (line 46) | constructor(
method init (line 63) | async init(): Promise<void> {
method loadDefaultCatalog (line 80) | private loadDefaultCatalog(): void {
method onUserCatalogUpdate (line 85) | private onUserCatalogUpdate(content: unknown): void {
method notify (line 151) | override notify(): void {
method dispose (line 156) | dispose(): void {
method getCatalog (line 161) | public getCatalog(): ApplicationCatalog {
method getModels (line 165) | public getModels(): ModelInfo[] {
method getModelById (line 169) | public getModelById(modelId: string): ModelInfo {
method getModelByName (line 177) | public getModelByName(modelName: string): ModelInfo {
method getRecipes (line 185) | public getRecipes(): Recipe[] {
method getRecipeById (line 189) | public getRecipeById(recipeId: string): Recipe {
method importUserModels (line 201) | async importUserModels(localModels: LocalModelImportInfo[]): Promise<v...
method removeUserModel (line 253) | async removeUserModel(modelId: string): Promise<void> {
method getUserCatalogPath (line 281) | private getUserCatalogPath(): string {
method filterRecipes (line 285) | public filterRecipes(filters: RecipeFilters): FilterRecipesResult {
FILE: packages/backend/src/managers/gitManager.ts
type GitCloneInfo (line 24) | interface GitCloneInfo {
class GitManager (line 30) | class GitManager {
method cloneRepository (line 31) | async cloneRepository(gitCloneInfo: GitCloneInfo): Promise<void> {
method getRepositoryRemotes (line 44) | async getRepositoryRemotes(directory: string): Promise<
method getRepositoryStatus (line 74) | async getRepositoryStatus(directory: string): Promise<{
method getCurrentCommit (line 108) | async getCurrentCommit(directory: string): Promise<string> {
method pull (line 112) | async pull(directory: string): Promise<void> {
method processCheckout (line 120) | async processCheckout(gitCloneInfo: GitCloneInfo): Promise<void> {
method isRepositoryUpToDate (line 166) | async isRepositoryUpToDate(
method getTrackingBranch (line 241) | async getTrackingBranch(directory: string, branch: string): Promise<st...
method getBehindAhead (line 255) | async getBehindAhead(dir: string, localBranch: string): Promise<{ behi...
method getTagCommitId (line 302) | async getTagCommitId(directory: string, tagName: string): Promise<stri...
FILE: packages/backend/src/managers/inference/inferenceManager.ts
class InferenceManager (line 39) | class InferenceManager extends Publisher<InferenceServer[]> implements D...
method constructor (line 48) | constructor(
method init (line 65) | init(): void {
method isInitialize (line 74) | public isInitialize(): boolean {
method dispose (line 81) | dispose(): void {
method cleanDisposables (line 90) | private cleanDisposables(): void {
method getServers (line 97) | public getServers(): InferenceServer[] {
method getRegisteredProviders (line 105) | public getRegisteredProviders(): InferenceType[] {
method get (line 114) | public get(containerId: string): InferenceServer | undefined {
method findServerByModel (line 122) | public findServerByModel(model: ModelInfo): InferenceServer | undefined {
method requestCreateInferenceServer (line 143) | requestCreateInferenceServer(config: InferenceServerConfig): string {
method createInferenceServer (line 178) | async createInferenceServer(config: InferenceServerConfig): Promise<st...
method updateServerStatus (line 244) | private updateServerStatus(engineId: string, containerId: string): void {
method watchContainerStatus (line 278) | private watchContainerStatus(engineId: string, containerId: string): v...
method watchMachineEvent (line 309) | private watchMachineEvent(_event: PodmanConnectionEvent): void {
method watchContainerStart (line 317) | private watchContainerStart(event: ContainerEvent): void {
method retryableRefresh (line 343) | private retryableRefresh(retry: number = 3): void {
method refreshInferenceServers (line 368) | private async refreshInferenceServers(): Promise<void> {
method removeInferenceServer (line 417) | private removeInferenceServer(containerId: string): void {
method deleteInferenceServer (line 426) | async deleteInferenceServer(containerId: string): Promise<void> {
method startInferenceServer (line 457) | async startInferenceServer(containerId: string): Promise<void> {
method stopInferenceServer (line 486) | async stopInferenceServer(containerId: string): Promise<void> {
method setInferenceServerStatus (line 518) | private setInferenceServerStatus(containerId: string, status: Inferenc...
FILE: packages/backend/src/managers/instructlab/instructlabManager.spec.ts
function waitTasks (line 99) | async function waitTasks(id: string, nb: number): Promise<Task[]> {
FILE: packages/backend/src/managers/instructlab/instructlabManager.ts
constant INSTRUCTLAB_CONTAINER_LABEL (line 39) | const INSTRUCTLAB_CONTAINER_LABEL = 'ai-lab-instructlab-container';
class InstructlabManager (line 41) | class InstructlabManager implements Disposable {
method constructor (line 46) | constructor(
method init (line 57) | init(): void {
method dispose (line 63) | dispose(): void {
method refreshInstructlabContainer (line 68) | private async refreshInstructlabContainer(id?: string): Promise<void> {
method watchMachineEvent (line 80) | private async watchMachineEvent(event: PodmanConnectionEvent): Promise...
method onStartContainerEvent (line 86) | private async onStartContainerEvent(event: ContainerEvent): Promise<vo...
method onStopContainerEvent (line 90) | private onStopContainerEvent(event: ContainerEvent): void {
method getSessions (line 98) | public getSessions(): InstructlabSession[] {
method getInstructLabContainer (line 119) | async getInstructLabContainer(): Promise<string | undefined> {
method requestCreateInstructlabContainer (line 131) | async requestCreateInstructlabContainer(config: InstructlabContainerCo...
method createInstructlabContainer (line 189) | async createInstructlabContainer(
method getInstructLabContainerFolder (line 259) | private async getInstructLabContainerFolder(): Promise<string> {
FILE: packages/backend/src/managers/llama-stack/llamaStackManager.spec.ts
class TestLlamaStackManager (line 66) | class TestLlamaStackManager extends LlamaStackManager {
method refreshLlamaStackContainers (line 67) | public override async refreshLlamaStackContainers(): Promise<void> {
method getContainersInfo (line 71) | public override getContainersInfo(): LlamaStackContainers | undefined {
constant LLAMA_STACK_CONTAINER_RUNNING (line 105) | const LLAMA_STACK_CONTAINER_RUNNING = {
constant LLAMA_STACK_CONTAINER_STOPPED (line 114) | const LLAMA_STACK_CONTAINER_STOPPED = {
constant NON_LLAMA_STACK_CONTAINER (line 119) | const NON_LLAMA_STACK_CONTAINER = { Id: 'dummyId' } as unknown as Contai...
constant NO_OP_DISPOSABLE (line 121) | const NO_OP_DISPOSABLE = {
function waitTasks (line 165) | async function waitTasks(id: string, nb: number): Promise<Task[]> {
FILE: packages/backend/src/managers/llama-stack/llamaStackManager.ts
constant LLAMA_STACK_CONTAINER_LABEL (line 49) | const LLAMA_STACK_CONTAINER_LABEL = 'ai-lab-llama-stack-container';
constant LLAMA_STACK_API_PORT_LABEL (line 50) | const LLAMA_STACK_API_PORT_LABEL = 'ai-lab-llama-stack-api-port';
constant LLAMA_STACK_PLAYGROUND_PORT_LABEL (line 51) | const LLAMA_STACK_PLAYGROUND_PORT_LABEL = 'ai-lab-llama-stack-playground...
constant SECOND (line 52) | const SECOND: number = 1_000_000_000;
function getLocalIPAddress (line 58) | async function getLocalIPAddress(connection: ContainerProviderConnection...
class LlamaStackManager (line 80) | class LlamaStackManager implements Disposable {
method constructor (line 87) | constructor(
method init (line 101) | init(): void {
method dispose (line 107) | dispose(): void {
method watchMachineEvent (line 112) | private async watchMachineEvent(event: PodmanConnectionEvent): Promise...
method onStartContainerEvent (line 121) | private async onStartContainerEvent(): Promise<void> {
method onStopContainerEvent (line 125) | private async onStopContainerEvent(event: ContainerEvent): Promise<voi...
method getLlamaStackContainers (line 144) | async getLlamaStackContainers(): Promise<LlamaStackContainers | undefi...
method refreshLlamaStackContainers (line 156) | protected async refreshLlamaStackContainers(): Promise<void> {
method requestcreateLlamaStackContainerss (line 205) | async requestcreateLlamaStackContainerss(config: LlamaStackContainerCo...
method startBoth (line 237) | private async startBoth(
method createPlaygroundFromServer (line 288) | private async createPlaygroundFromServer(
method createBoth (line 333) | private async createBoth(
method createLlamaStackContainers (line 373) | async createLlamaStackContainers(
method createServerContainer (line 400) | private async createServerContainer(
method waitLlamaStackServerHealthy (line 460) | async waitLlamaStackServerHealthy(
method registerModels (line 484) | async registerModels(
method createPlaygroundContainer (line 513) | private async createPlaygroundContainer(
method getLlamaStackContainersFolder (line 563) | private async getLlamaStackContainersFolder(): Promise<string> {
method getContainersInfo (line 570) | protected getContainersInfo(): LlamaStackContainers | undefined {
FILE: packages/backend/src/managers/modelsManager.spec.ts
method completed (line 99) | get completed(): boolean {
function mockFiles (line 171) | function mockFiles(now: Date): void {
method getModels (line 212) | getModels(): ModelInfo[] {
method getModels (line 267) | getModels(): ModelInfo[] {
method getModels (line 308) | getModels(): ModelInfo[] {
method getModels (line 367) | getModels(): ModelInfo[] {
method getModels (line 673) | getModels(): ModelInfo[] {
method getModels (line 710) | getModels(): ModelInfo[] {
method getModels (line 743) | getModels(): ModelInfo[] {
method getModels (line 775) | getModels(): ModelInfo[] {
method getModels (line 814) | getModels(): ModelInfo[] {
FILE: packages/backend/src/managers/modelsManager.ts
class ModelsManager (line 45) | class ModelsManager implements Disposable {
method constructor (line 51) | constructor(
method init (line 66) | async init(): Promise<void> {
method dispose (line 81) | dispose(): void {
method loadLocalModels (line 86) | async loadLocalModels(): Promise<void> {
method getModelsInfo (line 97) | getModelsInfo(): ModelInfo[] {
method sendModelsInfo (line 101) | async sendModelsInfo(): Promise<void> {
method getLocalModelsFromDisk (line 106) | async getLocalModelsFromDisk(): Promise<void> {
method isModelOnDisk (line 112) | isModelOnDisk(modelId: string): boolean {
method getLocalModelInfo (line 116) | getLocalModelInfo(modelId: string): LocalModelInfo {
method getModelInfo (line 124) | getModelInfo(modelId: string): ModelInfo {
method getLocalModelPath (line 132) | getLocalModelPath(modelId: string): string {
method deleteModel (line 136) | async deleteModel(modelId: string): Promise<void> {
method deleteRemoteModel (line 178) | private async deleteRemoteModel(modelInfo: ModelInfo): Promise<void> {
method requestDownloadModel (line 210) | async requestDownloadModel(model: ModelInfo, labels?: { [key: string]:...
method onDownloadUploadEvent (line 248) | private async onDownloadUploadEvent(event: BaseEvent, action: 'downloa...
method createDownloader (line 296) | public createDownloader(model: ModelInfo, abortSignal: AbortSignal): D...
method createDownloadTask (line 313) | private createDownloadTask(model: ModelInfo, labels?: { [key: string]:...
method downloadModel (line 330) | private async downloadModel(model: ModelInfo, task: Task): Promise<str...
method uploadModelToPodmanMachine (line 375) | async uploadModelToPodmanMachine(
method updateModelInfos (line 402) | private async updateModelInfos(): Promise<void> {
method getModelMetadata (line 410) | async getModelMetadata(modelId: string): Promise<Record<string, unknow...
FILE: packages/backend/src/managers/monitoringManager.spec.ts
function simplifiedCallback (line 50) | function simplifiedCallback(callback: (arg: ContainerStatsInfo) => void,...
FILE: packages/backend/src/managers/monitoringManager.ts
type StatsInfo (line 23) | interface StatsInfo {
type StatsHistory (line 29) | interface StatsHistory {
constant MAX_AGE (line 34) | const MAX_AGE: number = 5 * 60 * 1000;
class MonitoringManager (line 36) | class MonitoringManager extends Publisher<StatsHistory[]> implements Dis...
method constructor (line 40) | constructor(rpcExtension: RpcExtension) {
method monitor (line 46) | async monitor(containerId: string, engineId: string): Promise<Disposab...
method push (line 59) | private push(containerId: string, statsInfo: ContainerStatsInfo): void {
method clear (line 81) | clear(containerId: string): void {
method getStats (line 85) | getStats(): StatsHistory[] {
method dispose (line 89) | dispose(): void {
FILE: packages/backend/src/managers/playground/McpServerManager.ts
constant MCP_SETTINGS (line 28) | const MCP_SETTINGS = 'mcp-settings.json';
class McpServerManager (line 30) | class McpServerManager extends Publisher<McpSettings> implements Disposa...
method constructor (line 35) | constructor(
method init (line 51) | init(): void {
method onMcpSettingsUpdated (line 55) | private onMcpSettingsUpdated(mcpSettings: McpSettings): void {
method getMcpSettings (line 68) | getMcpSettings(): McpSettings {
method toMcpClients (line 72) | async toMcpClients(): Promise<McpClient[]> {
method dispose (line 77) | dispose(): void {
FILE: packages/backend/src/managers/playground/aiSdk.spec.ts
function createTestModel (line 356) | function createTestModel({
FILE: packages/backend/src/managers/playground/aiSdk.ts
function toCoreMessage (line 44) | function toCoreMessage(...messages: Message[]): ModelMessage[] {
class AiStreamProcessor (line 83) | class AiStreamProcessor<TOOLS extends ToolSet> {
method constructor (line 88) | constructor(
FILE: packages/backend/src/managers/playgroundV2Manager.ts
class PlaygroundV2Manager (line 37) | class PlaygroundV2Manager implements Disposable {
method constructor (line 40) | constructor(
method deleteConversation (line 51) | deleteConversation(conversationId: string): void {
method requestCreatePlayground (line 60) | async requestCreatePlayground(name: string, model: ModelInfo): Promise...
method createPlayground (line 109) | async createPlayground(name: string, model: ModelInfo, trackingId: str...
method submitSystemPrompt (line 144) | private submitSystemPrompt(conversationId: string, content: string): v...
method setSystemPrompt (line 162) | setSystemPrompt(conversationId: string, content: string | undefined): ...
method submit (line 192) | async submit(conversationId: string, userInput: string, options?: Mode...
method getConversations (line 278) | getConversations(): Conversation[] {
method getFreeName (line 282) | private getFreeName(): string {
method isNameFree (line 292) | private isNameFree(name: string): boolean {
method dispose (line 296) | dispose(): void {
FILE: packages/backend/src/managers/podmanConnection.spec.ts
function getListeners (line 293) | async function getListeners(): Promise<{
FILE: packages/backend/src/managers/podmanConnection.ts
type PodmanConnectionEvent (line 41) | interface PodmanConnectionEvent {
type PodmanRunOptions (line 45) | interface PodmanRunOptions extends RunOptions {
class PodmanConnection (line 49) | class PodmanConnection extends Publisher<ContainerProviderConnectionInfo...
method constructor (line 57) | constructor(rpcExtension: RpcExtension) {
method execute (line 74) | execute(connection: ContainerProviderConnection, args: string[], optio...
method executeSSH (line 102) | executeSSH(connection: ContainerProviderConnection, args: string[], op...
method executeLegacy (line 112) | protected executeLegacy(args: string[], options?: RunOptions): Promise...
method getNameLegacyCompatibility (line 124) | protected getNameLegacyCompatibility(connection: ContainerProviderConn...
method getContainerProviderConnections (line 128) | getContainerProviderConnections(): ContainerProviderConnection[] {
method getContainerProviderConnectionInfo (line 135) | getContainerProviderConnectionInfo(): ContainerProviderConnectionInfo[] {
method init (line 155) | init(): void {
method dispose (line 162) | dispose(): void {
method getProviderContainerConnection (line 171) | protected getProviderContainerConnection(connection: ContainerProvider...
method refreshProviders (line 182) | protected refreshProviders(): void {
method listen (line 199) | private listen(): void {
method parseVMType (line 248) | protected parseVMType(vmtype: string | undefined): VMType {
method getVMType (line 262) | async getVMType(name?: string): Promise<VMType> {
method getContainerProviderConnection (line 284) | getContainerProviderConnection(connection: ContainerProviderConnection...
method findRunningContainerProviderConnection (line 292) | findRunningContainerProviderConnection(): ContainerProviderConnection ...
method getConnectionByEngineId (line 304) | async getConnectionByEngineId(engineId: string): Promise<ContainerProv...
method checkContainerConnectionStatusAndResources (line 315) | async checkContainerConnectionStatusAndResources(
FILE: packages/backend/src/managers/recipes/BuilderManager.ts
class BuilderManager (line 39) | class BuilderManager implements Disposable {
method constructor (line 42) | constructor(private taskRegistry: TaskRegistry) {}
method dispose (line 47) | dispose(): void {
method build (line 52) | async build(
FILE: packages/backend/src/managers/recipes/PodManager.ts
type PodEvent (line 23) | interface PodEvent {
class PodManager (line 27) | class PodManager implements Disposable {
method dispose (line 42) | dispose(): void {
method init (line 46) | init(): void {
method getAllPods (line 74) | getAllPods(): Promise<PodInfo[]> {
method findPodByLabelsValues (line 82) | async findPodByLabelsValues(requestedLabels: Record<string, string>): ...
method getPodsWithLabels (line 103) | async getPodsWithLabels(labels: string[]): Promise<PodInfo[]> {
method getHealth (line 114) | async getHealth(pod: PodInfo): Promise<PodHealth> {
method getPodById (line 130) | private async getPodById(id: string): Promise<PodInfo> {
method getPod (line 137) | async getPod(engineId: string, Id: string): Promise<PodInfo> {
method stopPod (line 144) | async stopPod(engineId: string, id: string): Promise<void> {
method removePod (line 148) | async removePod(engineId: string, id: string): Promise<void> {
method startPod (line 152) | async startPod(engineId: string, id: string): Promise<void> {
method createPod (line 156) | async createPod(podOptions: PodCreateOptions): Promise<{ engineId: str...
FILE: packages/backend/src/managers/recipes/RecipeManager.spec.ts
function getInitializedRecipeManager (line 130) | async function getInitializedRecipeManager(): Promise<RecipeManager> {
FILE: packages/backend/src/managers/recipes/RecipeManager.ts
type AIContainers (line 36) | interface AIContainers {
class RecipeManager (line 41) | class RecipeManager implements Disposable {
method constructor (line 42) | constructor(
method dispose (line 51) | dispose(): void {}
method init (line 53) | init(): void {}
method doCheckout (line 55) | private async doCheckout(gitCloneInfo: GitCloneInfo, labels?: { [id: s...
method cloneRecipe (line 76) | public async cloneRecipe(recipe: Recipe, labels?: { [key: string]: str...
method buildRecipe (line 99) | public async buildRecipe(options: ApplicationOptions, labels?: { [key:...
method getConfigAndFilterContainers (line 172) | private getConfigAndFilterContainers(
method filterContainers (line 214) | private filterContainers(aiConfig: AIConfig): ContainerConfig[] {
method getConfiguration (line 220) | private getConfiguration(recipeBaseDir: string | undefined, localFolde...
FILE: packages/backend/src/managers/snippets/java-okhttp-snippet.ts
function javaOkHttpGenerator (line 22) | async function javaOkHttpGenerator(requestOptions: RequestOptions): Prom...
FILE: packages/backend/src/managers/snippets/python-langchain-snippet.ts
function pythonLangChainGenerator (line 22) | async function pythonLangChainGenerator(requestOptions: RequestOptions):...
FILE: packages/backend/src/managers/snippets/quarkus-snippet.ts
constant SUFFIX_LENGTH (line 23) | const SUFFIX_LENGTH = '/chat/completions'.length;
constant METADATA_URL (line 25) | const METADATA_URL =
function getQuarkusLangchain4jVersion (line 30) | async function getQuarkusLangchain4jVersion(): Promise<string> {
function quarkusLangchain4Jgenerator (line 39) | async function quarkusLangchain4Jgenerator(requestOptions: RequestOption...
FILE: packages/backend/src/models/AIConfig.ts
type ContainerConfig (line 22) | interface ContainerConfig {
type AIConfigFormat (line 34) | enum AIConfigFormat {
type AIConfig (line 37) | interface AIConfig {
type AIConfigFile (line 44) | interface AIConfigFile {
function isString (line 49) | function isString(value: unknown): value is string {
function assertString (line 53) | function assertString(value: unknown): string {
function parseYamlFile (line 58) | function parseYamlFile(filepath: string, defaultArch: string): AIConfig {
FILE: packages/backend/src/models/ApplicationOptions.ts
type ApplicationOptions (line 23) | type ApplicationOptions = ApplicationOptionsDefault | ApplicationOptions...
type ApplicationOptionsDefault (line 25) | interface ApplicationOptionsDefault {
type ApplicationOptionsWithModelInference (line 31) | type ApplicationOptionsWithModelInference = ApplicationOptionsDefault & {
function isApplicationOptionsWithModelInference (line 35) | function isApplicationOptionsWithModelInference(
FILE: packages/backend/src/models/HuggingFaceModelHandler.spec.ts
method getModels (line 51) | getModels(): ModelInfo[] {
FILE: packages/backend/src/models/HuggingFaceModelHandler.ts
function parseURL (line 27) | function parseURL(url: string): { repo: string; revision?: string } | un...
class HuggingFaceDownloader (line 35) | class HuggingFaceDownloader extends Downloader {
method constructor (line 38) | constructor(
method getTarget (line 47) | override getTarget(): string {
method perform (line 51) | async perform(id: string): Promise<void> {
class HuggingFaceModelHandler (line 87) | class HuggingFaceModelHandler extends ModelHandler {
method constructor (line 88) | constructor(modelsManager: ModelsManager) {
method accept (line 92) | accept(url: string): boolean {
method createDownloader (line 96) | createDownloader(model: ModelInfo, abortSignal: AbortSignal): Download...
method deleteModel (line 104) | async deleteModel(model: ModelInfo): Promise<void> {
method dispose (line 112) | dispose(): void {}
method getLocalModelsFromDisk (line 114) | async getLocalModelsFromDisk(): Promise<void> {
FILE: packages/backend/src/models/ModelHandler.ts
method constructor (line 30) | protected constructor(name: string, modelsManager: ModelsManager) {
FILE: packages/backend/src/models/TaskRunner.ts
type RunAsTaskOptions (line 19) | interface RunAsTaskOptions {
type TaskRunnerTools (line 31) | interface TaskRunnerTools {
FILE: packages/backend/src/models/URLModelHandler.ts
class URLModelHandler (line 28) | class URLModelHandler extends ModelHandler {
method constructor (line 31) | constructor(
method dispose (line 42) | override dispose(): void {
method accept (line 46) | override accept(url: string): boolean {
method createDownloader (line 50) | override createDownloader(model: ModelInfo, abortSignal: AbortSignal):...
method getLocalModelsFromDisk (line 56) | override async getLocalModelsFromDisk(): Promise<void> {
method deleteModel (line 97) | async deleteModel(model: ModelInfo): Promise<void> {
FILE: packages/backend/src/models/baseEvent.ts
type BaseEvent (line 19) | interface BaseEvent {
type CompletionEvent (line 25) | interface CompletionEvent extends BaseEvent {
type ProgressEvent (line 30) | interface ProgressEvent extends BaseEvent {
FILE: packages/backend/src/registries/ApplicationRegistry.ts
class ApplicationRegistry (line 21) | class ApplicationRegistry<T extends RecipeModelIndex> {
method keys (line 24) | keys(): RecipeModelIndex[] {
method has (line 28) | has(recipeModel: RecipeModelIndex): boolean {
method delete (line 32) | delete(recipeModel: RecipeModelIndex): boolean {
method values (line 36) | values(): IterableIterator<T> {
method get (line 40) | get(recipeModel: RecipeModelIndex): T {
method set (line 46) | set(recipeModel: RecipeModelIndex, value: T): void {
method clear (line 50) | clear(): void {
method hash (line 54) | private hash(recipeModel: RecipeModelIndex): string {
FILE: packages/backend/src/registries/CancellationTokenRegistry.ts
class CancellationTokenRegistry (line 20) | class CancellationTokenRegistry implements Disposable {
method constructor (line 24) | constructor() {
method createCancellationTokenSource (line 33) | createCancellationTokenSource(func?: () => void): number {
method getCancellationTokenSource (line 48) | getCancellationTokenSource(id: number): CancellationTokenSource | unde...
method hasCancellationTokenSource (line 55) | hasCancellationTokenSource(id: number): boolean {
method cancel (line 59) | cancel(tokenId: number): void {
method delete (line 66) | delete(tokenId: number): void {
method dispose (line 70) | dispose(): void {
FILE: packages/backend/src/registries/ConfigurationRegistry.ts
constant CONFIGURATION_SECTIONS (line 25) | const CONFIGURATION_SECTIONS: string[] = [
constant API_PORT_DEFAULT (line 36) | const API_PORT_DEFAULT = 10434;
class ConfigurationRegistry (line 38) | class ConfigurationRegistry extends Publisher<ExtensionConfiguration> im...
method constructor (line 43) | constructor(
method getExtensionConfiguration (line 53) | getExtensionConfiguration(): ExtensionConfiguration {
method getPodmanDesktopVersion (line 66) | getPodmanDesktopVersion(): string {
method getFieldName (line 70) | private getFieldName(section: string): keyof Partial<ExtensionConfigur...
method updateExtensionConfiguration (line 74) | async updateExtensionConfiguration(update: Partial<ExtensionConfigurat...
method getModelsPath (line 85) | private getModelsPath(): string {
method dispose (line 93) | dispose(): void {
method init (line 97) | init(): void {
FILE: packages/backend/src/registries/ContainerRegistry.ts
type Subscriber (line 20) | type Subscriber = {
type ContainerEvent (line 25) | interface ContainerEvent {
type ContainerHealthy (line 29) | interface ContainerHealthy {
class ContainerRegistry (line 33) | class ContainerRegistry implements podmanDesktopApi.Disposable {
method init (line 47) | init(): void {
method dispose (line 76) | dispose(): void {
method subscribe (line 80) | subscribe(containerId: string, callback: (status: string) => void): po...
FILE: packages/backend/src/registries/ConversationRegistry.ts
class ConversationRegistry (line 32) | class ConversationRegistry extends Publisher<Conversation[]> implements ...
method constructor (line 36) | constructor(rpcExtension: RpcExtension) {
method getUniqueId (line 42) | getUniqueId(): string {
method removeMessage (line 51) | removeMessage(conversationId: string, messageId: string): void {
method update (line 64) | update(conversationId: string, messageId: string, message: Partial<Cha...
method deleteConversation (line 80) | deleteConversation(id: string): void {
method createConversation (line 85) | createConversation(name: string, modelId: string): string {
method completeMessage (line 106) | completeMessage(conversationId: string, messageId: string): void {
method setUsage (line 126) | setUsage(conversationId: string, usage: ModelUsage): void {
method textDelta (line 141) | textDelta(conversationId: string, messageId: string, delta: string): v...
method toolResult (line 156) | toolResult(conversationId: string, toolCallId: string, toolResult: str...
method submit (line 183) | submit(conversationId: string, message: Message): void {
method dispose (line 194) | dispose(): void {
method get (line 198) | get(conversationId: string): Conversation {
method getAll (line 204) | getAll(): Conversation[] {
FILE: packages/backend/src/registries/InferenceProviderRegistry.ts
class InferenceProviderRegistry (line 25) | class InferenceProviderRegistry extends Publisher<string[]> {
method constructor (line 27) | constructor(rpcExtension: RpcExtension) {
method register (line 32) | register(provider: InferenceProvider): Disposable {
method unregister (line 41) | unregister(name: string): void {
method getAll (line 45) | getAll(): InferenceProvider[] {
method getByType (line 49) | getByType(type: InferenceType): InferenceProvider[] {
method get (line 53) | get(name: string): InferenceProvider {
FILE: packages/backend/src/registries/LocalRepositoryRegistry.ts
class LocalRepositoryRegistry (line 31) | class LocalRepositoryRegistry extends Publisher<LocalRepository[]> imple...
method constructor (line 36) | constructor(
method dispose (line 44) | dispose(): void {
method init (line 48) | init(): void {
method register (line 56) | register(localRepository: LocalRepository): Disposable {
method unregister (line 65) | unregister(path: string): void {
method deleteLocalRepository (line 70) | async deleteLocalRepository(path: string): Promise<void> {
method getLocalRepositories (line 76) | getLocalRepositories(): LocalRepository[] {
method loadLocalRecipeRepositories (line 80) | private loadLocalRecipeRepositories(recipes: Recipe[]): void {
FILE: packages/backend/src/registries/ModelHandlerRegistry.ts
class ModelHandlerRegistry (line 24) | class ModelHandlerRegistry extends Publisher<string[]> {
method constructor (line 26) | constructor(rpcExtension: RpcExtension) {
method register (line 31) | register(provider: ModelHandler): Disposable {
method unregister (line 40) | unregister(provider: ModelHandler): void {
method getAll (line 45) | getAll(): ModelHandler[] {
method findModelHandler (line 49) | findModelHandler(url: string): ModelHandler | undefined {
FILE: packages/backend/src/registries/NavigationRegistry.ts
constant RECIPE_START_ROUTE (line 22) | const RECIPE_START_ROUTE = 'recipe.start';
constant RECIPE_START_NAVIGATE_COMMAND (line 23) | const RECIPE_START_NAVIGATE_COMMAND = 'ai-lab.navigation.recipe.start';
constant INFERENCE_CREATE_ROUTE (line 25) | const INFERENCE_CREATE_ROUTE = 'inference.create';
constant INFERENCE_CREATE_NAVIGATE_COMMAND (line 26) | const INFERENCE_CREATE_NAVIGATE_COMMAND = 'ai-lab.navigation.inference.c...
class NavigationRegistry (line 28) | class NavigationRegistry implements Disposable {
method constructor (line 32) | constructor(
method init (line 37) | init(): void {
method readRoute (line 60) | public readRoute(): string | undefined {
method dispose (line 66) | dispose(): void {
method updateRoute (line 70) | protected async updateRoute(route: string): Promise<void> {
method navigateToRecipeStart (line 76) | public async navigateToRecipeStart(recipeId: string, trackingId: strin...
method navigateToInferenceCreate (line 80) | public async navigateToInferenceCreate(trackingId: string): Promise<vo...
FILE: packages/backend/src/registries/TaskRegistry.ts
class TaskRegistry (line 27) | class TaskRegistry implements Disposable {
method constructor (line 35) | constructor(private rpcExtension: RpcExtension) {}
method dispose (line 37) | dispose(): void {
method get (line 47) | get(id: string): Task | undefined {
method createTask (line 59) | createTask(name: string, state: TaskState, labels: { [id: string]: str...
method updateTask (line 76) | updateTask(task: Task): void {
method delete (line 89) | delete(taskId: string): void {
method deleteAll (line 97) | deleteAll(taskIds: string[]): void {
method getTasks (line 106) | getTasks(): Task[] {
method getTasksByLabels (line 115) | getTasksByLabels(requestedLabels: { [key: string]: string }): Task[] {
method findTaskByLabels (line 123) | findTaskByLabels(requestedLabels: { [key: string]: string }): Task | u...
method filter (line 127) | private filter(task: Task, requestedLabels: { [key: string]: string })...
method deleteByLabels (line 142) | deleteByLabels(labels: { [key: string]: string }): void {
method notify (line 146) | private notify(): void {
FILE: packages/backend/src/studio-api-impl.ts
type PortQuickPickItem (line 62) | interface PortQuickPickItem extends podmanDesktopApi.QuickPickItem {
class StudioApiImpl (line 66) | class StudioApiImpl implements StudioAPI {
method constructor (line 67) | constructor(
method readRoute (line 84) | async readRoute(): Promise<string | undefined> {
method requestDeleteConversation (line 88) | async requestDeleteConversation(conversationId: string): Promise<void> {
method requestCreatePlayground (line 102) | async requestCreatePlayground(name: string, model: ModelInfo): Promise...
method submitPlaygroundMessage (line 111) | submitPlaygroundMessage(containerId: string, userInput: string, option...
method setPlaygroundSystemPrompt (line 115) | async setPlaygroundSystemPrompt(conversationId: string, content: strin...
method getPlaygroundConversations (line 119) | async getPlaygroundConversations(): Promise<Conversation[]> {
method getExtensionConfiguration (line 123) | async getExtensionConfiguration(): Promise<ExtensionConfiguration> {
method getPodmanDesktopVersion (line 127) | async getPodmanDesktopVersion(): Promise<string> {
method updateExtensionConfiguration (line 131) | async updateExtensionConfiguration(update: Partial<ExtensionConfigurat...
method getSnippetLanguages (line 135) | async getSnippetLanguages(): Promise<Language[]> {
method createSnippet (line 139) | createSnippet(options: RequestOptions, language: string, variant: stri...
method getInferenceServers (line 143) | async getInferenceServers(): Promise<InferenceServer[]> {
method getRegisteredProviders (line 147) | async getRegisteredProviders(): Promise<InferenceType[]> {
method requestDeleteInferenceServer (line 151) | async requestDeleteInferenceServer(...containerIds: string[]): Promise...
method requestCreateInferenceServer (line 178) | async requestCreateInferenceServer(options: CreationInferenceServerOpt...
method startInferenceServer (line 188) | startInferenceServer(containerId: string): Promise<void> {
method stopInferenceServer (line 192) | stopInferenceServer(containerId: string): Promise<void> {
method ping (line 196) | async ping(): Promise<string> {
method openURL (line 200) | async openURL(url: string): Promise<boolean> {
method openFile (line 204) | async openFile(file: string, recipeId?: string): Promise<boolean> {
method openDialog (line 218) | async openDialog(options?: podmanDesktopApi.OpenDialogOptions): Promis...
method cloneApplication (line 222) | async cloneApplication(recipeId: string): Promise<void> {
method getContainerProviderConnection (line 229) | async getContainerProviderConnection(): Promise<ContainerProviderConne...
method requestPullApplication (line 233) | async requestPullApplication(options: RecipePullOptions): Promise<stri...
method getModelsInfo (line 267) | async getModelsInfo(): Promise<ModelInfo[]> {
method getModelMetadata (line 271) | getModelMetadata(modelId: string): Promise<Record<string, unknown>> {
method getCatalog (line 275) | async getCatalog(): Promise<ApplicationCatalog> {
method filterRecipes (line 279) | async filterRecipes(filters: RecipeFilters): Promise<FilterRecipesResu...
method requestRemoveLocalModel (line 283) | async requestRemoveLocalModel(modelId: string): Promise<void> {
method navigateToContainer (line 309) | navigateToContainer(containerId: string): Promise<void> {
method navigateToPod (line 313) | async navigateToPod(podId: string): Promise<void> {
method navigateToResources (line 320) | async navigateToResources(): Promise<void> {
method navigateToEditConnectionProvider (line 327) | async navigateToEditConnectionProvider(connectionName: string): Promis...
method getApplicationsState (line 335) | async getApplicationsState(): Promise<ApplicationState[]> {
method requestStartApplication (line 339) | async requestStartApplication(recipeId: string, modelId: string): Prom...
method requestStopApplication (line 345) | async requestStopApplication(recipeId: string, modelId: string): Promi...
method requestRemoveApplication (line 351) | async requestRemoveApplication(recipeId: string, modelId: string): Pro...
method requestRestartApplication (line 379) | async requestRestartApplication(recipeId: string, modelId: string): Pr...
method requestOpenApplication (line 415) | async requestOpenApplication(recipeId: string, modelId: string): Promi...
method telemetryLogUsage (line 460) | async telemetryLogUsage(eventName: string, data?: Record<string, unkno...
method telemetryLogError (line 464) | async telemetryLogError(eventName: string, data?: Record<string, unkno...
method getLocalRepositories (line 468) | async getLocalRepositories(): Promise<LocalRepository[]> {
method getTasks (line 472) | async getTasks(): Promise<Task[]> {
method openVSCode (line 476) | async openVSCode(directory: string, recipeId?: string): Promise<void> {
method downloadModel (line 503) | async downloadModel(modelId: string): Promise<void> {
method getHostFreePort (line 512) | getHostFreePort(): Promise<number> {
method requestDeleteLocalRepository (line 516) | async requestDeleteLocalRepository(path: string): Promise<void> {
method requestCancelToken (line 537) | async requestCancelToken(tokenId: number): Promise<void> {
method importModels (line 543) | async importModels(models: LocalModelImportInfo[]): Promise<void> {
method validateLocalModel (line 547) | async validateLocalModel(model: LocalModelImportInfo): Promise<void> {
method copyToClipboard (line 561) | copyToClipboard(content: string): Promise<void> {
method checkContainerConnectionStatusAndResources (line 565) | async checkContainerConnectionStatusAndResources(
FILE: packages/backend/src/studio.ts
class Studio (line 67) | class Studio {
method constructor (line 106) | constructor(readonly extensionContext: ExtensionContext) {
method activate (line 110) | public async activate(): Promise<void> {
method deactivate (line 408) | public async deactivate(): Promise<void> {
FILE: packages/backend/src/tests/utils.ts
class TestEventEmitter (line 18) | class TestEventEmitter {
method constructor (line 23) | constructor() {
method fire (line 29) | fire(value: unknown): void {
FILE: packages/backend/src/utils/JsonWatcher.ts
class JsonWatcher (line 22) | class JsonWatcher<T> implements Disposable {
method constructor (line 28) | constructor(
method init (line 33) | init(): void {
method onDidCreate (line 51) | private onDidCreate(): void {
method onDidDelete (line 55) | private onDidDelete(): void {
method onDidChange (line 59) | private onDidChange(): void {
method requestUpdate (line 63) | private requestUpdate(): void {
method updateContent (line 69) | private async updateContent(): Promise<void> {
method dispose (line 83) | dispose(): void {
FILE: packages/backend/src/utils/Publisher.ts
class Publisher (line 20) | class Publisher<T> {
method constructor (line 21) | constructor(
method notify (line 27) | notify(): void {
FILE: packages/backend/src/utils/RecipeConstants.ts
constant CONFIG_FILENAME (line 19) | const CONFIG_FILENAME = 'ai-lab.yaml';
constant POD_LABEL_RECIPE_ID (line 22) | const POD_LABEL_RECIPE_ID = 'ai-lab-recipe-id';
constant POD_LABEL_MODEL_ID (line 23) | const POD_LABEL_MODEL_ID = 'ai-lab-model-id';
constant POD_LABEL_MODEL_PORTS (line 24) | const POD_LABEL_MODEL_PORTS = 'ai-lab-model-ports';
constant POD_LABEL_APP_PORTS (line 25) | const POD_LABEL_APP_PORTS = 'ai-lab-application-ports';
constant IMAGE_LABEL_RECIPE_ID (line 28) | const IMAGE_LABEL_RECIPE_ID = 'ai-lab-recipe-id';
constant IMAGE_LABEL_APP_PORTS (line 29) | const IMAGE_LABEL_APP_PORTS = 'ai-lab-application-ports';
constant IMAGE_LABEL_MODEL_SERVICE (line 30) | const IMAGE_LABEL_MODEL_SERVICE = 'ai-lab-model-service';
constant IMAGE_LABEL_APPLICATION_NAME (line 31) | const IMAGE_LABEL_APPLICATION_NAME = 'ai-lab-application-name';
FILE: packages/backend/src/utils/arch.ts
function goarch (line 25) | function goarch(): string {
FILE: packages/backend/src/utils/catalogUtils.ts
type CatalogFormat (line 24) | enum CatalogFormat {
function sanitize (line 29) | function sanitize(rawObject: object): ApplicationCatalog {
function hasCatalogWrongFormat (line 52) | function hasCatalogWrongFormat(raw: object): boolean {
function adaptToCurrent (line 58) | function adaptToCurrent(raw: object): object & { version: string } {
function merge (line 86) | function merge(a: ApplicationCatalog, b: ApplicationCatalog): Applicatio...
function isNonNullObject (line 102) | function isNonNullObject(obj: unknown): obj is object {
function isStringRecord (line 106) | function isStringRecord(obj: unknown): obj is Record<string, string> {
function isStringArray (line 113) | function isStringArray(obj: unknown): obj is Array<string> {
function sanitizeRecipe (line 117) | function sanitizeRecipe(recipe: unknown): Recipe {
function isLocalModelInfo (line 153) | function isLocalModelInfo(obj: unknown): obj is LocalModelInfo {
function sanitizeModel (line 163) | function sanitizeModel(model: unknown): ModelInfo {
function sanitizeCategory (line 197) | function sanitizeCategory(category: unknown): Category {
FILE: packages/backend/src/utils/downloader.ts
method constructor (line 27) | protected constructor(
method getTarget (line 32) | getTarget(): string {
FILE: packages/backend/src/utils/imagesUtils.ts
function getImageTag (line 22) | function getImageTag(recipe: Recipe, container: ContainerConfig): string {
FILE: packages/backend/src/utils/inferenceUtils.ts
constant LABEL_INFERENCE_SERVER (line 30) | const LABEL_INFERENCE_SERVER: string = 'ai-lab-inference-server';
function getImageInfo (line 38) | async function getImageInfo(
function withDefaultConfiguration (line 71) | async function withDefaultConfiguration(
function isTransitioning (line 87) | function isTransitioning(server: InferenceServer): boolean {
function parseInferenceType (line 104) | function parseInferenceType(value: string | undefined): InferenceType {
function getInferenceType (line 115) | function getInferenceType(modelsInfo: ModelInfo[]): InferenceType {
FILE: packages/backend/src/utils/mcpUtils.ts
function toMcpClients (line 22) | async function toMcpClients(...mcpServers: McpServer[]): Promise<McpClie...
FILE: packages/backend/src/utils/modelsUtils.ts
constant MACHINE_BASE_FOLDER (line 24) | const MACHINE_BASE_FOLDER = '/home/user/ai-lab/models';
function getLocalModelFile (line 30) | function getLocalModelFile(modelInfo: ModelInfo): string {
function getMountPath (line 39) | function getMountPath(modelInfo: ModelInfo): string {
function getRemoteModelFile (line 53) | function getRemoteModelFile(modelInfo: ModelInfo): string {
type ModelMountInfo (line 59) | interface ModelMountInfo {
function getHuggingFaceModelMountInfo (line 64) | function getHuggingFaceModelMountInfo(modelInfo: ModelInfo): ModelMountI...
function isModelUploaded (line 88) | async function isModelUploaded(machine: string, modelInfo: ModelInfo): P...
function deleteRemoteModel (line 104) | async function deleteRemoteModel(machine: string, modelInfo: ModelInfo):...
function getModelPropertiesForEnvironment (line 113) | function getModelPropertiesForEnvironment(modelInfo: ModelInfo): string[] {
FILE: packages/backend/src/utils/pathUtils.ts
function getParentDirectory (line 21) | function getParentDirectory(filePath: string): string {
function escapeSpaces (line 29) | function escapeSpaces(path: string): string {
FILE: packages/backend/src/utils/podman.ts
constant MIN_CPUS_VALUE (line 21) | const MIN_CPUS_VALUE = 4;
type MachineJSON (line 23) | type MachineJSON = {
function getPodmanCli (line 35) | function getPodmanCli(): string {
function getCustomBinaryPath (line 50) | function getCustomBinaryPath(): string | undefined {
function getPodmanMachineName (line 58) | function getPodmanMachineName(connection: ContainerProviderConnection): ...
function getPodmanConnection (line 74) | function getPodmanConnection(connectionName: string): ProviderContainerC...
FILE: packages/backend/src/utils/podsUtils.ts
function getPodHealth (line 20) | function getPodHealth(infos: (string | undefined)[]): PodHealth {
FILE: packages/backend/src/utils/ports.ts
function getFreeRandomPort (line 21) | async function getFreeRandomPort(address: string): Promise<number> {
function getPortsInfo (line 44) | async function getPortsInfo(portDescriptor: string): Promise<string | un...
function getPort (line 52) | async function getPort(portDescriptor: string): Promise<number | undefin...
function getPortsFromLabel (line 71) | function getPortsFromLabel(labels: { [key: string]: string }, key: strin...
FILE: packages/backend/src/utils/randomUtils.ts
function getRandomName (line 24) | function getRandomName(prefix: string): string {
FILE: packages/backend/src/utils/sha.ts
function hasValidSha (line 22) | async function hasValidSha(filePath: string, expectedSha: string): Promi...
function getHash (line 31) | function getHash(content: string): string {
FILE: packages/backend/src/utils/uploader.ts
class Uploader (line 28) | class Uploader {
method constructor (line 33) | constructor(
method perform (line 47) | async perform(id: string): Promise<string> {
FILE: packages/backend/src/utils/urldownloader.spec.ts
class DownloaderTest (line 197) | class DownloaderTest extends URLDownloader {
method getRedirect (line 198) | public override getRedirect(url: string, location: string): string {
constant SITE_EXAMPLE (line 203) | const SITE_EXAMPLE = 'https://example.com/hello';
constant SITE_DUMMY (line 204) | const SITE_DUMMY = 'https://dummy.com/world';
FILE: packages/backend/src/utils/urldownloader.ts
class URLDownloader (line 28) | class URLDownloader extends Downloader {
method constructor (line 31) | constructor(
method perform (line 40) | async perform(id: string): Promise<void> {
method download (line 77) | private download(url: string): Promise<void> {
method getRedirect (line 97) | protected getRedirect(url: string, location: string): string {
method followRedirects (line 106) | private followRedirects(url: string, callback: (message: { ok?: boolea...
FILE: packages/backend/src/utils/utils.ts
function timeout (line 20) | async function timeout(time: number): Promise<void> {
function isEndpointAlive (line 26) | async function isEndpointAlive(endPoint: string): Promise<boolean> {
function getDurationSecondsSince (line 49) | function getDurationSecondsSince(startTimeMs: number): number {
constant DISABLE_SELINUX_LABEL_SECURITY_OPTION (line 53) | const DISABLE_SELINUX_LABEL_SECURITY_OPTION = 'label=disable';
FILE: packages/backend/src/webviewUtils.ts
function getWebviewOptions (line 22) | function getWebviewOptions(extensionUri: Uri): WebviewOptions {
function initWebview (line 32) | async function initWebview(extensionUri: Uri): Promise<WebviewPanel> {
FILE: packages/backend/src/workers/IWorker.ts
type IWorker (line 19) | interface IWorker<T, R> {
FILE: packages/backend/src/workers/WindowsWorker.ts
method enabled (line 22) | enabled(): boolean {
FILE: packages/backend/src/workers/provider/InferenceProvider.spec.ts
class TestInferenceProvider (line 56) | class TestInferenceProvider extends InferenceProvider {
method constructor (line 57) | constructor() {
method enabled (line 61) | enabled(): boolean {
method publicPullImage (line 65) | publicPullImage(
method publicCreateContainer (line 73) | async publicCreateContainer(
method perform (line 85) | async perform(_config: InferenceServerConfig): Promise<InferenceServer> {
method dispose (line 88) | dispose(): void {}
FILE: packages/backend/src/workers/provider/InferenceProvider.ts
type BetterContainerCreateResult (line 33) | type BetterContainerCreateResult = ContainerCreateResult & { engineId: s...
method constructor (line 39) | protected constructor(
method prePerform (line 49) | prePerform(_config: InferenceServerConfig): Promise<void> {
method createContainer (line 55) | protected async createContainer(
method pullImage (line 89) | protected pullImage(
FILE: packages/backend/src/workers/provider/LlamaCppPython.spec.ts
class CDILlamaCppPython (line 562) | class CDILlamaCppPython extends LlamaCppPython {
method isNvidiaCDIConfigured (line 563) | override isNvidiaCDIConfigured(): boolean {
class NoCDILlamaCppPython (line 702) | class NoCDILlamaCppPython extends LlamaCppPython {
method isNvidiaCDIConfigured (line 703) | override isNvidiaCDIConfigured(): boolean {
FILE: packages/backend/src/workers/provider/LlamaCppPython.ts
constant SECOND (line 40) | const SECOND: number = 1_000_000_000;
type Device (line 42) | interface Device {
class LlamaCppPython (line 48) | class LlamaCppPython extends InferenceProvider {
method constructor (line 49) | constructor(
method dispose (line 58) | dispose(): void {}
method getContainerCreateOptions (line 62) | protected async getContainerCreateOptions(
method perform (line 252) | async perform(config: InferenceServerConfig): Promise<InferenceServer> {
method getLlamaCppInferenceImage (line 311) | protected getLlamaCppInferenceImage(vmType: VMType, gpu?: IGPUInfo): s...
method isNvidiaCDIConfigured (line 330) | protected isNvidiaCDIConfigured(gpu?: IGPUInfo): boolean {
FILE: packages/backend/src/workers/provider/OpenVINO.ts
constant SECOND (line 35) | const SECOND: number = 1_000_000_000;
constant CONFIG_FILE_NAME (line 37) | const CONFIG_FILE_NAME = `config-all.json`;
constant GRAPH_CONTENT (line 39) | const GRAPH_CONTENT = `input_stream: "HTTP_REQUEST_PAYLOAD:input"
class OpenVINO (line 76) | class OpenVINO extends InferenceProvider {
method constructor (line 77) | constructor(
method dispose (line 86) | dispose(): void {}
method getContainerCreateOptions (line 90) | protected async getContainerCreateOptions(
method prePerform (line 158) | override async prePerform(config: InferenceServerConfig): Promise<void> {
method perform (line 170) | async perform(config: InferenceServerConfig): Promise<InferenceServer> {
method validateAndGetModelInfo (line 220) | private validateAndGetModelInfo(config: InferenceServerConfig): ModelI...
method ensureGraphFile (line 232) | private async ensureGraphFile(modelFolder: string): Promise<string> {
method ensureConfigFile (line 243) | private async ensureConfigFile(modelInfo: ModelInfo): Promise<string> {
method getOpenVINOInferenceImage (line 260) | protected getOpenVINOInferenceImage(_vmType: VMType): string {
FILE: packages/backend/src/workers/provider/WhisperCpp.ts
class WhisperCpp (line 30) | class WhisperCpp extends InferenceProvider {
method constructor (line 31) | constructor(
method enabled (line 38) | override enabled(): boolean {
method perform (line 42) | override async perform(config: InferenceServerConfig): Promise<Inferen...
method dispose (line 126) | override dispose(): void {}
FILE: packages/backend/src/workers/uploader/UploaderOptions.ts
type UploaderOptions (line 22) | interface UploaderOptions {
FILE: packages/backend/src/workers/uploader/WSLUploader.ts
class WSLUploader (line 28) | class WSLUploader extends WindowsWorker<UploaderOptions, string> {
method perform (line 29) | async perform(options: UploaderOptions): Promise<string> {
FILE: packages/backend/vite.config.js
constant PACKAGE_ROOT (line 25) | const PACKAGE_ROOT = __dirname;
method buildStart (line 62) | async buildStart() {
FILE: packages/backend/vitest.config.js
constant PACKAGE_ROOT (line 22) | const PACKAGE_ROOT = __dirname;
FILE: packages/frontend/src/lib/RecipeCard.spec.ts
class ResizeObserver (line 57) | class ResizeObserver {
FILE: packages/frontend/src/lib/RecipeCardTags.spec.ts
class ResizeObserver (line 37) | class ResizeObserver {
FILE: packages/frontend/src/lib/RecipeCardTags.ts
constant USE_CASES (line 22) | const USE_CASES = ['natural-language-processing', 'audio', 'computer-vis...
constant LANGUAGES (line 23) | const LANGUAGES = ['java', 'javascript', 'python'];
constant FRAMEWORKS (line 24) | const FRAMEWORKS = ['langchain', 'langchain4j', 'quarkus', 'react', 'str...
constant TOOLS (line 25) | const TOOLS = ['none', 'llama-cpp', 'whisper-cpp', 'llama-stack'];
function setupProps (line 32) | async function setupProps(): Promise<void> {
function getColor (line 51) | function getColor(pdColor: string, darkColor: string, lightColor: string...
function createBGColorMap (line 60) | function createBGColorMap(): Map<string, string> {
function createTextColorMap (line 81) | function createTextColorMap(): Map<string, string> {
function getBGColor (line 114) | function getBGColor(tag: string): string {
function getTextColor (line 120) | function getTextColor(tag: string): string {
FILE: packages/frontend/src/lib/RecipesCard.spec.ts
class ResizeObserver (line 37) | class ResizeObserver {
FILE: packages/frontend/src/lib/monaco-editor/monaco.ts
method getWorker (line 23) | getWorker(_: unknown): Worker {
FILE: packages/frontend/src/models/IRouterState.ts
type RouterState (line 19) | interface RouterState {
FILE: packages/frontend/src/pages/CreateService.spec.ts
constant DUMMY_DOWNLOADED_MODEL (line 62) | const DUMMY_DOWNLOADED_MODEL: ModelInfo = {
FILE: packages/frontend/src/pages/NewInstructLabSession.spec.ts
function renderForm (line 129) | async function renderForm(): Promise<RenderResult<any>> {
FILE: packages/frontend/src/pages/Recipes.spec.ts
class ResizeObserver (line 89) | class ResizeObserver {
FILE: packages/frontend/src/pages/StartRecipe.spec.ts
function getSelectedOption (line 197) | function getSelectedOption<T>(container: HTMLElement): T | undefined {
function selectOption (line 210) | async function selectOption(container: HTMLElement, label: string): Prom...
FILE: packages/frontend/src/pages/applications.ts
function getApplicationStatus (line 22) | function getApplicationStatus(appState: ApplicationState): string {
function getApplicationStatusText (line 42) | function getApplicationStatusText(appState: ApplicationState): string {
FILE: packages/frontend/src/pages/instructlab/AboutInstructLab.spec.ts
class ResizeObserver (line 49) | class ResizeObserver {
FILE: packages/frontend/src/stores/conversations.ts
type ConversationWithBackend (line 28) | interface ConversationWithBackend extends Conversation {
function setWithBackend (line 50) | function setWithBackend(set: (value: ConversationWithBackend[]) => void,...
function getModelBackend (line 65) | function getModelBackend(modelsInfo: ModelInfo[], modelId: string): Infe...
FILE: packages/frontend/src/stores/modelsInfo.spec.ts
type MyModel (line 83) | type MyModel = {
FILE: packages/frontend/src/stores/rpcReadable.spec.ts
type Update (line 73) | type Update = {
type Update (line 97) | type Update = {
FILE: packages/frontend/src/stores/rpcReadable.ts
function RPCReadable (line 23) | function RPCReadable<T>(
FILE: packages/frontend/src/utils/client.ts
function getRouterState (line 46) | async function getRouterState(): Promise<RouterState> {
FILE: packages/frontend/src/utils/dimensions.ts
function humanizeAge (line 22) | function humanizeAge(created: number): string {
FILE: packages/frontend/src/utils/fileUtils.ts
function getFilesFromDropEvent (line 24) | function getFilesFromDropEvent(event: DragEvent): LocalModelImportInfo[] {
FILE: packages/frontend/src/utils/printers.ts
function displayPorts (line 19) | function displayPorts(ports: number[]): string {
FILE: packages/frontend/src/utils/versionControlUtils.ts
constant GITHUB_PREFIX (line 19) | const GITHUB_PREFIX = 'https://github.com/';
FILE: packages/frontend/vite.config.js
constant PACKAGE_ROOT (line 29) | const PACKAGE_ROOT = path.dirname(filename);
FILE: packages/shared/src/InstructlabAPI.ts
constant INSTRUCTLAB_API_CHANNEL (line 23) | const INSTRUCTLAB_API_CHANNEL = createRpcChannel<InstructlabAPI>('Instru...
type InstructlabAPI (line 24) | interface InstructlabAPI {
FILE: packages/shared/src/LlamaStackAPI.ts
constant LLAMA_STACK_API_CHANNEL (line 23) | const LLAMA_STACK_API_CHANNEL = createRpcChannel<LlamaStackAPI>('LlamaSt...
type LlamaStackAPI (line 24) | interface LlamaStackAPI {
FILE: packages/shared/src/Messages.ts
constant MSG_TASKS_UPDATE (line 35) | const MSG_TASKS_UPDATE = createRpcChannel<Task[]>('tasks-update');
constant MSG_SUPPORTED_LANGUAGES_UPDATE (line 36) | const MSG_SUPPORTED_LANGUAGES_UPDATE = createRpcChannel<Language[]>('sup...
constant MSG_NEW_MODELS_STATE (line 37) | const MSG_NEW_MODELS_STATE = createRpcChannel<ModelInfo[]>('new-models-s...
constant MSG_PODMAN_CONNECTION_UPDATE (line 38) | const MSG_PODMAN_CONNECTION_UPDATE =
constant MSG_INFERENCE_SERVERS_UPDATE (line 40) | const MSG_INFERENCE_SERVERS_UPDATE = createRpcChannel<InferenceServer[]>...
constant MSG_INSTRUCTLAB_SESSIONS_UPDATE (line 41) | const MSG_INSTRUCTLAB_SESSIONS_UPDATE = createRpcChannel<InstructlabSess...
constant MSG_LOCAL_REPOSITORY_UPDATE (line 42) | const MSG_LOCAL_REPOSITORY_UPDATE = createRpcChannel<LocalRepository[]>(...
constant MSG_CONVERSATIONS_UPDATE (line 43) | const MSG_CONVERSATIONS_UPDATE = createRpcChannel<Conversation[]>('conve...
constant MSG_CONFIGURATION_UPDATE (line 44) | const MSG_CONFIGURATION_UPDATE = createRpcChannel<ExtensionConfiguration...
constant MSG_MCP_SERVERS_UPDATE (line 45) | const MSG_MCP_SERVERS_UPDATE = createRpcChannel<McpSettings>('mcp-server...
constant MSG_NEW_CATALOG_STATE (line 46) | const MSG_NEW_CATALOG_STATE = createRpcChannel<ApplicationCatalog>('new-...
constant MSG_APPLICATIONS_STATE_UPDATE (line 47) | const MSG_APPLICATIONS_STATE_UPDATE = createRpcChannel<ApplicationState[...
constant MSG_GPUS_UPDATE (line 48) | const MSG_GPUS_UPDATE = createRpcChannel<IGPUInfo[]>('gpus-update');
constant MSG_MONITORING_UPDATE (line 49) | const MSG_MONITORING_UPDATE = createRpcChannel<StatsHistory[]>('monitori...
constant MSG_NAVIGATION_ROUTE_UPDATE (line 50) | const MSG_NAVIGATION_ROUTE_UPDATE = createRpcChannel<string>('navigation...
constant MSG_MODEL_HANDLERS_UPDATE (line 53) | const MSG_MODEL_HANDLERS_UPDATE = createRpcChannel<string[]>('model-hand...
constant MSG_INFERENCE_PROVIDER_UPDATE (line 55) | const MSG_INFERENCE_PROVIDER_UPDATE = createRpcChannel<string[]>('infere...
FILE: packages/shared/src/StudioAPI.ts
constant STUDIO_API_CHANNEL (line 43) | const STUDIO_API_CHANNEL = createRpcChannel<StudioAPI>('StudioAPI');
type StudioAPI (line 44) | interface StudioAPI {
FILE: packages/shared/src/messages/MessageProxy.spec.ts
type Ping (line 74) | type Ping = {
type Double (line 97) | type Double = {
type Sum (line 120) | type Sum = {
class Dummy (line 139) | class Dummy {
method ping (line 140) | async ping(): Promise<string> {
method ping (line 163) | async ping(): Promise<'pong'> {
method ping (line 316) | async ping(): Promise<'pong'> {
type Foo (line 158) | interface Foo {
class Dummy (line 162) | class Dummy implements Foo {
method ping (line 140) | async ping(): Promise<string> {
method ping (line 163) | async ping(): Promise<'pong'> {
method ping (line 316) | async ping(): Promise<'pong'> {
type TestError (line 184) | type TestError = {
function getMessageListener (line 206) | function getMessageListener(): (event: MessageEvent) => void {
type EventTest (line 216) | interface EventTest {
type EventTest (line 240) | interface EventTest {
type EventTest (line 264) | interface EventTest {
class Dummy (line 315) | class Dummy {
method ping (line 140) | async ping(): Promise<string> {
method ping (line 163) | async ping(): Promise<'pong'> {
method ping (line 316) | async ping(): Promise<'pong'> {
class DummyTimeout (line 340) | class DummyTimeout {
method ping (line 341) | async ping(): Promise<'pong'> {
type Double (line 428) | type Double = {
FILE: packages/shared/src/messages/MessageProxy.ts
type IMessage (line 22) | interface IMessage {
type IMessageRequest (line 28) | interface IMessageRequest extends IMessage {
type IMessageResponse (line 32) | interface IMessageResponse extends IMessageRequest {
type ISubscribedMessage (line 38) | interface ISubscribedMessage {
type UnaryRPC (line 44) | type UnaryRPC = (...args: any[]) => Promise<unknown>;
function isMessageRequest (line 46) | function isMessageRequest(content: unknown): content is IMessageRequest {
function isMessageResponse (line 50) | function isMessageResponse(content: unknown): content is IMessageResponse {
type ObjectInstance (line 56) | type ObjectInstance<T> = {
class RpcExtension (line 60) | class RpcExtension implements Disposable {
method constructor (line 65) | constructor(private webview: Webview) {}
method dispose (line 67) | dispose(): void {
method init (line 71) | init(): void {
method fire (line 115) | fire<T>(channel: RpcChannel<T>, body: T): Promise<boolean> {
method registerInstance (line 122) | registerInstance<T extends Record<keyof T, UnaryRPC>, R extends T>(cha...
type Subscriber (line 128) | interface Subscriber {
type Listener (line 132) | type Listener<T> = (value: T) => void;
class RpcBrowser (line 134) | class RpcBrowser {
method getUniqueId (line 139) | getUniqueId(): number {
method constructor (line 143) | constructor(
method init (line 150) | init(): void {
method getProxy (line 177) | getProxy<T extends Record<keyof T, UnaryRPC>>(
method invoke (line 201) | protected async invoke(
method subscribe (line 236) | subscribe<T>(rpcChannel: RpcChannel<T>, f: Listener<T>): Subscriber {
method isSubscribedMessage (line 249) | isSubscribedMessage(content: any): content is ISubscribedMessage {
class RpcChannel (line 257) | class RpcChannel<T> {
method constructor (line 261) | constructor(private readonly channel: string) {}
method name (line 263) | public get name(): string {
function createRpcChannel (line 273) | function createRpcChannel<T>(channel: string): RpcChannel<T> {
function clearRpcChannelList (line 281) | function clearRpcChannelList(): void {
FILE: packages/shared/src/models/FilterRecipesResult.ts
type FilterRecipesResult (line 21) | interface FilterRecipesResult {
type RecipeFilters (line 27) | type RecipeFilters = {
type RecipeChoices (line 31) | type RecipeChoices = {
type Choice (line 35) | type Choice = {
type CatalogFilterKey (line 40) | type CatalogFilterKey = 'languages' | 'tools' | 'frameworks';
FILE: packages/shared/src/models/IApplicationCatalog.ts
type ApplicationCatalog (line 23) | interface ApplicationCatalog {
FILE: packages/shared/src/models/IApplicationState.ts
type PodHealth (line 22) | type PodHealth = 'none' | 'starting' | 'healthy' | 'unhealthy';
type ApplicationState (line 24) | interface ApplicationState {
FILE: packages/shared/src/models/ICategory.ts
type Category (line 19) | interface Category {
FILE: packages/shared/src/models/IContainerConnectionInfo.ts
type ContainerProviderConnectionInfo (line 21) | interface ContainerProviderConnectionInfo {
type CheckContainerConnectionResourcesOptions (line 29) | interface CheckContainerConnectionResourcesOptions {
type ContainerConnectionInfo (line 35) | type ContainerConnectionInfo =
type ContainerConnectionInfoStatus (line 41) | type ContainerConnectionInfoStatus = 'running' | 'no-machine' | 'low-res...
type RunningContainerConnection (line 43) | interface RunningContainerConnection {
type LowResourcesContainerConnection (line 49) | interface LowResourcesContainerConnection {
type NoContainerConnection (line 60) | interface NoContainerConnection {
type NativeContainerConnection (line 65) | interface NativeContainerConnection {
FILE: packages/shared/src/models/IExtensionConfiguration.ts
type ExtensionConfiguration (line 19) | interface ExtensionConfiguration {
FILE: packages/shared/src/models/IGPUInfo.ts
type IGPUInfo (line 19) | interface IGPUInfo {
type GPUVendor (line 25) | enum GPUVendor {
FILE: packages/shared/src/models/IInference.ts
type InferenceType (line 20) | enum InferenceType {
function toInferenceType (line 36) | function toInferenceType(type: string | undefined): InferenceType {
function inferenceTypeLabel (line 46) | function inferenceTypeLabel(type: InferenceType): string {
type InferenceServerStatus (line 53) | type InferenceServerStatus = 'stopped' | 'running' | 'deleting' | 'stopp...
type InferenceServer (line 55) | interface InferenceServer {
FILE: packages/shared/src/models/ILocalModelInfo.ts
type LocalModelInfo (line 20) | interface LocalModelInfo {
type LocalModelImportInfo (line 27) | interface LocalModelImportInfo {
FILE: packages/shared/src/models/ILocalRepository.ts
type LocalRepository (line 1) | interface LocalRepository {
FILE: packages/shared/src/models/IModelInfo.ts
type ModelInfo (line 21) | interface ModelInfo {
type ModelCheckerContext (line 42) | type ModelCheckerContext = 'inference' | 'recipe';
FILE: packages/shared/src/models/IModelOptions.ts
type StreamOptions (line 1) | interface StreamOptions {
type ModelOptions (line 5) | interface ModelOptions {
FILE: packages/shared/src/models/IModelResponse.ts
type ModelResponse (line 19) | interface ModelResponse {
type ModelResponseChoice (line 28) | interface ModelResponseChoice {
type ModelResponseUsage (line 34) | interface ModelResponseUsage {
FILE: packages/shared/src/models/IPlaygroundMessage.ts
type Message (line 21) | interface Message {
type ErrorMessage (line 26) | interface ErrorMessage extends Message {
type ModelUsage (line 30) | interface ModelUsage {
type ChatMessage (line 35) | interface ChatMessage extends Message {
type AssistantChat (line 40) | interface AssistantChat extends ChatMessage {
type SystemPrompt (line 46) | interface SystemPrompt extends ChatMessage {
type PendingChat (line 51) | interface PendingChat extends AssistantChat {
type UserChat (line 56) | interface UserChat extends ChatMessage {
type Conversation (line 61) | interface Conversation {
type Choice (line 69) | interface Choice {
type ToolCall (line 73) | interface ToolCall {
function isErrorMessage (line 81) | function isErrorMessage(msg: Message): msg is ErrorMessage {
function isChatMessage (line 85) | function isChatMessage(msg: Message): msg is ChatMessage {
function isAssistantChat (line 89) | function isAssistantChat(msg: Message): msg is AssistantChat {
function isAssistantToolCall (line 93) | function isAssistantToolCall(msg: Message): msg is AssistantChat {
function isUserChat (line 97) | function isUserChat(msg: Message): msg is UserChat {
function isPendingChat (line 101) | function isPendingChat(msg: Message): msg is PendingChat {
function isSystemPrompt (line 105) | function isSystemPrompt(msg: Message): msg is SystemPrompt {
FILE: packages/shared/src/models/IPlaygroundV2.ts
type PlaygroundV2 (line 1) | interface PlaygroundV2 {
FILE: packages/shared/src/models/IPodman.ts
type VMType (line 19) | enum VMType {
FILE: packages/shared/src/models/IRecipe.ts
type RecipePullOptions (line 22) | type RecipePullOptions = RecipePullOptionsDefault | RecipePullOptionsWit...
type RecipePullOptionsDefault (line 24) | interface RecipePullOptionsDefault {
type RecipePullOptionsWithModelInference (line 30) | type RecipePullOptionsWithModelInference = RecipePullOptionsDefault & {
type RecipeDependencies (line 34) | interface RecipeDependencies {
function isRecipePullOptionsWithModelInference (line 38) | function isRecipePullOptionsWithModelInference(
type RecipeComponents (line 44) | interface RecipeComponents {
type RecipeImage (line 49) | interface RecipeImage {
type Recipe (line 60) | interface Recipe {
FILE: packages/shared/src/models/IRecipeModelIndex.ts
type RecipeModelIndex (line 19) | interface RecipeModelIndex {
FILE: packages/shared/src/models/ITask.ts
type TaskState (line 19) | type TaskState = 'loading' | 'error' | 'success';
type Task (line 21) | interface Task {
FILE: packages/shared/src/models/InferenceServerConfig.ts
type CreationInferenceServerOptions (line 21) | type CreationInferenceServerOptions = Partial<InferenceServerConfig> & {...
type InferenceServerConfig (line 23) | interface InferenceServerConfig {
FILE: packages/shared/src/models/McpSettings.ts
type McpSettings (line 20) | interface McpSettings {
type McpServerType (line 24) | enum McpServerType {
type McpServer (line 29) | interface McpServer {
type McpClient (line 39) | interface McpClient {
FILE: packages/shared/src/models/RequestOptions.ts
type FormParamDefinition (line 1) | interface FormParamDefinition {
type RequestOptions (line 7) | interface RequestOptions {
FILE: packages/shared/src/models/instructlab/IInstructlabContainerConfiguration.ts
type InstructlabContainerConfiguration (line 20) | interface InstructlabContainerConfiguration {
FILE: packages/shared/src/models/instructlab/IInstructlabContainerInfo.ts
constant INSTRUCTLAB_CONTAINER_TRACKINGID (line 18) | const INSTRUCTLAB_CONTAINER_TRACKINGID = 'instructlab.trackingid';
type InstructlabContainerInfo (line 20) | interface InstructlabContainerInfo {
FILE: packages/shared/src/models/instructlab/IInstructlabSession.ts
type InstructlabSessionStatus (line 19) | type InstructlabSessionStatus = 'fine-tuned' | 'generating-instructions';
type InstructlabSession (line 21) | interface InstructlabSession {
FILE: packages/shared/src/models/llama-stack/LlamaStackContainerConfiguration.ts
type LlamaStackContainerConfiguration (line 20) | interface LlamaStackContainerConfiguration {
FILE: packages/shared/src/models/llama-stack/LlamaStackContainerInfo.ts
constant LLAMA_STACK_CONTAINER_TRACKINGID (line 18) | const LLAMA_STACK_CONTAINER_TRACKINGID = 'llama-stack.trackingid';
type LlamaStackContainerInfo (line 20) | interface LlamaStackContainerInfo {
type LlamaStackContainers (line 25) | interface LlamaStackContainers {
FILE: packages/shared/src/uri/Uri.ts
class Uri (line 20) | class Uri {
method constructor (line 21) | private constructor(
method fsPath (line 29) | get fsPath(): string {
method scheme (line 32) | get scheme(): string {
method authority (line 36) | get authority(): string {
method path (line 40) | get path(): string {
method query (line 44) | get query(): string {
method fragment (line 48) | get fragment(): string {
method with (line 52) | with(_change?: { scheme?: string; authority?: string; path?: string; q...
method toString (line 56) | toString(): string {
method revive (line 60) | static revive(serialized: APIUri): Uri {
FILE: packages/shared/vite.config.js
constant PACKAGE_ROOT (line 22) | const PACKAGE_ROOT = __dirname;
FILE: packages/shared/vitest.config.js
constant PACKAGE_ROOT (line 22) | const PACKAGE_ROOT = __dirname;
FILE: tests/playwright/src/ai-lab-extension.spec.ts
constant AI_LAB_EXTENSION_OCI_IMAGE (line 63) | const AI_LAB_EXTENSION_OCI_IMAGE =
constant AI_LAB_EXTENSION_PREINSTALLED (line 65) | const AI_LAB_EXTENSION_PREINSTALLED: boolean = process.env.EXTENSION_PRE...
constant EXT_TEST_RAG_CHATBOT (line 66) | const EXT_TEST_RAG_CHATBOT: boolean = process.env.EXT_TEST_RAG_CHATBOT =...
constant AI_LAB_CATALOG_STATUS_ACTIVE (line 67) | const AI_LAB_CATALOG_STATUS_ACTIVE: string = 'ACTIVE';
constant AI_LAB_TESTS_WITH_GPU_ENABLED (line 68) | const AI_LAB_TESTS_WITH_GPU_ENABLED: boolean = process.env.EXT_TEST_GPU_...
constant TEST_AUDIO_FILE_PATH (line 78) | const TEST_AUDIO_FILE_PATH: string = path.resolve(
constant AI_JSON_FILE_PATH (line 86) | const AI_JSON_FILE_PATH: string = path.resolve(
constant AI_JSON (line 99) | const AI_JSON: ApplicationCatalog = JSON.parse(aiJSONFile) as Applicatio...
constant AI_APP_MODELS (line 100) | const AI_APP_MODELS: Set<string> = new Set();
constant AI_APP_MODEL_AND_NAMES (line 116) | const AI_APP_MODEL_AND_NAMES: Map<string, string[]> = new Map();
constant PLAYGROUND_TEST_MODELS (line 132) | const PLAYGROUND_TEST_MODELS: string[] = ['ibm-granite/granite-4.0-micro...
constant AI_APP_HTTP_TEST_APP_NAMES (line 134) | const AI_APP_HTTP_TEST_APP_NAMES: string[] = ['Object Detection'];
constant AI_APP_SERVICE_RESPONSE_TEST_APP_NAMES (line 135) | const AI_APP_SERVICE_RESPONSE_TEST_APP_NAMES: string[] = ['Audio to Text...
constant PLAYGROUND_NAME (line 137) | const PLAYGROUND_NAME = 'test playground';
constant SYSTEM_PROMPT (line 138) | const SYSTEM_PROMPT = 'Always respond with: "Hello, I am Chat Bot"';
function cleanupServices (line 721) | async function cleanupServices(): Promise<void> {
function getModelServicePort (line 733) | async function getModelServicePort(appModelName: string): Promise<string> {
function deleteAllModels (line 746) | async function deleteAllModels(): Promise<void> {
function restartApp (line 752) | async function restartApp(appName: string): Promise<void> {
function stopAndDeleteApp (line 788) | async function stopAndDeleteApp(appName: string): Promise<void> {
function deleteUnusedImages (line 807) | async function deleteUnusedImages(navigationBar: NavigationBar): Promise...
function waitForCatalogModel (line 819) | async function waitForCatalogModel(modelName: string): Promise<boolean> {
FILE: tests/playwright/src/model/ai-lab-app-details-page.ts
class AILabAppDetailsPage (line 24) | class AILabAppDetailsPage extends AILabBasePage {
method constructor (line 28) | constructor(page: Page, webview: Page, appName: string) {
method waitForLoad (line 34) | async waitForLoad(): Promise<void> {
method deleteLocalClone (line 38) | async deleteLocalClone(): Promise<void> {
method startNewDeployment (line 42) | async startNewDeployment(timeout: number = 1_400_000): Promise<void> {
method openRunningApps (line 53) | async openRunningApps(): Promise<void> {
method deleteRunningApp (line 57) | async deleteRunningApp(_containerName: string): Promise<void> {
FILE: tests/playwright/src/model/ai-lab-base-page.ts
method constructor (line 30) | constructor(page: Page, webview: Page, heading: string | undefined) {
method enableGpuSupport (line 41) | async enableGpuSupport(): Promise<void> {
FILE: tests/playwright/src/model/ai-lab-creating-model-service-page.ts
class AILabCreatingModelServicePage (line 24) | class AILabCreatingModelServicePage extends AILabBasePage {
method constructor (line 31) | constructor(page: Page, webview: Page) {
method waitForLoad (line 40) | async waitForLoad(): Promise<void> {
method getCurrentStatus (line 44) | async getCurrentStatus(): Promise<string> {
method getLastStatusIconClass (line 55) | async getLastStatusIconClass(): Promise<string> {
method createService (line 64) | async createService(modelName: string = '', port: number = 0): Promise...
method getStatusListLocator (line 89) | private async getStatusListLocator(): Promise<Locator[]> {
FILE: tests/playwright/src/model/ai-lab-dashboard-page.ts
class AILabDashboardPage (line 24) | class AILabDashboardPage extends AILabBasePage {
method constructor (line 27) | constructor(page: Page, webview: Page) {
method waitForLoad (line 32) | async waitForLoad(): Promise<void> {
FILE: tests/playwright/src/model/ai-lab-local-server-page.ts
class AILabLocalServerPage (line 23) | class AILabLocalServerPage extends AILabBasePage {
method constructor (line 26) | constructor(page: Page, webview: Page) {
method waitForLoad (line 31) | async waitForLoad(): Promise<void> {
method getLocalServerPort (line 35) | async getLocalServerPort(): Promise<string> {
FILE: tests/playwright/src/model/ai-lab-model-catalog-page.ts
class AILabCatalogPage (line 25) | class AILabCatalogPage extends AILabBasePage {
method constructor (line 29) | constructor(page: Page, webview: Page) {
method waitForLoad (line 35) | async waitForLoad(): Promise<void> {
method getModelRowByName (line 41) | async getModelRowByName(modelName: string): Promise<Locator | undefine...
method getModelNameByRow (line 53) | async getModelNameByRow(row: Locator): Promise<string> {
method downloadModel (line 59) | async downloadModel(modelName: string): Promise<void> {
method createModelService (line 70) | async createModelService(modelName: string): Promise<AILabCreatingMode...
method deleteModel (line 83) | async deleteModel(modelName: string): Promise<void> {
method deleteAllModels (line 101) | async deleteAllModels(): Promise<void> {
method isModelDownloaded (line 138) | async isModelDownloaded(modelName: string): Promise<boolean> {
method getAllModelRows (line 148) | private async getAllModelRows(): Promise<Locator[]> {
FILE: tests/playwright/src/model/ai-lab-model-llamastack-page.ts
class AiLlamaStackPage (line 21) | class AiLlamaStackPage extends AILabBasePage {
method constructor (line 26) | constructor(page: Page, webview: Page) {
method waitForLoad (line 37) | async waitForLoad(): Promise<void> {
method runLlamaStackContainer (line 41) | async runLlamaStackContainer(): Promise<void> {
method waitForOpenLlamaStackContainerButton (line 45) | async waitForOpenLlamaStackContainerButton(): Promise<void> {
method waitForExploreLlamaStackEnvironmentButton (line 49) | async waitForExploreLlamaStackEnvironmentButton(): Promise<void> {
FILE: tests/playwright/src/model/ai-lab-model-service-page.ts
class AiModelServicePage (line 26) | class AiModelServicePage extends AILabBasePage {
method constructor (line 32) | constructor(page: Page, webview: Page) {
method waitForLoad (line 40) | async waitForLoad(): Promise<void> {
method checkAllModelsForDeletion (line 44) | async checkAllModelsForDeletion(): Promise<void> {
method navigateToCreateNewModelPage (line 50) | async navigateToCreateNewModelPage(): Promise<AILabCreatingModelServic...
method deleteAllCurrentModels (line 56) | async deleteAllCurrentModels(): Promise<void> {
method getCurrentModelCount (line 90) | async getCurrentModelCount(): Promise<number> {
method openServiceDetails (line 94) | async openServiceDetails(modelName: string): Promise<AILabServiceDetai...
method getServiceByModel (line 104) | async getServiceByModel(modelName: string): Promise<Locator | undefine...
method getAllTableRows (line 116) | private async getAllTableRows(): Promise<Locator[]> {
FILE: tests/playwright/src/model/ai-lab-navigation-bar.ts
class AILabNavigationBar (line 32) | class AILabNavigationBar extends AILabBasePage {
method constructor (line 45) | constructor(page: Page, webview: Page) {
method waitForLoad (line 60) | async waitForLoad(): Promise<void> {
method openDashboard (line 64) | async openDashboard(): Promise<AILabDashboardPage> {
method openRecipesCatalog (line 70) | async openRecipesCatalog(): Promise<AILabRecipesCatalogPage> {
method openRunningApps (line 76) | async openRunningApps(): Promise<AiRunningAppsPage> {
method openServices (line 82) | async openServices(): Promise<AiModelServicePage> {
method openCatalog (line 88) | async openCatalog(): Promise<AILabModelCatalogPage> {
method openPlaygrounds (line 94) | async openPlaygrounds(): Promise<AILabPlaygroundsPage> {
method openLlamaStack (line 100) | async openLlamaStack(): Promise<AiLlamaStackPage> {
method openLocalServer (line 106) | async openLocalServer(): Promise<AILabLocalServerPage> {
method openTryInstructLab (line 112) | async openTryInstructLab(): Promise<AILabTryInstructLabPage> {
FILE: tests/playwright/src/model/ai-lab-playground-details-page.ts
class AILabPlaygroundDetailsPage (line 25) | class AILabPlaygroundDetailsPage extends AILabBasePage {
method constructor (line 40) | constructor(page: Page, webview: Page, playgroundName: string) {
method waitForLoad (line 60) | async waitForLoad(): Promise<void> {
method defineSystemPrompt (line 64) | async defineSystemPrompt(systemPrompt: string): Promise<void> {
method deletePlayground (line 73) | async deletePlayground(): Promise<AILabPlaygroundsPage> {
method submitUserInput (line 80) | async submitUserInput(prompt: string): Promise<void> {
method getAssistantResponse (line 87) | async getAssistantResponse(index: number): Promise<Locator> {
FILE: tests/playwright/src/model/ai-lab-playgrounds-page.ts
class AILabPlaygroundsPage (line 25) | class AILabPlaygroundsPage extends AILabBasePage {
method constructor (line 31) | constructor(page: Page, webview: Page) {
method waitForLoad (line 39) | async waitForLoad(): Promise<void> {
method createNewPlayground (line 43) | async createNewPlayground(name: string, timeout = 180_000): Promise<th...
method deletePlayground (line 55) | async deletePlayground(playgroundName: string): Promise<this> {
method doesPlaygroundExist (line 67) | async doesPlaygroundExist(playgroundName: string): Promise<boolean> {
method goToPlaygroundDetails (line 71) | async goToPlaygroundDetails(playgroundName: string): Promise<AILabPlay...
method getPlaygroundRowByName (line 84) | private async getPlaygroundRowByName(playgroundName: string): Promise<...
FILE: tests/playwright/src/model/ai-lab-recipes-catalog-page.ts
class AILabRecipesCatalogPage (line 24) | class AILabRecipesCatalogPage extends AILabBasePage {
method constructor (line 31) | constructor(page: Page, webview: Page) {
method waitForLoad (line 46) | async waitForLoad(): Promise<void> {
method openRecipesCatalogApp (line 51) | async openRecipesCatalogApp(appName: string): Promise<AILabAppDetailsP...
method getAppDetailsLocator (line 58) | private getAppDetailsLocator(appName: string): Locator {
FILE: tests/playwright/src/model/ai-lab-running-apps-page.ts
class AiRunningAppsPage (line 24) | class AiRunningAppsPage extends AILabBasePage {
method constructor (line 25) | constructor(page: Page, webview: Page) {
method waitForLoad (line 29) | async waitForLoad(): Promise<void> {
method getRowForApp (line 33) | async getRowForApp(appName: string): Promise<Locator> {
method getCurrentStatusForApp (line 44) | async getCurrentStatusForApp(appName: string): Promise<string> {
method restartApp (line 49) | async restartApp(appName: string): Promise<void> {
method stopApp (line 58) | async stopApp(appName: string): Promise<void> {
method openKebabMenuForApp (line 65) | async openKebabMenuForApp(appName: string): Promise<Locator> {
method deleteAIApp (line 73) | async deleteAIApp(appName: string): Promise<void> {
method appExists (line 82) | async appExists(appName: string): Promise<boolean> {
method getAppPort (line 95) | async getAppPort(appName: string): Promise<string> {
method getAllTableRows (line 107) | private async getAllTableRows(): Promise<Locator[]> {
FILE: tests/playwright/src/model/ai-lab-service-details-page.ts
class AILabServiceDetailsPage (line 25) | class AILabServiceDetailsPage extends AILabBasePage {
method constructor (line 34) | constructor(page: Page, webview: Page) {
method waitForLoad (line 45) | async waitForLoad(): Promise<void> {
method deleteService (line 49) | async deleteService(): Promise<AiModelServicePage> {
method stopService (line 56) | async stopService(): Promise<void> {
method startService (line 61) | async startService(): Promise<void> {
method getInferenceServerPort (line 66) | async getInferenceServerPort(): Promise<string> {
method getServiceState (line 72) | async getServiceState(): Promise<string> {
FILE: tests/playwright/src/model/ai-lab-start-recipe-page.ts
class AILabStartRecipePage (line 31) | class AILabStartRecipePage extends AILabBasePage {
method constructor (line 38) | constructor(page: Page, webview: Page) {
method waitForLoad (line 47) | async waitForLoad(): Promise<void> {
method startRecipe (line 51) | async startRecipe(
method getModelDownloadProgress (line 119) | async getModelDownloadProgress(): Promise<number> {
method getStatusListLocator (line 136) | private async getStatusListLocator(): Promise<Locator[]> {
method getDownloadStatusContent (line 140) | private async getDownloadStatusContent(): Promise<string> {
method getLatestStatus (line 144) | private async getLatestStatus(): Promise<string> {
method getStatusContent (line 148) | private async getStatusContent(index: number = 0): Promise<string> {
method refreshStartRecipeUI (line 179) | private async refreshStartRecipeUI(page: Page, webView: Page, appName:...
FILE: tests/playwright/src/model/ai-lab-try-instructlab-page.ts
class AILabTryInstructLabPage (line 23) | class AILabTryInstructLabPage extends AILabBasePage {
method constructor (line 28) | constructor(page: Page, webview: Page) {
method waitForLoad (line 35) | async waitForLoad(): Promise<void> {
FILE: tests/playwright/src/model/podman-extension-ai-lab-details-page.ts
class AILabExtensionDetailsPage (line 22) | class AILabExtensionDetailsPage extends ExtensionDetailsPage {
method constructor (line 25) | constructor(page: Page) {
method waitForLoad (line 30) | async waitForLoad(): Promise<void> {
method checkIsActive (line 34) | async checkIsActive(statusTest: string): Promise<void> {
method checkForErrors (line 38) | async checkForErrors(): Promise<void> {
FILE: tests/playwright/src/model/preferences-extension-ai-lab-page.ts
class ExtensionAILabPreferencesPage (line 22) | class ExtensionAILabPreferencesPage extends PreferencesPage {
method constructor (line 27) | constructor(page: Page) {
method waitForLoad (line 35) | async waitForLoad(): Promise<void> {
method disableGPUPreference (line 39) | public async disableGPUPreference(): Promise<void> {
method enableGPUPreference (line 44) | public async enableGPUPreference(): Promise<void> {
method isGPUPreferenceEnabled (line 48) | public async isGPUPreferenceEnabled(): Promise<boolean> {
FILE: tests/playwright/src/utils/aiLabHandler.ts
function reopenAILabDashboard (line 27) | async function reopenAILabDashboard(
function openAILabPreferences (line 41) | async function openAILabPreferences(
function openAILabExtensionDetails (line 57) | async function openAILabExtensionDetails(navigationBar: NavigationBar): ...
function getExtensionCard (line 65) | async function getExtensionCard(navigationBar: NavigationBar): Promise<E...
function waitForExtensionToInitialize (line 74) | async function waitForExtensionToInitialize(navigationBar: NavigationBar...
function getExtensionVersion (line 83) | async function getExtensionVersion(navigationBar: NavigationBar): Promis...
FILE: tests/playwright/src/utils/webviewHandler.ts
function handleWebview (line 24) | async function handleWebview(
FILE: types/podman-desktop-api.d.ts
type PodmanDesktopApi (line 6) | interface PodmanDesktopApi {
FILE: types/postman-code-generators.d.ts
type Language (line 23) | interface Language {
type LanguageVariant (line 30) | interface LanguageVariant {
type Option (line 36) | interface Option {
Condensed preview — 457 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,392K chars).
[
{
"path": ".dockerignore",
"chars": 13,
"preview": "node_modules\n"
},
{
"path": ".editorconfig",
"chars": 335,
"preview": "# EditorConfig is awesome: http://EditorConfig.org\n\n# https://github.com/jokeyrhyme/standard-editorconfig\n\n# top-most Ed"
},
{
"path": ".fmf/version",
"chars": 2,
"preview": "1\n"
},
{
"path": ".gitattributes",
"chars": 19,
"preview": "* text=auto eol=lf\n"
},
{
"path": ".github/ISSUE_TEMPLATE/bug_report.yml",
"chars": 1910,
"preview": "name: Bug 🐞\ndescription: Report a bug report\ntype: bug\n\nbody:\n - type: markdown\n attributes:\n value: |\n "
},
{
"path": ".github/ISSUE_TEMPLATE/config.yml",
"chars": 28,
"preview": "blank_issues_enabled: false\n"
},
{
"path": ".github/ISSUE_TEMPLATE/epic.yml",
"chars": 759,
"preview": "name: Epic ⚡\ndescription: A high-level feature\ntype: epic\n\nbody:\n - type: markdown\n attributes:\n value: |\n "
},
{
"path": ".github/ISSUE_TEMPLATE/feature_request.yml",
"chars": 1245,
"preview": "name: Feature 💡\ndescription: A request, idea, or new functionality\ntype: feature\n\nbody:\n - type: markdown\n attribute"
},
{
"path": ".github/ISSUE_TEMPLATE/ux-request.yaml",
"chars": 1595,
"preview": "name: UX Request\ndescription: UX Request Form\ntype: UX (design spec)\nlabels: [UX/UI Issue, Graphic design]\n\nbody:\n - ty"
},
{
"path": ".github/PULL_REQUEST_TEMPLATE.md",
"chars": 377,
"preview": "### What does this PR do?\n\n### Screenshot / video of UI\n\n<!-- If this PR is changing UI, please include\nscreenshots or s"
},
{
"path": ".github/dependabot.yml",
"chars": 535,
"preview": "# Set update schedule for GitHub Actions\n\nversion: 2\nupdates:\n - package-ecosystem: \"github-actions\"\n directory: \"/\""
},
{
"path": ".github/workflows/ai-lab-e2e-nightly-windows.yaml",
"chars": 10276,
"preview": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": ".github/workflows/build-next.yaml",
"chars": 1641,
"preview": "#\n# Copyright (C) 2023-2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may "
},
{
"path": ".github/workflows/compute-model-sizes.yml",
"chars": 699,
"preview": "# This is a basic workflow that is manually triggered\n\nname: Compute model sizes\n\n# Controls when the action will run. W"
},
{
"path": ".github/workflows/e2e-main-tf.yaml",
"chars": 5580,
"preview": "# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use"
},
{
"path": ".github/workflows/e2e-main.yaml",
"chars": 7906,
"preview": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": ".github/workflows/llama-stack-playground.yaml",
"chars": 1871,
"preview": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": ".github/workflows/pr-check.yaml",
"chars": 7597,
"preview": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": ".github/workflows/ramalama.yaml",
"chars": 6978,
"preview": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": ".github/workflows/recipe-catalog-change-cleanup.yaml",
"chars": 2309,
"preview": "name: recipe-catalog-change-cleanup\n\non:\n workflow_run:\n workflows: [\"recipe-catalog-change-windows-trigger\"]\n ty"
},
{
"path": ".github/workflows/recipe-catalog-change-template.yaml",
"chars": 13633,
"preview": "name: Run recipe tests on catalog change\n\non:\n workflow_call:\n inputs:\n trigger-workflow-run-id:\n requir"
},
{
"path": ".github/workflows/recipe-catalog-change-trigger.yaml",
"chars": 3662,
"preview": "name: recipe-catalog-change-windows-trigger\n\non:\n workflow_run:\n workflows: [\"pr-check\"]\n types:\n - complete"
},
{
"path": ".github/workflows/release.yaml",
"chars": 6736,
"preview": "#\n# Copyright (C) 2024-2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may "
},
{
"path": ".github/workflows/update-ramalama-references.sh",
"chars": 1893,
"preview": "#!/usr/bin/env bash\n#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licens"
},
{
"path": ".github/workflows/update-ramalama-references.yaml",
"chars": 3962,
"preview": "#\n# Copyright (C) 2025 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": ".gitignore",
"chars": 66,
"preview": "node_modules\n.DS_Store\ndist\n.eslintcache\n**/coverage\n.idea\noutput\n"
},
{
"path": ".husky/commit-msg",
"chars": 1149,
"preview": "#!/bin/sh\n#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
},
{
"path": ".husky/pre-commit",
"chars": 636,
"preview": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": ".npmrc",
"chars": 20,
"preview": "node-linker=hoisted\n"
},
{
"path": ".prettierrc",
"chars": 322,
"preview": "{\n \"svelteSortOrder\" : \"options-styles-scripts-markup\",\n \"svelteStrictMode\": true,\n \"svelteAllowShorthand\": false,\n "
},
{
"path": ".vscode/settings.json",
"chars": 69,
"preview": "{\n \"typescript.preferences.importModuleSpecifier\": \"non-relative\"\n}\n"
},
{
"path": "CODE-OF-CONDUCT.md",
"chars": 230,
"preview": "Podman Desktop Extension AI Lab Project Community Code of Conduct\n\nThe Podman Desktop Extension AI Lab Project follows t"
},
{
"path": "Containerfile",
"chars": 1223,
"preview": "#\n# Copyright (C) 2024 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not u"
},
{
"path": "LICENSE",
"chars": 11357,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "MIGRATION.md",
"chars": 3248,
"preview": "# Migration guide\n\n## ℹ️ ApplicationCatalog\n\nBefore **Podman AI Lab** `v1.2.0` the [user-catalog](./PACKAGING-GUIDE.md#a"
},
{
"path": "PACKAGING-GUIDE.md",
"chars": 5861,
"preview": "# Packaging guide\n\n## ApplicationCatalog\n\nAI Lab uses an internal catalog embedded within the application. This catalog "
},
{
"path": "README.md",
"chars": 8601,
"preview": "# Podman AI Lab\n\nPodman AI Lab is an open source extension for Podman Desktop to work with LLMs (Large Language Models) "
},
{
"path": "RELEASE.md",
"chars": 4432,
"preview": "# Release process for Podman AI Lab\n\n## Pre-requisites\n\n- Create Enhancement Issue `Release vX.X.X` for current sprint, "
},
{
"path": "SECURITY.md",
"chars": 285,
"preview": "## Security and Disclosure Information Policy for the Podman Desktop Extension AI Lab Project\n\nThe Podman Desktop Extens"
},
{
"path": "USAGE_DATA.md",
"chars": 1437,
"preview": "# Data Collection\n\nThe AI Lab extension uses telemetry to collect anonymous usage data in order to identify issues and i"
},
{
"path": "api/openapi.yaml",
"chars": 23320,
"preview": "openapi: 3.0.0\ninfo:\n title: Podman Desktop AI Lab API\n description: API for interacting with the Podman Desktop AI La"
},
{
"path": "clean.sh",
"chars": 81,
"preview": "rm -rf node_modules packages/backend/node_modules packages/frontend/node_modules\n"
},
{
"path": "commitlint.config.js",
"chars": 67,
"preview": "module.exports = { extends: ['@commitlint/config-conventional'] };\n"
},
{
"path": "docs/proposals/ai-studio.md",
"chars": 3580,
"preview": "# Motivation\n\nToday, there is no notion of ordering between the containers. But we know that we have a dependency betwee"
},
{
"path": "docs/proposals/state-management.md",
"chars": 2512,
"preview": "# State management\n\nThe backend manages and persists the State. The backend pushes new state to the front-end\nwhen chang"
},
{
"path": "eslint.config.mjs",
"chars": 8163,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "package.json",
"chars": 3799,
"preview": "{\n \"name\": \"ai-lab-monorepo\",\n \"displayName\": \"ai-lab-monorepo\",\n \"description\": \"ai-lab-monorepo\",\n \"publisher\": \"r"
},
{
"path": "packages/backend/.gitignore",
"chars": 21,
"preview": "media\n/src-generated\n"
},
{
"path": "packages/backend/__mocks__/@podman-desktop/api.js",
"chars": 924,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/package.json",
"chars": 4645,
"preview": "{\n \"name\": \"ai-lab\",\n \"displayName\": \"Podman AI Lab\",\n \"description\": \"Podman AI Lab lets you work with LLMs locally,"
},
{
"path": "packages/backend/src/assets/ai.json",
"chars": 301040,
"preview": "{\n \"version\": \"1.0\",\n \"recipes\": [\n {\n \"id\": \"chatbot\",\n \"description\": \"This recipe provides a blueprint"
},
{
"path": "packages/backend/src/assets/inference-images.json",
"chars": 691,
"preview": "{\n \"whispercpp\": {\n \"default\": \"quay.io/ramalama/ramalama-whisper-server@sha256:2ce4e2751672e3baf76d6f220100160da86f"
},
{
"path": "packages/backend/src/assets/instructlab-images.json",
"chars": 120,
"preview": "{\n \"default\": \"docker.io/redhat/instructlab@sha256:c6b2ecb4547b1f43b5539ee99bdbf5c9ae40599fabe1c740622295d9721b91c4\"\n}\n"
},
{
"path": "packages/backend/src/assets/llama-stack-images.json",
"chars": 99,
"preview": "{\n \"default\": \"ghcr.io/containers/podman-ai-lab-stack:a06f399ebf7cb2645af126da0e84395db9bb0d1a\"\n}\n"
},
{
"path": "packages/backend/src/assets/llama-stack-playground-images.json",
"chars": 136,
"preview": "{\n \"default\": \"quay.io/podman-ai-lab/llama-stack-playground@sha256:2ee73137c0b2b401c2703b5881dd84c07f0baa385408e7c02f07"
},
{
"path": "packages/backend/src/assets/openai.json",
"chars": 61911,
"preview": "{\n \"openapi\": \"3.1.0\",\n \"info\": {\n \"title\": \"OpenAI API\",\n \"version\": \"0.3.2\"\n },\n \"servers\": [\n {\n \"u"
},
{
"path": "packages/backend/src/extension.spec.ts",
"chars": 1724,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/extension.ts",
"chars": 1149,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/instructlab-api-impl.ts",
"chars": 1887,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/llama-stack-api-impl.ts",
"chars": 1775,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/GPUManager.spec.ts",
"chars": 3602,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/GPUManager.ts",
"chars": 2073,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/SnippetManager.spec.ts",
"chars": 4209,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/SnippetManager.ts",
"chars": 3533,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/TaskRunner.spec.ts",
"chars": 7083,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/TaskRunner.ts",
"chars": 2377,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/apiServer.spec.ts",
"chars": 34545,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/apiServer.ts",
"chars": 20124,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/application/applicationManager.spec.ts",
"chars": 17599,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/application/applicationManager.ts",
"chars": 24177,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/catalogManager.spec.ts",
"chars": 18374,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/catalogManager.ts",
"chars": 12133,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/gitManager.spec.ts",
"chars": 22267,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/gitManager.ts",
"chars": 9385,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/inference/inferenceManager.spec.ts",
"chars": 23311,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/inference/inferenceManager.ts",
"chars": 19189,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/instructlab/instructlabManager.spec.ts",
"chars": 6552,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/instructlab/instructlabManager.ts",
"chars": 10051,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/llama-stack/llamaStackManager.spec.ts",
"chars": 22457,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/llama-stack/llamaStackManager.ts",
"chars": 20370,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/modelsManager.spec.ts",
"chars": 30577,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/modelsManager.ts",
"chars": 15877,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/monitoringManager.spec.ts",
"chars": 6675,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/monitoringManager.ts",
"chars": 3027,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/playground/McpServerManager.spec.ts",
"chars": 4246,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/playground/McpServerManager.ts",
"chars": 2981,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/playground/aiSdk.spec.ts",
"chars": 14420,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/playground/aiSdk.ts",
"chars": 6780,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/playgroundV2Manager.spec.ts",
"chars": 22772,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/playgroundV2Manager.ts",
"chars": 11109,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/podmanConnection.spec.ts",
"chars": 27480,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/podmanConnection.ts",
"chars": 13064,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/managers/recipes/BuilderManager.spec.ts",
"chars": 5166,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/recipes/BuilderManager.ts",
"chars": 6640,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/recipes/PodManager.spec.ts",
"chars": 10199,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/recipes/PodManager.ts",
"chars": 5260,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/recipes/RecipeManager.spec.ts",
"chars": 6945,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/recipes/RecipeManager.ts",
"chars": 9269,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/snippets/java-okhttp-snippet.spec.ts",
"chars": 1154,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/snippets/java-okhttp-snippet.ts",
"chars": 1241,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/snippets/python-langchain-snippet.spec.ts",
"chars": 1163,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/snippets/python-langchain-snippet.ts",
"chars": 1293,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/snippets/quarkus-snippet.spec.ts",
"chars": 1646,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/managers/snippets/quarkus-snippet.ts",
"chars": 2034,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/AIConfig.spec.ts",
"chars": 7536,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/AIConfig.ts",
"chars": 4870,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/ApplicationOptions.ts",
"chars": 1508,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/HuggingFaceModelHandler.spec.ts",
"chars": 5409,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/HuggingFaceModelHandler.ts",
"chars": 4539,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/ModelHandler.ts",
"chars": 2151,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/TaskRunner.ts",
"chars": 1439,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/URLModelHandler.ts",
"chars": 3820,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/models/baseEvent.ts",
"chars": 1630,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/ApplicationRegistry.ts",
"chars": 1911,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/CancellationTokenRegistry.spec.ts",
"chars": 3361,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/CancellationTokenRegistry.ts",
"chars": 2574,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/ConfigurationRegistry.spec.ts",
"chars": 2601,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/registries/ConfigurationRegistry.ts",
"chars": 4087,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/ContainerRegistry.spec.ts",
"chars": 6527,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/ContainerRegistry.ts",
"chars": 3439,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/ConversationRegistry.ts",
"chars": 7041,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/registries/InferenceProviderRegistry.ts",
"chars": 2200,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/registries/LocalRepositoryRegistry.spec.ts",
"chars": 5461,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/registries/LocalRepositoryRegistry.ts",
"chars": 3268,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/ModelHandlerRegistry.ts",
"chars": 1916,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/NavigationRegistry.spec.ts",
"chars": 4449,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/registries/NavigationRegistry.ts",
"chars": 3188,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/registries/TaskRegistry.spec.ts",
"chars": 3830,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/registries/TaskRegistry.ts",
"chars": 4609,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/studio-api-impl.spec.ts",
"chars": 12636,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/studio-api-impl.ts",
"chars": 23159,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/studio.spec.ts",
"chars": 4343,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/studio.ts",
"chars": 15962,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/templates/java-okhttp.mustache",
"chars": 1063,
"preview": "pom.xml\n=======\n<dependency>\n <groupId>com.squareup.okhttp</groupId>\n <artifactId>okhttp</artifactId>\n <version"
},
{
"path": "packages/backend/src/templates/python-langchain.mustache",
"chars": 608,
"preview": "pip\n=======\npip install langchain langchain-openai\n\nAiService.py\n==============\nfrom langchain_openai import OpenAI\nfrom"
},
{
"path": "packages/backend/src/templates/quarkus-langchain4j.mustache",
"chars": 903,
"preview": "application.properties\n======================\nquarkus.langchain4j.openai.base-url={{{ baseUrl }}}\nquarkus.langchain4j.op"
},
{
"path": "packages/backend/src/tests/ai-test.json",
"chars": 6372,
"preview": "{\n \"version\": \"1.0\",\n \"recipes\": [\n {\n \"id\": \"chatbot\",\n \"description\": \"Chat bot application\",\n \"na"
},
{
"path": "packages/backend/src/tests/ai-user-test.json",
"chars": 1007,
"preview": "{\n \"version\": \"1.0\",\n \"recipes\": [\n {\n \"id\": \"recipe 1\",\n \"description\" : \"Recipe 1\",\n \"name\" : \"Rec"
},
{
"path": "packages/backend/src/tests/utils.ts",
"chars": 1118,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/JsonWatcher.spec.ts",
"chars": 4837,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/JsonWatcher.ts",
"chars": 2815,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/Publisher.spec.ts",
"chars": 1564,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/utils/Publisher.ts",
"chars": 1226,
"preview": "/**********************************************************************\n * Copyright (C) 2024-2025 Red Hat, Inc.\n *\n * L"
},
{
"path": "packages/backend/src/utils/RecipeConstants.ts",
"chars": 1345,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/arch.ts",
"chars": 1049,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/catalogUtils.spec.ts",
"chars": 9320,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/catalogUtils.ts",
"chars": 7927,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/downloader.ts",
"chars": 1279,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/imagesUtils.spec.ts",
"chars": 1154,
"preview": "import { expect, test } from 'vitest';\nimport type { Recipe } from '@shared/models/IRecipe';\nimport type { ContainerConf"
},
{
"path": "packages/backend/src/utils/imagesUtils.ts",
"chars": 1110,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/inferenceUtils.spec.ts",
"chars": 4919,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/inferenceUtils.ts",
"chars": 4022,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/mcpUtils.ts",
"chars": 1853,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/modelsUtils.spec.ts",
"chars": 4917,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/modelsUtils.ts",
"chars": 4353,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/pathUtils.ts",
"chars": 1178,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/podman.spec.ts",
"chars": 3731,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/podman.ts",
"chars": 2812,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/podsUtils.ts",
"chars": 1225,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/ports.ts",
"chars": 2872,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/randomUtils.ts",
"chars": 1048,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/sha.spec.ts",
"chars": 2438,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/sha.ts",
"chars": 1321,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/uploader.spec.ts",
"chars": 2473,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/uploader.ts",
"chars": 3508,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/urldownloader.spec.ts",
"chars": 6467,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/urldownloader.ts",
"chars": 6397,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/utils/utils.ts",
"chars": 1701,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/webviewUtils.spec.ts",
"chars": 2699,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/webviewUtils.ts",
"chars": 2952,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/IWorker.ts",
"chars": 868,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/WindowsWorker.ts",
"chars": 1033,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/InferenceProvider.spec.ts",
"chars": 6184,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/InferenceProvider.ts",
"chars": 3969,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/LlamaCppPython.spec.ts",
"chars": 23661,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/LlamaCppPython.ts",
"chars": 11485,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/OpenVINO.spec.ts",
"chars": 9873,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/OpenVINO.ts",
"chars": 8909,
"preview": "/**********************************************************************\n * Copyright (C) 2025 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/WhisperCpp.spec.ts",
"chars": 7869,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/provider/WhisperCpp.ts",
"chars": 4372,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/uploader/UploaderOptions.ts",
"chars": 1012,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/uploader/WSLUploader.spec.ts",
"chars": 5432,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/src/workers/uploader/WSLUploader.ts",
"chars": 2654,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/tsconfig.json",
"chars": 594,
"preview": "{\n \"compilerOptions\": {\n \"target\": \"esnext\",\n \"module\": \"esnext\",\n \"moduleResolution\": \"bundler\",\n \"resolve"
},
{
"path": "packages/backend/vite.config.js",
"chars": 3343,
"preview": "/**********************************************************************\n * Copyright (C) 2023 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/backend/vitest.config.js",
"chars": 1428,
"preview": "/**********************************************************************\n * Copyright (C) 2023 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/frontend/index.html",
"chars": 415,
"preview": "<!DOCTYPE html>\n<html class=\"fixed\" lang=\"en\">\n <head>\n <meta charset=\"UTF-8\" />\n <link rel=\"icon\" href=\"/favicon"
},
{
"path": "packages/frontend/package.json",
"chars": 1520,
"preview": "{\n \"name\": \"frontend-app\",\n \"displayName\": \"UI for AI Lab\",\n \"version\": \"1.10.0-next\",\n \"type\": \"module\",\n \"license"
},
{
"path": "packages/frontend/src/App.spec.ts",
"chars": 2300,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/frontend/src/App.svelte",
"chars": 5672,
"preview": "<script lang=\"ts\">\nimport './app.css';\nimport '@fortawesome/fontawesome-free/css/all.min.css';\nimport { router } from 't"
},
{
"path": "packages/frontend/src/Route.svelte",
"chars": 876,
"preview": "<script lang=\"ts\">\nimport { createRouteObject } from 'tinro/dist/tinro_lib';\nimport type { TinroRouteMeta } from 'tinro'"
},
{
"path": "packages/frontend/src/app.css",
"chars": 57,
"preview": "@import 'tailwindcss';\n@config '../tailwind.config.cjs';\n"
},
{
"path": "packages/frontend/src/index.html",
"chars": 282,
"preview": "<!doctype html>\n<html lang=\"en\">\n\t<head>\n\t\t<meta charset=\"utf-8\" />\n\t\t<meta name=\"viewport\" content=\"width=device-width,"
},
{
"path": "packages/frontend/src/lib/ApplicationActions.spec.ts",
"chars": 6167,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/frontend/src/lib/ApplicationActions.svelte",
"chars": 2942,
"preview": "<script lang=\"ts\">\nimport {\n faRotateForward,\n faArrowUpRightFromSquare,\n faTrash,\n faBookOpen,\n faStop,\n faPlay,\n"
},
{
"path": "packages/frontend/src/lib/Badge.spec.ts",
"chars": 1726,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/frontend/src/lib/Badge.svelte",
"chars": 510,
"preview": "<script lang=\"ts\">\nimport type { IconDefinition } from '@fortawesome/free-regular-svg-icons';\nimport Fa from 'svelte-fa'"
},
{
"path": "packages/frontend/src/lib/Card.svelte",
"chars": 1630,
"preview": "<script lang=\"ts\">\nimport Fa from 'svelte-fa';\nimport type { IconDefinition } from '@fortawesome/free-regular-svg-icons'"
},
{
"path": "packages/frontend/src/lib/ContentDetailsLayout.spec.ts",
"chars": 1838,
"preview": "/**********************************************************************\n * Copyright (C) 2024 Red Hat, Inc.\n *\n * Licens"
},
{
"path": "packages/frontend/src/lib/ContentDetailsLayout.svelte",
"chars": 1743,
"preview": "<script lang=\"ts\">\nexport let detailsTitle: string;\nexport let detailsLabel: string;\nexport let detailsSummary: string ="
}
]
// ... and 257 more files (download for full content)
About this extraction
This page contains the full source code of the containers/podman-desktop-extension-ai-lab GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 457 files (2.2 MB), approximately 594.9k tokens, and a symbol index with 1020 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.