Repository: Crosstalk-Solutions/project-nomad Branch: main Commit: efe6af9b2492 Files: 294 Total size: 1.1 MB Directory structure: gitextract_i1jwi_c6/ ├── .dockerignore ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.yml │ │ ├── config.yml │ │ └── feature_request.yml │ ├── dependabot.yaml │ ├── scripts/ │ │ └── finalize-release-notes.sh │ └── workflows/ │ ├── build-disk-collector.yml │ ├── build-primary-image.yml │ ├── build-sidecar-updater.yml │ ├── release.yml │ └── validate-collection-urls.yml ├── .gitignore ├── .releaserc.json ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── admin/ │ ├── .editorconfig │ ├── ace.js │ ├── adonisrc.ts │ ├── app/ │ │ ├── controllers/ │ │ │ ├── benchmark_controller.ts │ │ │ ├── chats_controller.ts │ │ │ ├── collection_updates_controller.ts │ │ │ ├── docs_controller.ts │ │ │ ├── downloads_controller.ts │ │ │ ├── easy_setup_controller.ts │ │ │ ├── home_controller.ts │ │ │ ├── maps_controller.ts │ │ │ ├── ollama_controller.ts │ │ │ ├── rag_controller.ts │ │ │ ├── settings_controller.ts │ │ │ ├── system_controller.ts │ │ │ └── zim_controller.ts │ │ ├── exceptions/ │ │ │ ├── handler.ts │ │ │ └── internal_server_error_exception.ts │ │ ├── jobs/ │ │ │ ├── check_service_updates_job.ts │ │ │ ├── check_update_job.ts │ │ │ ├── download_model_job.ts │ │ │ ├── embed_file_job.ts │ │ │ ├── run_benchmark_job.ts │ │ │ └── run_download_job.ts │ │ ├── middleware/ │ │ │ ├── container_bindings_middleware.ts │ │ │ ├── force_json_response_middleware.ts │ │ │ └── maps_static_middleware.ts │ │ ├── models/ │ │ │ ├── benchmark_result.ts │ │ │ ├── benchmark_setting.ts │ │ │ ├── chat_message.ts │ │ │ ├── chat_session.ts │ │ │ ├── collection_manifest.ts │ │ │ ├── installed_resource.ts │ │ │ ├── kv_store.ts │ │ │ ├── service.ts │ │ │ └── wikipedia_selection.ts │ │ ├── services/ │ │ │ ├── benchmark_service.ts │ │ │ ├── chat_service.ts │ │ │ ├── collection_manifest_service.ts │ │ │ ├── collection_update_service.ts │ │ │ ├── container_registry_service.ts │ │ │ ├── docker_service.ts │ │ │ ├── docs_service.ts │ │ │ ├── download_service.ts │ │ │ ├── map_service.ts │ │ │ ├── ollama_service.ts │ │ │ ├── queue_service.ts │ │ │ ├── rag_service.ts │ │ │ ├── system_service.ts │ │ │ ├── system_update_service.ts │ │ │ ├── zim_extraction_service.ts │ │ │ └── zim_service.ts │ │ ├── utils/ │ │ │ ├── downloads.ts │ │ │ ├── fs.ts │ │ │ ├── misc.ts │ │ │ └── version.ts │ │ └── validators/ │ │ ├── benchmark.ts │ │ ├── chat.ts │ │ ├── common.ts │ │ ├── curated_collections.ts │ │ ├── download.ts │ │ ├── ollama.ts │ │ ├── rag.ts │ │ ├── settings.ts │ │ ├── system.ts │ │ └── zim.ts │ ├── bin/ │ │ ├── console.ts │ │ ├── server.ts │ │ └── test.ts │ ├── commands/ │ │ ├── benchmark/ │ │ │ ├── results.ts │ │ │ ├── run.ts │ │ │ └── submit.ts │ │ └── queue/ │ │ └── work.ts │ ├── config/ │ │ ├── app.ts │ │ ├── bodyparser.ts │ │ ├── cors.ts │ │ ├── database.ts │ │ ├── hash.ts │ │ ├── inertia.ts │ │ ├── logger.ts │ │ ├── queue.ts │ │ ├── session.ts │ │ ├── shield.ts │ │ ├── static.ts │ │ ├── transmit.ts │ │ └── vite.ts │ ├── constants/ │ │ ├── broadcast.ts │ │ ├── kv_store.ts │ │ ├── misc.ts │ │ ├── ollama.ts │ │ ├── service_names.ts │ │ └── zim_extraction.ts │ ├── database/ │ │ ├── migrations/ │ │ │ ├── 1751086751801_create_services_table.ts │ │ │ ├── 1763499145832_update_services_table.ts │ │ │ ├── 1764912210741_create_curated_collections_table.ts │ │ │ ├── 1764912270123_create_curated_collection_resources_table.ts │ │ │ ├── 
1768170944482_update_services_add_installation_statuses_table.ts │ │ │ ├── 1768453747522_update_services_add_icon.ts │ │ │ ├── 1769097600001_create_benchmark_results_table.ts │ │ │ ├── 1769097600002_create_benchmark_settings_table.ts │ │ │ ├── 1769300000001_add_powered_by_and_display_order_to_services.ts │ │ │ ├── 1769300000002_update_services_friendly_names.ts │ │ │ ├── 1769324448000_add_builder_tag_to_benchmark_results.ts │ │ │ ├── 1769400000001_create_installed_tiers_table.ts │ │ │ ├── 1769400000002_create_kv_store_table.ts │ │ │ ├── 1769500000001_create_wikipedia_selection_table.ts │ │ │ ├── 1769646771604_create_create_chat_sessions_table.ts │ │ │ ├── 1769646798266_create_create_chat_messages_table.ts │ │ │ ├── 1769700000001_create_zim_file_metadata_table.ts │ │ │ ├── 1770269324176_add_unique_constraint_to_curated_collection_resources_table.ts │ │ │ ├── 1770273423670_drop_installed_tiers_table.ts │ │ │ ├── 1770849108030_create_create_collection_manifests_table.ts │ │ │ ├── 1770849119787_create_create_installed_resources_table.ts │ │ │ ├── 1770850092871_create_drop_legacy_curated_tables_table.ts │ │ │ ├── 1771000000001_add_update_fields_to_services.ts │ │ │ └── 1771000000002_pin_latest_service_images.ts │ │ └── seeders/ │ │ └── service_seeder.ts │ ├── docs/ │ │ ├── about.md │ │ ├── faq.md │ │ ├── getting-started.md │ │ ├── home.md │ │ ├── release-notes.md │ │ └── use-cases.md │ ├── eslint.config.js │ ├── inertia/ │ │ ├── app/ │ │ │ └── app.tsx │ │ ├── components/ │ │ │ ├── ActiveDownloads.tsx │ │ │ ├── ActiveEmbedJobs.tsx │ │ │ ├── ActiveModelDownloads.tsx │ │ │ ├── Alert.tsx │ │ │ ├── BouncingDots.tsx │ │ │ ├── BouncingLogo.tsx │ │ │ ├── BuilderTagSelector.tsx │ │ │ ├── CategoryCard.tsx │ │ │ ├── CuratedCollectionCard.tsx │ │ │ ├── DebugInfoModal.tsx │ │ │ ├── DownloadURLModal.tsx │ │ │ ├── DynamicIcon.tsx │ │ │ ├── Footer.tsx │ │ │ ├── HorizontalBarChart.tsx │ │ │ ├── InfoTooltip.tsx │ │ │ ├── InstallActivityFeed.tsx │ │ │ ├── LoadingSpinner.tsx │ │ │ ├── MarkdocRenderer.tsx │ │ │ ├── ProgressBar.tsx │ │ │ ├── StorageProjectionBar.tsx │ │ │ ├── StyledButton.tsx │ │ │ ├── StyledModal.tsx │ │ │ ├── StyledSectionHeader.tsx │ │ │ ├── StyledSidebar.tsx │ │ │ ├── StyledTable.tsx │ │ │ ├── ThemeToggle.tsx │ │ │ ├── TierSelectionModal.tsx │ │ │ ├── UpdateServiceModal.tsx │ │ │ ├── WikipediaSelector.tsx │ │ │ ├── chat/ │ │ │ │ ├── ChatAssistantAvatar.tsx │ │ │ │ ├── ChatButton.tsx │ │ │ │ ├── ChatInterface.tsx │ │ │ │ ├── ChatMessageBubble.tsx │ │ │ │ ├── ChatModal.tsx │ │ │ │ ├── ChatSidebar.tsx │ │ │ │ ├── KnowledgeBaseModal.tsx │ │ │ │ └── index.tsx │ │ │ ├── file-uploader/ │ │ │ │ ├── index.css │ │ │ │ └── index.tsx │ │ │ ├── inputs/ │ │ │ │ ├── Input.tsx │ │ │ │ └── Switch.tsx │ │ │ ├── layout/ │ │ │ │ └── BackToHomeHeader.tsx │ │ │ ├── maps/ │ │ │ │ └── MapComponent.tsx │ │ │ ├── markdoc/ │ │ │ │ ├── Heading.tsx │ │ │ │ ├── Image.tsx │ │ │ │ ├── List.tsx │ │ │ │ ├── ListItem.tsx │ │ │ │ └── Table.tsx │ │ │ └── systeminfo/ │ │ │ ├── CircularGauge.tsx │ │ │ ├── InfoCard.tsx │ │ │ └── StatusCard.tsx │ │ ├── context/ │ │ │ ├── ModalContext.ts │ │ │ └── NotificationContext.ts │ │ ├── css/ │ │ │ └── app.css │ │ ├── hooks/ │ │ │ ├── useDebounce.ts │ │ │ ├── useDiskDisplayData.ts │ │ │ ├── useDownloads.ts │ │ │ ├── useEmbedJobs.ts │ │ │ ├── useErrorNotification.ts │ │ │ ├── useInternetStatus.ts │ │ │ ├── useMapRegionFiles.ts │ │ │ ├── useOllamaModelDownloads.ts │ │ │ ├── useServiceInstallationActivity.ts │ │ │ ├── useServiceInstalledStatus.tsx │ │ │ ├── useSystemInfo.ts │ │ │ ├── 
useSystemSetting.ts │ │ │ ├── useTheme.ts │ │ │ └── useUpdateAvailable.ts │ │ ├── layouts/ │ │ │ ├── AppLayout.tsx │ │ │ ├── DocsLayout.tsx │ │ │ ├── MapsLayout.tsx │ │ │ └── SettingsLayout.tsx │ │ ├── lib/ │ │ │ ├── api.ts │ │ │ ├── builderTagWords.ts │ │ │ ├── classNames.ts │ │ │ ├── collections.ts │ │ │ ├── navigation.ts │ │ │ └── util.ts │ │ ├── pages/ │ │ │ ├── about.tsx │ │ │ ├── chat.tsx │ │ │ ├── docs/ │ │ │ │ └── show.tsx │ │ │ ├── easy-setup/ │ │ │ │ ├── complete.tsx │ │ │ │ └── index.tsx │ │ │ ├── errors/ │ │ │ │ ├── not_found.tsx │ │ │ │ └── server_error.tsx │ │ │ ├── home.tsx │ │ │ ├── maps.tsx │ │ │ └── settings/ │ │ │ ├── apps.tsx │ │ │ ├── benchmark.tsx │ │ │ ├── legal.tsx │ │ │ ├── maps.tsx │ │ │ ├── models.tsx │ │ │ ├── support.tsx │ │ │ ├── system.tsx │ │ │ ├── update.tsx │ │ │ └── zim/ │ │ │ ├── index.tsx │ │ │ └── remote-explorer.tsx │ │ ├── providers/ │ │ │ ├── ModalProvider.tsx │ │ │ ├── NotificationProvider.tsx │ │ │ └── ThemeProvider.tsx │ │ └── tsconfig.json │ ├── package.json │ ├── providers/ │ │ └── map_static_provider.ts │ ├── resources/ │ │ └── views/ │ │ └── inertia_layout.edge │ ├── start/ │ │ ├── env.ts │ │ ├── kernel.ts │ │ └── routes.ts │ ├── tailwind.config.ts │ ├── tests/ │ │ └── bootstrap.ts │ ├── tsconfig.json │ ├── types/ │ │ ├── benchmark.ts │ │ ├── chat.ts │ │ ├── collections.ts │ │ ├── docker.ts │ │ ├── downloads.ts │ │ ├── files.ts │ │ ├── kv_store.ts │ │ ├── maps.ts │ │ ├── ollama.ts │ │ ├── rag.ts │ │ ├── services.ts │ │ ├── system.ts │ │ ├── util.ts │ │ └── zim.ts │ ├── util/ │ │ ├── docs.ts │ │ ├── files.ts │ │ └── zim.ts │ ├── views/ │ │ └── inertia_layout.edge │ └── vite.config.ts ├── collections/ │ ├── CATEGORIES-TODO.md │ ├── kiwix-categories.json │ ├── maps.json │ └── wikipedia.json ├── install/ │ ├── collect_disk_info.sh │ ├── entrypoint.sh │ ├── install_nomad.sh │ ├── management_compose.yaml │ ├── migrate-disk-collector.md │ ├── migrate-disk-collector.sh │ ├── run_updater_fixes.sh │ ├── sidecar-disk-collector/ │ │ ├── Dockerfile │ │ └── collect-disk-info.sh │ ├── sidecar-updater/ │ │ ├── Dockerfile │ │ └── update-watcher.sh │ ├── start_nomad.sh │ ├── stop_nomad.sh │ ├── uninstall_nomad.sh │ ├── update_nomad.sh │ ├── wikipedia_en_100_mini_2025-06.zim │ └── wikipedia_en_100_mini_2026-01.zim └── package.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ .env .env.* .git node_modules *.log admin/storage admin/node_modules admin/build ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.yml ================================================ name: Bug Report description: Report a bug or issue with Project N.O.M.A.D. title: "[Bug]: " labels: ["bug", "needs-triage"] body: - type: markdown attributes: value: | Thanks for taking the time to report a bug! Please fill out the information below to help us diagnose and fix the issue. **Before submitting:** - Search existing issues to avoid duplicates - Ensure you're running the latest version of N.O.M.A.D. - Redact any personal or sensitive information from logs/configs - Please don't submit issues related to running N.O.M.A.D. on Unraid or another NAS - we don't have plans to support these kinds of platforms at this time - type: dropdown id: issue-category attributes: label: Issue Category description: What area is this issue related to? 
options: - Installation/Setup - AI Assistant (Ollama) - Knowledge Base/RAG (Document Upload) - Docker/Container Issues - GPU Configuration - Content Downloads (ZIM, Maps, Collections) - Service Management (Start/Stop/Update) - System Performance/Resources - UI/Frontend Issue - Other validations: required: true - type: textarea id: description attributes: label: Bug Description description: Provide a clear and concise description of what the bug is placeholder: What happened? What did you expect to happen? validations: required: true - type: textarea id: reproduction attributes: label: Steps to Reproduce description: How can we reproduce this issue? placeholder: | 1. Go to '...' 2. Click on '...' 3. See error validations: required: true - type: textarea id: expected-behavior attributes: label: Expected Behavior description: What did you expect to happen? placeholder: Describe the expected outcome validations: required: true - type: textarea id: actual-behavior attributes: label: Actual Behavior description: What actually happened? placeholder: Describe what actually occurred, including any error messages validations: required: true - type: input id: nomad-version attributes: label: N.O.M.A.D. Version description: What version of N.O.M.A.D. are you running? (Check Settings > Update or run `docker ps` and check nomad_admin image tag) placeholder: "e.g., 1.29.0" validations: required: true - type: dropdown id: os attributes: label: Operating System description: What OS are you running N.O.M.A.D. on? options: - Ubuntu 24.04 - Ubuntu 22.04 - Ubuntu 20.04 - Debian 13 (Trixie) - Debian 12 (Bookworm) - Debian 11 (Bullseye) - Other Debian-based - Other (not yet officially supported) validations: required: true - type: input id: docker-version attributes: label: Docker Version description: What version of Docker are you running? (`docker --version`) placeholder: "e.g., Docker version 24.0.7" - type: dropdown id: gpu-present attributes: label: Do you have a dedicated GPU? options: - "Yes" - "No" - "Not sure" validations: required: true - type: input id: gpu-model attributes: label: GPU Model (if applicable) description: What GPU model do you have? (Check Settings > System or run `nvidia-smi` if NVIDIA GPU) placeholder: "e.g., NVIDIA GeForce RTX 3060" - type: textarea id: system-specs attributes: label: System Specifications description: Provide relevant system specs (CPU, RAM, available disk space) placeholder: | CPU: RAM: Available Disk Space: GPU (if any): - type: textarea id: service-status attributes: label: Service Status (if relevant) description: If this is a service-related issue, what's the status of relevant services? (Check Settings > Apps or run `docker ps`) placeholder: | Paste output from `docker ps` or describe service states from the UI - type: textarea id: logs attributes: label: Relevant Logs description: | Include any relevant logs or error messages. **Please redact any personal/sensitive information.** Useful commands for collecting logs: - N.O.M.A.D. 
management app: `docker logs nomad_admin` - Ollama: `docker logs nomad_ollama` - Qdrant: `docker logs nomad_qdrant` - Specific service: `docker logs nomad_` placeholder: Paste relevant log output here render: shell - type: textarea id: browser-console attributes: label: Browser Console Errors (if UI issue) description: If this is a UI issue, include any errors from your browser's developer console (F12) placeholder: Paste browser console errors here render: javascript - type: textarea id: screenshots attributes: label: Screenshots description: If applicable, add screenshots to help explain your problem (drag and drop images here) - type: textarea id: additional-context attributes: label: Additional Context description: Add any other context about the problem here (network setup, custom configurations, recent changes, etc.) - type: checkboxes id: terms attributes: label: Pre-submission Checklist description: Please confirm the following before submitting options: - label: I have searched for existing issues that might be related to this bug required: true - label: I am running the latest version of Project N.O.M.A.D. (or have noted my version above) required: true - label: I have redacted any personal or sensitive information from logs and screenshots required: true - label: This issue is NOT related to running N.O.M.A.D. on an unsupported/non-Debian-based OS required: false ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false contact_links: - name: 💬 Discord Community url: https://discord.com/invite/crosstalksolutions about: Join our Discord community for general questions, support, and discussions - name: 📖 Documentation url: https://projectnomad.us about: Check the official documentation and guides - name: 🏆 Community Leaderboard url: https://benchmark.projectnomad.us about: View the N.O.M.A.D. benchmark leaderboard - name: 🤝 Contributing Guide url: https://github.com/Crosstalk-Solutions/project-nomad/blob/main/CONTRIBUTING.md about: Learn how to contribute to Project N.O.M.A.D. - name: 📅 Roadmap url: https://roadmap.projectnomad.us about: See our public roadmap, vote on features, and suggest new ones ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.yml ================================================ name: Feature Request description: Suggest a new feature or enhancement for Project N.O.M.A.D. title: "[Feature]: " labels: ["enhancement", "needs-discussion"] body: - type: markdown attributes: value: | Thanks for your interest in improving Project N.O.M.A.D.! Before you submit a feature request, consider checking our [roadmap](https://roadmap.projectnomad.us) to see if it's already planned or in progress. You're welcome to suggest new ideas there if you don't plan on opening PRs yourself. **Please note:** Feature requests are not guaranteed to be implemented. All requests are evaluated based on alignment with the project's goals, feasibility, and community demand. **Before submitting:** - Search existing feature requests and our [roadmap](https://roadmap.projectnomad.us) to avoid duplicates - Consider if this aligns with N.O.M.A.D.'s mission: offline-first knowledge and education - Consider the technical feasibility of the feature: N.O.M.A.D. 
is designed to be containerized and run on a wide range of hardware, so features that require heavy resources (aside from GPU-intensive tasks) or complex host configurations may be less likely to be implemented - Consider the scope of the feature: Small, focused enhancements that can be implemented incrementally are more likely to be implemented than large, broad features that would require significant development effort or have an unclear path forward - If you're able to contribute code, testing, or documentation, that significantly increases the chances of your feature being implemented - type: dropdown id: feature-category attributes: label: Feature Category description: What area does this feature relate to? options: - New Service/Tool Integration - AI Assistant Enhancement - Knowledge Base/RAG Improvement - Content Management (ZIM, Maps, Collections) - UI/UX Improvement - System Management - Performance Optimization - Documentation - Security - Other validations: required: true - type: textarea id: problem attributes: label: Problem Statement description: What problem does this feature solve? Is your feature request related to a pain point? placeholder: I find it frustrating when... / It would be helpful if... / Users struggle with... validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: Describe the feature or enhancement you'd like to see placeholder: Add a feature that... / Change the behavior to... / Integrate with... validations: required: true - type: textarea id: alternatives attributes: label: Alternative Solutions description: Have you considered any alternative solutions or workarounds? placeholder: I've tried... / Another approach could be... / A workaround is... - type: textarea id: use-case attributes: label: Use Case description: Describe a specific scenario where this feature would be valuable placeholder: | As a [type of user], when I [do something], I want to [accomplish something] so that [benefit]. Example: Because I have a dedicated GPU, I want to be able to see in the UI if GPU support is enabled so that I can optimize performance and troubleshoot issues more easily. - type: dropdown id: user-type attributes: label: Who would benefit from this feature? description: What type of users would find this most valuable? multiple: true options: - Individual/Home Users - Families - Teachers/Educators - Students - Survivalists/Preppers - Developers/Contributors - Organizations - All Users validations: required: true - type: dropdown id: priority attributes: label: How important is this feature to you? options: - Critical - Blocking my use of N.O.M.A.D. - High - Would significantly improve my experience - Medium - Would be nice to have - Low - Minor convenience validations: required: true - type: textarea id: implementation-ideas attributes: label: Implementation Ideas (Optional) description: If you have technical suggestions for how this could be implemented, share them here placeholder: This could potentially use... / It might integrate with... / A possible approach is... - type: textarea id: examples attributes: label: Examples or References description: Are there similar features in other applications? Include links, screenshots, or descriptions placeholder: Similar to how [app name] does... / See this example at [URL] - type: dropdown id: willing-to-contribute attributes: label: Would you be willing to help implement this? 
description: Contributing increases the likelihood of implementation options: - "Yes - I can write the code" - "Yes - I can help test" - "Yes - I can help with documentation" - "Maybe - with guidance" - "No - I don't have the skills/time" validations: required: true - type: textarea id: additional-context attributes: label: Additional Context description: Add any other context, mockups, diagrams, or information about the feature request - type: checkboxes id: checklist attributes: label: Pre-submission Checklist description: Please confirm the following before submitting options: - label: I have searched for existing feature requests that might be similar required: true - label: This feature aligns with N.O.M.A.D.'s mission of offline-first knowledge and education required: true - label: I understand that feature requests are not guaranteed to be implemented required: true
================================================
FILE: .github/dependabot.yaml
================================================
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/admin"
    schedule:
      interval: "weekly"
    target-branch: "rc"
================================================
FILE: .github/scripts/finalize-release-notes.sh
================================================
#!/usr/bin/env bash
#
# finalize-release-notes.sh
#
# Stamps the "## Unreleased" section in a release-notes file with a version
# and date, and extracts the section content for use in GitHub releases / email.
# Also includes all commits since the last release for complete transparency.
#
# Usage: finalize-release-notes.sh <version> <file>
#
# Exit codes:
#   0 - Success: section stamped and extracted
#   1 - No "## Unreleased" section found (skip gracefully)
#   2 - Unreleased section exists but is empty (skip gracefully)

set -euo pipefail

VERSION="${1:?Usage: finalize-release-notes.sh <version> <file>}"
FILE="${2:?Usage: finalize-release-notes.sh <version> <file>}"

if [[ ! -f "$FILE" ]]; then
  echo "Error: File not found: $FILE" >&2
  exit 1
fi

# Find the line number of the ## Unreleased header (case-insensitive)
HEADER_LINE=$(grep -inm1 '^## unreleased' "$FILE" | cut -d: -f1)

if [[ -z "$HEADER_LINE" ]]; then
  echo "No '## Unreleased' section found. Skipping."
  exit 1
fi

TOTAL_LINES=$(wc -l < "$FILE")

# Find the next section header (## Version ...) or --- separator after the Unreleased header
NEXT_SECTION_LINE=""
if [[ $HEADER_LINE -lt $TOTAL_LINES ]]; then
  NEXT_SECTION_LINE=$(tail -n +"$((HEADER_LINE + 1))" "$FILE" \
    | grep -nm1 '^## \|^---$' \
    | cut -d: -f1)
fi

if [[ -n "$NEXT_SECTION_LINE" ]]; then
  # NEXT_SECTION_LINE is relative to HEADER_LINE+1, convert to absolute
  END_LINE=$((HEADER_LINE + NEXT_SECTION_LINE - 1))
else
  # Section runs to end of file
  END_LINE=$TOTAL_LINES
fi

# Extract content between header and next section (exclusive of both boundaries)
CONTENT_START=$((HEADER_LINE + 1))
CONTENT_END=$END_LINE

# Extract the section body (between header line and the next boundary)
SECTION_BODY=$(sed -n "${CONTENT_START},${CONTENT_END}p" "$FILE" | sed '/^$/N;/^\n$/d')

# Check for actual content: strip blank lines and lines that are only markdown headers (###...)
TRIMMED=$(echo "$SECTION_BODY" | sed '/^[[:space:]]*$/d')
HAS_CONTENT=$(echo "$SECTION_BODY" | sed '/^[[:space:]]*$/d' | grep -v '^###' || true)

if [[ -z "$TRIMMED" || -z "$HAS_CONTENT" ]]; then
  echo "Unreleased section is empty. Skipping."
  exit 2
fi

# Format the date as "Month Day, Year"
DATE_STAMP=$(date +'%B %-d, %Y')
NEW_HEADER="## Version ${VERSION} - ${DATE_STAMP}"

# Build the replacement: swap the header line, keep everything else intact
{
  # Lines before the Unreleased header
  if [[ $HEADER_LINE -gt 1 ]]; then
    head -n "$((HEADER_LINE - 1))" "$FILE"
  fi
  # New versioned header
  echo "$NEW_HEADER"
  # Content between header and next section
  sed -n "${CONTENT_START},${CONTENT_END}p" "$FILE"
  # Rest of the file after the section
  if [[ $END_LINE -lt $TOTAL_LINES ]]; then
    tail -n +"$((END_LINE + 1))" "$FILE"
  fi
} > "${FILE}.tmp"
mv "${FILE}.tmp" "$FILE"

# Get commits since the last release
LAST_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")
COMMIT_LIST=""
if [[ -n "$LAST_TAG" ]]; then
  echo "Fetching commits since ${LAST_TAG}..."
  # Get commits between last tag and HEAD, excluding merge commits and skip ci commits
  COMMIT_LIST=$(git log "${LAST_TAG}..HEAD" \
    --no-merges \
    --pretty=format:"- %s ([%h](https://github.com/${GITHUB_REPOSITORY}/commit/%H))" \
    --grep="\[skip ci\]" --invert-grep \
    || echo "")
else
  echo "No previous tag found, fetching all commits..."
  COMMIT_LIST=$(git log \
    --no-merges \
    --pretty=format:"- %s ([%h](https://github.com/${GITHUB_REPOSITORY}/commit/%H))" \
    --grep="\[skip ci\]" --invert-grep \
    || echo "")
fi

# Write the extracted section content (for GitHub release body / future email)
{
  echo "$NEW_HEADER"
  echo ""
  if [[ -n "$TRIMMED" ]]; then
    echo "$TRIMMED"
    echo ""
  fi
  # Add commit history if available
  if [[ -n "$COMMIT_LIST" ]]; then
    echo "---"
    echo ""
    echo "### 📝 All Changes"
    echo ""
    echo "$COMMIT_LIST"
  fi
} > "${FILE}.section"

echo "Finalized release notes for v${VERSION}"
echo " Updated: ${FILE}"
echo " Extracted: ${FILE}.section"
exit 0
================================================
FILE: .github/workflows/build-disk-collector.yml
================================================
name: Build Disk Collector Image on: workflow_dispatch: inputs: version: description: 'Semantic version to label the Docker image under (no "v" prefix, e.g. "1.2.3")' required: true type: string tag_latest: description: 'Also tag this image as :latest?'
required: false type: boolean default: false jobs: check_authorization: name: Check authorization to publish new Docker image runs-on: ubuntu-latest outputs: isAuthorized: ${{ steps.check-auth.outputs.is_authorized }} steps: - name: check-auth id: check-auth run: echo "is_authorized=${{ contains(secrets.DEPLOYMENT_AUTHORIZED_USERS, github.triggering_actor) }}" >> $GITHUB_OUTPUT build: name: Build disk-collector image needs: check_authorization if: needs.check_authorization.outputs.isAuthorized == 'true' runs-on: ubuntu-latest permissions: contents: read packages: write steps: - name: Checkout code uses: actions/checkout@v4 - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v5 with: context: install/sidecar-disk-collector push: true tags: | ghcr.io/crosstalk-solutions/project-nomad-disk-collector:${{ inputs.version }} ghcr.io/crosstalk-solutions/project-nomad-disk-collector:v${{ inputs.version }} ${{ inputs.tag_latest && 'ghcr.io/crosstalk-solutions/project-nomad-disk-collector:latest' || '' }} ================================================ FILE: .github/workflows/build-primary-image.yml ================================================ name: Build Primary Docker Image on: workflow_dispatch: inputs: version: description: 'Semantic version to label the Docker image under (no "v" prefix, e.g. "1.2.3")' required: true type: string tag_latest: description: 'Also tag this image as :latest? (Keep false for RC and beta releases)' required: false type: boolean default: false jobs: check_authorization: name: Check authorization to publish new Docker image runs-on: ubuntu-latest outputs: isAuthorized: ${{ steps.check-auth.outputs.is_authorized }} steps: - name: check-auth id: check-auth run: echo "is_authorized=${{ contains(secrets.DEPLOYMENT_AUTHORIZED_USERS, github.triggering_actor) }}" >> $GITHUB_OUTPUT build: name: Build Docker image needs: check_authorization if: needs.check_authorization.outputs.isAuthorized == 'true' runs-on: ubuntu-latest permissions: contents: read packages: write steps: - name: Checkout code uses: actions/checkout@v4 - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v5 with: push: true tags: | ghcr.io/crosstalk-solutions/project-nomad:${{ inputs.version }} ghcr.io/crosstalk-solutions/project-nomad:v${{ inputs.version }} ${{ inputs.tag_latest && 'ghcr.io/crosstalk-solutions/project-nomad:latest' || '' }} build-args: | VERSION=${{ inputs.version }} BUILD_DATE=${{ github.event.workflow_run.created_at }} VCS_REF=${{ github.sha }} ================================================ FILE: .github/workflows/build-sidecar-updater.yml ================================================ name: Build Sidecar Updater Image on: workflow_dispatch: inputs: version: description: 'Semantic version to label the Docker image under (no "v" prefix, e.g. "1.2.3")' required: true type: string tag_latest: description: 'Also tag this image as :latest?' 
required: false type: boolean default: false jobs: check_authorization: name: Check authorization to publish new Docker image runs-on: ubuntu-latest outputs: isAuthorized: ${{ steps.check-auth.outputs.is_authorized }} steps: - name: check-auth id: check-auth run: echo "is_authorized=${{ contains(secrets.DEPLOYMENT_AUTHORIZED_USERS, github.triggering_actor) }}" >> $GITHUB_OUTPUT build: name: Build sidecar-updater image needs: check_authorization if: needs.check_authorization.outputs.isAuthorized == 'true' runs-on: ubuntu-latest permissions: contents: read packages: write steps: - name: Checkout code uses: actions/checkout@v4 - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v5 with: context: install/sidecar-updater push: true tags: | ghcr.io/crosstalk-solutions/project-nomad-sidecar-updater:${{ inputs.version }} ghcr.io/crosstalk-solutions/project-nomad-sidecar-updater:v${{ inputs.version }} ${{ inputs.tag_latest && 'ghcr.io/crosstalk-solutions/project-nomad-sidecar-updater:latest' || '' }} ================================================ FILE: .github/workflows/release.yml ================================================ name: Release SemVer on: workflow_dispatch jobs: check_authorization: name: Check authorization to release new version runs-on: ubuntu-latest outputs: isAuthorized: ${{ steps.check-auth.outputs.is_authorized }} steps: - name: check-auth id: check-auth run: echo "is_authorized=${{ contains(secrets.DEPLOYMENT_AUTHORIZED_USERS, github.triggering_actor) }}" >> $GITHUB_OUTPUT release: name: Release needs: check_authorization if: needs.check_authorization.outputs.isAuthorized == 'true' runs-on: ubuntu-latest outputs: didRelease: ${{ steps.semver.outputs.new_release_published }} newVersion: ${{ steps.semver.outputs.new_release_version }} steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 persist-credentials: false - name: semantic-release uses: cycjimmy/semantic-release-action@v3 id: semver env: GITHUB_TOKEN: ${{ secrets.COSMISTACKBOT_ACCESS_TOKEN }} GIT_AUTHOR_NAME: cosmistack-bot GIT_AUTHOR_EMAIL: dev@cosmistack.com GIT_COMMITTER_NAME: cosmistack-bot GIT_COMMITTER_EMAIL: dev@cosmistack.com - name: Finalize release notes # Skip for pre-releases (versions containing a hyphen, e.g. 1.27.0-rc.1) if: | steps.semver.outputs.new_release_published == 'true' && !contains(steps.semver.outputs.new_release_version, '-') id: finalize-notes env: GITHUB_REPOSITORY: ${{ github.repository }} run: | git pull origin main chmod +x .github/scripts/finalize-release-notes.sh EXIT_CODE=0 .github/scripts/finalize-release-notes.sh \ "${{ steps.semver.outputs.new_release_version }}" \ admin/docs/release-notes.md || EXIT_CODE=$? 
if [[ "$EXIT_CODE" -eq 0 ]]; then echo "has_notes=true" >> $GITHUB_OUTPUT else echo "has_notes=false" >> $GITHUB_OUTPUT fi - name: Commit finalized release notes if: | steps.semver.outputs.new_release_published == 'true' && steps.finalize-notes.outputs.has_notes == 'true' && !contains(steps.semver.outputs.new_release_version, '-') run: | git config user.name "cosmistack-bot" git config user.email "dev@cosmistack.com" git remote set-url origin https://x-access-token:${{ secrets.COSMISTACKBOT_ACCESS_TOKEN }}@github.com/${{ github.repository }}.git git add admin/docs/release-notes.md git commit -m "docs(release): finalize v${{ steps.semver.outputs.new_release_version }} release notes [skip ci]" git push origin main - name: Update GitHub release body if: | steps.semver.outputs.new_release_published == 'true' && steps.finalize-notes.outputs.has_notes == 'true' && !contains(steps.semver.outputs.new_release_version, '-') env: GH_TOKEN: ${{ secrets.COSMISTACKBOT_ACCESS_TOKEN }} run: | gh release edit "v${{ steps.semver.outputs.new_release_version }}" \ --notes-file admin/docs/release-notes.md.section # Future: Send release notes email # - name: Send release notes email # if: steps.semver.outputs.new_release_published == 'true' && steps.finalize-notes.outputs.has_notes == 'true' # run: | # curl -X POST "https://api.projectnomad.us/api/v1/newsletter/release" \ # -H "Authorization: Bearer ${{ secrets.NOMAD_API_KEY }}" \ # -H "Content-Type: application/json" \ # -d "{\"version\": \"${{ steps.semver.outputs.new_release_version }}\", \"body\": $(cat admin/docs/release-notes.md.section | jq -Rs .)}" ================================================ FILE: .github/workflows/validate-collection-urls.yml ================================================ name: Validate Collection URLs on: push: paths: - 'collections/**.json' pull_request: paths: - 'collections/**.json' jobs: validate-urls: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Extract and validate URLs run: | FAILED=0 CHECKED=0 FAILED_URLS="" # Recursively extract all non-null string URLs from every JSON file in collections/ URLS=$(jq -r '.. | .url? | select(type == "string")' collections/*.json | sort -u) while IFS= read -r url; do [ -z "$url" ] && continue CHECKED=$((CHECKED + 1)) printf "Checking: %s ... " "$url" # Use Range: bytes=0-0 to avoid downloading the full file. # --max-filesize 1 aborts early if the server ignores the Range header # and returns 200 with the full body. The HTTP status is still captured. HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \ --range 0-0 \ --max-filesize 1 \ --max-time 30 \ --location \ "$url") if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "206" ]; then echo "OK ($HTTP_CODE)" else echo "FAILED ($HTTP_CODE)" FAILED=$((FAILED + 1)) FAILED_URLS="$FAILED_URLS\n - $url (HTTP $HTTP_CODE)" fi done <<< "$URLS" echo "" echo "Checked $CHECKED URLs, $FAILED failed." 
if [ "$FAILED" -gt 0 ]; then echo "" echo "Broken URLs:" printf "%b\n" "$FAILED_URLS" exit 1 fi ================================================ FILE: .gitignore ================================================ # Logs logs *.log # Diagnostic reports (https://nodejs.org/api/report.html) report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json # Compiled binary addons (https://nodejs.org/api/addons.html) build/Release # Dependency directories node_modules/ # Optional npm cache directory .npm # dotenv environment variables file .env # Build / Dist dist build tmp # macOS Metafiles .DS_Store # Fonts .ttf # Runtime-generated Files server/public server/temp # IDE Files .vscode .idea # Frontend assets compiled code admin/public/assets # Admin specific development files admin/storage ================================================ FILE: .releaserc.json ================================================ { "branches": [ "main", { "name": "rc", "prerelease": "rc" } ], "plugins": [ "@semantic-release/commit-analyzer", "@semantic-release/release-notes-generator", ["@semantic-release/npm", { "npmPublish": false }], ["@semantic-release/git", { "assets": ["package.json"], "message": "chore(release): ${nextRelease.version} [skip ci]" }], "@semantic-release/github" ] } ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 
## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to Project N.O.M.A.D. Thank you for your interest in contributing to Project N.O.M.A.D.! Community contributions are what keep this project growing and improving. Please read this guide fully before getting started — it will save you (and the maintainers) a lot of time. 
> **Note:** Acceptance of contributions is not guaranteed. All pull requests are evaluated based on quality, relevance, and alignment with the project's goals. The maintainers of Project N.O.M.A.D. ("Nomad") reserve the right to accept, deny, or modify any pull request at their sole discretion.

---

## Table of Contents

- [Code of Conduct](#code-of-conduct)
- [Before You Start](#before-you-start)
- [Getting Started](#getting-started-with-contributing)
- [Development Workflow](#development-workflow)
- [Commit Messages](#commit-messages)
- [Release Notes](#release-notes)
- [Versioning](#versioning)
- [Submitting a Pull Request](#submitting-a-pull-request)
- [Feedback & Community](#feedback--community)

---

## Code of Conduct

Please read and review our full [Code of Conduct](https://github.com/Crosstalk-Solutions/project-nomad/blob/main/CODE_OF_CONDUCT.md) before contributing. In short: please be respectful and considerate in all interactions with maintainers and other contributors. We are committed to providing a welcoming environment for everyone. Disrespectful or abusive behavior will not be tolerated.

---

## Before You Start

**Open an issue first.** Before writing any code, please [open an issue](../../issues/new) to discuss your proposed change. This helps avoid duplicate work and ensures your contribution aligns with the project's direction.

When opening an issue:

- Use a clear, descriptive title
- Describe the problem you're solving or the feature you want to add
- If it's a bug, include steps to reproduce it and as much detail about your environment as possible
- Ensure you redact any personal or sensitive information in any logs, configs, etc.

---

## Getting Started with Contributing

**Please note**: this is the Getting Started guide for developing and contributing to Nomad, NOT [installing Nomad](https://github.com/Crosstalk-Solutions/project-nomad/blob/main/README.md) for regular use!

### Prerequisites

- A Debian-based OS (Ubuntu recommended)
- `sudo`/root privileges
- Docker installed and running
- A stable internet connection (required for dependency downloads)
- Node.js (for frontend/admin work)

### Fork & Clone

1. Click **Fork** at the top right of this repository
2. Clone your fork locally:
   ```bash
   git clone https://github.com/YOUR_USERNAME/project-nomad.git
   cd project-nomad
   ```
3. Add the upstream remote so you can stay in sync:
   ```bash
   git remote add upstream https://github.com/Crosstalk-Solutions/project-nomad.git
   ```

### Avoid Installing a Release Version Locally

Because Nomad relies heavily on Docker, we actually recommend against installing a release version of the project on the same local machine where you are developing. This can lead to conflicts with ports, volumes, and other resources. Instead, you can run your development version in a separate Docker environment while keeping your local machine clean. It certainly __can__ be done, but it adds complexity to your setup and workflow. If you choose to install a release version locally, please ensure you have a clear strategy for managing potential conflicts and resource usage.

---

## Development Workflow

1. **Sync with upstream** before starting any new work. We prefer rebasing over merge commits to keep a clean, linear git history as much as possible (this also makes it easier for maintainers to review and merge your changes). To sync with upstream:
   ```bash
   git fetch upstream
   git checkout main
   git rebase upstream/main
   ```
2. **Create a feature branch** off `main` with a descriptive name:
   ```bash
   git checkout -b fix/issue-123
   # or
   git checkout -b feature/add-new-tool
   ```
3. **Make your changes.** Follow existing code style and conventions. Test your changes locally against a running N.O.M.A.D. instance before submitting.
4. **Add release notes** (see [Release Notes](#release-notes) below).
5. **Commit your changes** using [Conventional Commits](#commit-messages).
6. **Push your branch** and open a pull request.

---

## Commit Messages

This project uses [Conventional Commits](https://www.conventionalcommits.org/). All commit messages must follow this format:

```
<type>(<scope>): <description>
```

**Common types:**

| Type | When to use |
|------|-------------|
| `feat` | A new user-facing feature |
| `fix` | A bug fix |
| `docs` | Documentation changes only |
| `refactor` | Code change that isn't a fix or feature and does not affect functionality |
| `chore` | Build process, dependency updates, tooling |
| `test` | Adding or updating tests |

**Scope** is optional but encouraged — use it to indicate the area of the codebase affected (e.g., `api`, `ui`, `maps`).

**Examples:**

```
feat(ui): add dark mode toggle to Command Center
fix(api): resolve container status not updating after restart
docs: update hardware requirements in README
chore(deps): bump docker-compose to v2.24
```

---

## Release Notes

Human-readable release notes live in [`admin/docs/release-notes.md`](admin/docs/release-notes.md) and are displayed directly in the Command Center UI.

When your changes include anything user-facing, **add a summary to the `## Unreleased` section** at the top of that file under the appropriate heading:

- **Features** — new user-facing capabilities
- **Bug Fixes** — corrections to existing behavior
- **Improvements** — enhancements, refactors, docs, or dependency updates

Use the format `- **Area**: Description` to stay consistent with existing entries.

**Example:**

```markdown
## Unreleased

### Features

- **Maps**: Added support for downloading South America regional maps

### Bug Fixes

- **AI Chat**: Fixed document upload failing on filenames with special characters
```

> When a release is triggered, CI automatically stamps the version and date, commits the update, and publishes the content to the GitHub release. You do not need to do this manually.

---

## Versioning

This project uses [Semantic Versioning](https://semver.org/). Versions are managed in the root `package.json` and updated automatically by `semantic-release`. The `project-nomad` Docker image uses this version. The `admin/package.json` version stays at `0.0.0` and should not be changed manually.

---

## Submitting a Pull Request

1. Push your branch to your fork:
   ```bash
   git push origin your-branch-name
   ```
2. Open a pull request against the `main` branch of this repository
3. In the PR description:
   - Summarize what your changes do and why
   - Reference the related issue (e.g., `Closes #123`)
   - Note any relevant testing steps or environment details
4. Be responsive to feedback — maintainers may request changes. Pull requests with no activity for an extended period may be closed.

---

## Feedback & Community

Have questions or want to discuss ideas before opening an issue? Join the community:

- **Discord:** [Join the Crosstalk Solutions server](https://discord.com/invite/crosstalksolutions) — the best place to get help, share your builds, and talk with other N.O.M.A.D. users
- **Website:** [www.projectnomad.us](https://www.projectnomad.us)
- **Benchmark Leaderboard:** [benchmark.projectnomad.us](https://benchmark.projectnomad.us)

---

*Project N.O.M.A.D. is licensed under the [Apache License 2.0](LICENSE).*
================================================
FILE: Dockerfile
================================================
FROM node:22-slim AS base

# Install bash & curl for entrypoint script compatibility, graphicsmagick for pdf2pic, and libvips-dev & build-essential for sharp
RUN apt-get update && apt-get install -y bash curl graphicsmagick libvips-dev build-essential

# All deps stage
FROM base AS deps
WORKDIR /app
ADD admin/package.json admin/package-lock.json ./
RUN npm ci

# Production only deps stage
FROM base AS production-deps
WORKDIR /app
ADD admin/package.json admin/package-lock.json ./
RUN npm ci --omit=dev

# Build stage
FROM base AS build
WORKDIR /app
COPY --from=deps /app/node_modules /app/node_modules
ADD admin/ ./
RUN node ace build

# Production stage
FROM base

ARG VERSION=dev
ARG BUILD_DATE
ARG VCS_REF

# Labels
LABEL org.opencontainers.image.title="Project N.O.M.A.D" \
      org.opencontainers.image.description="The Project N.O.M.A.D Official Docker image" \
      org.opencontainers.image.version="${VERSION}" \
      org.opencontainers.image.created="${BUILD_DATE}" \
      org.opencontainers.image.revision="${VCS_REF}" \
      org.opencontainers.image.vendor="Crosstalk Solutions, LLC" \
      org.opencontainers.image.documentation="https://github.com/Crosstalk-Solutions/project-nomad/blob/main/README.md" \
      org.opencontainers.image.source="https://github.com/Crosstalk-Solutions/project-nomad" \
      org.opencontainers.image.licenses="Apache-2.0"

ENV NODE_ENV=production

WORKDIR /app
COPY --from=production-deps /app/node_modules /app/node_modules
COPY --from=build /app/build /app

# Copy root package.json for version info
COPY package.json /app/version.json

# Copy docs and README for access within the container
COPY admin/docs /app/docs
COPY README.md /app/README.md

# Copy entrypoint script and ensure it's executable
COPY install/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

EXPOSE 8080
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to the Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by the Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding any notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2024-2026 Crosstalk Solutions LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================
# Project N.O.M.A.D.

### Node for Offline Media, Archives, and Data

**Knowledge That Never Goes Offline**

[![Website](https://img.shields.io/badge/Website-projectnomad.us-blue)](https://www.projectnomad.us) [![Discord](https://img.shields.io/badge/Discord-Join%20Community-5865F2)](https://discord.com/invite/crosstalksolutions) [![Benchmark](https://img.shields.io/badge/Benchmark-Leaderboard-green)](https://benchmark.projectnomad.us)
---

Project N.O.M.A.D. is a self-contained, offline-first knowledge and education server packed with critical tools, knowledge, and AI to keep you informed and empowered—anytime, anywhere.

## Installation & Quickstart

Project N.O.M.A.D. can be installed on any Debian-based operating system (we recommend Ubuntu). Installation is completely terminal-based, and all tools and resources are designed to be accessed through the browser, so there's no need for a desktop environment if you'd rather set up N.O.M.A.D. as a "server" and access it through other clients.

*Note: sudo/root privileges are required to run the install script*

#### Quick Install (Debian-based OS Only)

```bash
sudo apt-get update && sudo apt-get install -y curl && curl -fsSL https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/install_nomad.sh -o install_nomad.sh && sudo bash install_nomad.sh
```

Once the script completes, Project N.O.M.A.D. is installed on your device! Open a browser and navigate to `http://localhost:8080` (or `http://DEVICE_IP:8080`) to start exploring!

### Advanced Installation

For more control over the installation process, copy and paste the [Docker Compose template](https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/management_compose.yaml) into a `docker-compose.yml` file and customize it to your liking (be sure to replace any placeholders with your actual values). Then, run `docker compose up -d` to start the Command Center and its dependencies. Note: this method is recommended for advanced users only, as it requires familiarity with Docker and manual configuration before starting.

## How It Works

N.O.M.A.D. is a management UI ("Command Center") and API that orchestrates a collection of containerized tools and resources via [Docker](https://www.docker.com/). It handles installation, configuration, and updates for everything — so you don't have to.

**Built-in capabilities include:**

- **AI Chat with Knowledge Base** — local AI chat powered by [Ollama](https://ollama.com/), with document upload and semantic search (RAG via [Qdrant](https://qdrant.tech/))
- **Information Library** — offline Wikipedia, medical references, ebooks, and more via [Kiwix](https://kiwix.org/)
- **Education Platform** — Khan Academy courses with progress tracking via [Kolibri](https://learningequality.org/kolibri/)
- **Offline Maps** — downloadable regional maps via [ProtoMaps](https://protomaps.com)
- **Data Tools** — encryption, encoding, and analysis via [CyberChef](https://gchq.github.io/CyberChef/)
- **Notes** — local note-taking via [FlatNotes](https://github.com/dullage/flatnotes)
- **System Benchmark** — hardware scoring with a [community leaderboard](https://benchmark.projectnomad.us)
- **Easy Setup Wizard** — guided first-time configuration with curated content collections

N.O.M.A.D. also includes built-in tools like a Wikipedia content selector, ZIM library manager, and content explorer.
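Since every managed tool is an ordinary Docker container, you can always peek under the hood with the standard Docker CLI. This is purely informational and just a sketch; the names and images you'll see depend on which services you've installed:

```bash
# List the containers the Command Center has deployed, with image and status.
docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}'
```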
## What's Included

| Capability | Powered By | What You Get |
|-----------|-----------|-------------|
| Information Library | Kiwix | Offline Wikipedia, medical references, survival guides, ebooks |
| AI Assistant | Ollama + Qdrant | Built-in chat with document upload and semantic search |
| Education Platform | Kolibri | Khan Academy courses, progress tracking, multi-user support |
| Offline Maps | ProtoMaps | Downloadable regional maps with search and navigation |
| Data Tools | CyberChef | Encryption, encoding, hashing, and data analysis |
| Notes | FlatNotes | Local note-taking with markdown support |
| System Benchmark | Built-in | Hardware scoring, Builder Tags, and community leaderboard |

## Device Requirements

While many similar offline survival computers are designed to be run on bare-minimum, lightweight hardware, Project N.O.M.A.D. is quite the opposite. To install and run the available AI tools, we highly encourage the use of a beefy, GPU-backed device to make the most of your install. At its core, however, N.O.M.A.D. is still very lightweight. For a barebones installation of the management application itself, the following minimum specs are required:

*Note: Project N.O.M.A.D. is not sponsored by any hardware manufacturer and is designed to be as hardware-agnostic as possible. The hardware listed below is for example/comparison use only*

#### Minimum Specs

- Processor: 2 GHz dual-core processor or better
- RAM: 4 GB system memory
- Storage: At least 5 GB free disk space
- OS: Debian-based (Ubuntu recommended)
- Stable internet connection (required during install only)

To run LLMs and other included AI tools:

#### Optimal Specs

- Processor: AMD Ryzen 7 or Intel Core i7 or better
- RAM: 32 GB system memory
- Graphics: NVIDIA RTX 3060 or AMD equivalent or better (more VRAM lets you run larger models)
- Storage: At least 250 GB free disk space (preferably on SSD)
- OS: Debian-based (Ubuntu recommended)
- Stable internet connection (required during install only)

**For detailed build recommendations at three price points ($150–$1,000+), see the [Hardware Guide](https://www.projectnomad.us/hardware).**

Again, Project N.O.M.A.D. itself is quite lightweight - it's the tools and resources you choose to install with N.O.M.A.D. that will determine the specs required for your unique deployment.

## About Internet Usage & Privacy

Project N.O.M.A.D. is designed for offline usage. An internet connection is only required during the initial installation (to download dependencies) and if you (the user) decide to download additional tools and resources at a later time. Otherwise, N.O.M.A.D. does not require an internet connection and has ZERO built-in telemetry. To test internet connectivity, N.O.M.A.D. attempts to make a request to Cloudflare's utility endpoint, `https://1.1.1.1/cdn-cgi/trace`, and checks for a successful response.

## About Security

By design, Project N.O.M.A.D. is intended to be open and available without hurdles - it includes no authentication. If you decide to connect your device to a local network after install (e.g. for allowing other devices to access its resources), you can block/open ports to control which services are exposed.
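For example, on Ubuntu you could use `ufw` to admit only your local subnet to the Command Center UI. This is a minimal sketch, assuming `ufw` is available and your LAN is `192.168.1.0/24` (substitute your own subnet, and add rules for any other service ports you want reachable):

```bash
# Deny inbound traffic by default, then allow the Command Center UI (port 8080)
# from the local subnet only. Adjust the subnet and ports for your deployment.
sudo ufw default deny incoming
sudo ufw allow from 192.168.1.0/24 to any port 8080 proto tcp
sudo ufw enable
```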
**Will authentication be added in the future?** Maybe. It's not currently a priority, but if there's enough demand for it, we may consider building in an optional authentication layer in a future release to support use cases where multiple users need access to the same instance but with different permission levels (e.g. family use with parental controls, classroom use with teacher/admin accounts, etc.). For now, we recommend using network-level controls to manage access if you're planning to expose your N.O.M.A.D. instance to other devices on a local network. N.O.M.A.D. is not designed to be exposed directly to the internet, and we strongly advise against doing so unless you really know what you're doing, have taken appropriate security measures, and understand the risks involved.

## Contributing

Contributions are welcome and appreciated! Please read this section fully to understand how to contribute to the project.

### General Guidelines

- **Open an issue first**: Before starting work on a new feature or bug fix, please open an issue to discuss your proposed changes. This helps ensure that your contribution aligns with the project's goals and avoids duplicate work. Title the issue clearly and provide a detailed description of the problem or feature you want to work on.
- **Fork the repository**: Click the "Fork" button at the top right of the repository page to create a copy of the project under your GitHub account.
- **Create a new branch**: In your forked repository, create a new branch for your work. Use a descriptive name for the branch that reflects the purpose of your changes (e.g., `fix/issue-123` or `feature/add-new-tool`).
- **Make your changes**: Implement your changes in the new branch. Follow the existing code style and conventions used in the project. Be sure to test your changes locally to ensure they work as expected.
- **Add Release Notes**: If your changes include new features, bug fixes, or improvements, please see the "Release Notes" section below to properly document your contribution for the next release.
- **Conventional Commits**: When committing your changes, please use conventional commit messages to provide a clear and consistent commit history (see the example commits after the "Versioning" section below). The format is `<type>(<scope>): <description>`, where:
  - `type` is the type of change (e.g., `feat` for new features, `fix` for bug fixes, `docs` for documentation changes, etc.)
  - `scope` is an optional area of the codebase that your change affects (e.g., `api`, `ui`, `docs`, etc.)
  - `description` is a brief summary of the change
- **Submit a pull request**: Once your changes are ready, submit a pull request to the main repository. Provide a clear description of your changes and reference any related issues. The project maintainers will review your pull request and may provide feedback or request changes before it can be merged.
- **Be responsive to feedback**: If the maintainers request changes or provide feedback on your pull request, please respond in a timely manner. Stale pull requests may be closed if there is no activity for an extended period.
- **Follow the project's code of conduct**: Please adhere to the project's code of conduct when interacting with maintainers and other contributors. Be respectful and considerate in your communications.
- **No guarantee of acceptance**: The project is community-driven, and all contributions are appreciated, but acceptance is not guaranteed. The maintainers will evaluate each contribution based on its quality, relevance, and alignment with the project's goals.
- **Thank you for contributing to Project N.O.M.A.D.!** Your efforts help make this project better for everyone.

### Versioning

This project uses semantic versioning. The version is managed in the root `package.json` and automatically updated by semantic-release.
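With semantic-release's default conventional-commit analysis, `fix:` commits typically produce a patch release, `feat:` commits a minor release, and commits that declare a breaking change a major release. A few hypothetical commit messages in the expected format (the scopes and descriptions are illustrative only):

```bash
# Hypothetical examples of conventional commit messages.
git commit -m "feat(ui): add dark mode toggle to settings"
git commit -m "fix(api): handle missing ZIM metadata gracefully"
git commit -m "docs: clarify hardware requirements in the README"
```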
For simplicity's sake, the "project-nomad" image uses the same version defined in the root `package.json` instead of the version in `admin/package.json` (which stays at 0.0.0), as it's the only published image derived from the code.

### Release Notes

Human-readable release notes live in [`admin/docs/release-notes.md`](admin/docs/release-notes.md) and are displayed in the Command Center's built-in documentation. When working on changes, add a summary to the `## Unreleased` section at the top of that file under the appropriate heading:

- **Features** — new user-facing capabilities
- **Bug Fixes** — corrections to existing behavior
- **Improvements** — enhancements, refactors, docs, or dependency updates

Use the format `- **Area**: Description` to stay consistent with existing entries. When a release is triggered, CI automatically stamps the version and date, commits the update, and pushes the content to the GitHub release.

## Community & Resources

- **Website:** [www.projectnomad.us](https://www.projectnomad.us) - Learn more about the project
- **Discord:** [Join the Community](https://discord.com/invite/crosstalksolutions) - Get help, share your builds, and connect with other NOMAD users
- **Benchmark Leaderboard:** [benchmark.projectnomad.us](https://benchmark.projectnomad.us) - See how your hardware stacks up against other NOMAD builds

## License

Project N.O.M.A.D. is licensed under the [Apache License 2.0](LICENSE).

## Helper Scripts

Once installed, Project N.O.M.A.D. has a few helper scripts should you ever need to troubleshoot issues or perform maintenance that can't be done through the Command Center. All of these scripts are found in Project N.O.M.A.D.'s install directory, `/opt/project-nomad`.

###### Start Script - Starts all installed project containers

```bash
sudo bash /opt/project-nomad/start_nomad.sh
```

###### Stop Script - Stops all installed project containers

```bash
sudo bash /opt/project-nomad/stop_nomad.sh
```

###### Update Script - Attempts to pull the latest images for the Command Center and its dependencies (i.e. mysql) and recreate the containers. Note: this *only* updates the Command Center containers. It does not update the installable application containers - that should be done through the Command Center UI.

```bash
sudo bash /opt/project-nomad/update_nomad.sh
```

###### Uninstall Script - Need to start fresh? Use the uninstall script to make your life easy. Note: this cannot be undone!

```bash
curl -fsSL https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/uninstall_nomad.sh -o uninstall_nomad.sh && sudo bash uninstall_nomad.sh
```

================================================ FILE: admin/.editorconfig ================================================ # http://editorconfig.org [*] indent_style = space indent_size = 2 end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true [*.json] insert_final_newline = unset [**.min.js] indent_style = unset insert_final_newline = unset [MakeFile] indent_style = space [*.md] trim_trailing_whitespace = false ================================================ FILE: admin/ace.js ================================================ /* |-------------------------------------------------------------------------- | JavaScript entrypoint for running ace commands |-------------------------------------------------------------------------- | | DO NOT MODIFY THIS FILE AS IT WILL BE OVERRIDDEN DURING THE BUILD | PROCESS.
| | See docs.adonisjs.com/guides/typescript-build-process#creating-production-build | | Since we cannot run TypeScript source code using the "node" binary, we need | a JavaScript entrypoint to run ace commands. | | This file registers the "ts-node/esm" hook with the Node.js module system | and then imports the "bin/console.ts" file. | */ /** * Register hook to process TypeScript files using ts-node-maintained */ import 'ts-node-maintained/register/esm' /** * Import ace console entrypoint */ await import('./bin/console.js') ================================================ FILE: admin/adonisrc.ts ================================================ import { defineConfig } from '@adonisjs/core/app' export default defineConfig({ /* |-------------------------------------------------------------------------- | Experimental flags |-------------------------------------------------------------------------- | | The following features will be enabled by default in the next major release | of AdonisJS. You can opt into them today to avoid any breaking changes | during upgrade. | */ experimental: { mergeMultipartFieldsAndFiles: true, shutdownInReverseOrder: true, }, /* |-------------------------------------------------------------------------- | Commands |-------------------------------------------------------------------------- | | List of ace commands to register from packages. The application commands | will be scanned automatically from the "./commands" directory. | */ commands: [() => import('@adonisjs/core/commands'), () => import('@adonisjs/lucid/commands')], /* |-------------------------------------------------------------------------- | Service providers |-------------------------------------------------------------------------- | | List of service providers to import and register when booting the | application | */ providers: [ () => import('@adonisjs/core/providers/app_provider'), () => import('@adonisjs/core/providers/hash_provider'), { file: () => import('@adonisjs/core/providers/repl_provider'), environment: ['repl', 'test'], }, () => import('@adonisjs/core/providers/vinejs_provider'), () => import('@adonisjs/core/providers/edge_provider'), () => import('@adonisjs/session/session_provider'), () => import('@adonisjs/vite/vite_provider'), () => import('@adonisjs/shield/shield_provider'), () => import('@adonisjs/static/static_provider'), () => import('@adonisjs/cors/cors_provider'), () => import('@adonisjs/lucid/database_provider'), () => import('@adonisjs/inertia/inertia_provider'), () => import('@adonisjs/transmit/transmit_provider'), () => import('#providers/map_static_provider') ], /* |-------------------------------------------------------------------------- | Preloads |-------------------------------------------------------------------------- | | List of modules to import before starting the application. | */ preloads: [() => import('#start/routes'), () => import('#start/kernel')], /* |-------------------------------------------------------------------------- | Tests |-------------------------------------------------------------------------- | | List of test suites to organize tests by their type. Feel free to remove | and add additional suites.
| */ tests: { suites: [ { files: ['tests/unit/**/*.spec(.ts|.js)'], name: 'unit', timeout: 2000, }, { files: ['tests/functional/**/*.spec(.ts|.js)'], name: 'functional', timeout: 30000, }, ], forceExit: false, }, /* |-------------------------------------------------------------------------- | Metafiles |-------------------------------------------------------------------------- | | A collection of files you want to copy to the build folder when creating | the production build. | */ metaFiles: [ { pattern: 'resources/views/**/*.edge', reloadServer: false, }, { pattern: 'public/**', reloadServer: false, }, ], assetsBundler: false, hooks: { onBuildStarting: [() => import('@adonisjs/vite/build_hook')], }, }) ================================================ FILE: admin/app/controllers/benchmark_controller.ts ================================================ import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' import { BenchmarkService } from '#services/benchmark_service' import { runBenchmarkValidator, submitBenchmarkValidator } from '#validators/benchmark' import { RunBenchmarkJob } from '#jobs/run_benchmark_job' import type { BenchmarkType } from '../../types/benchmark.js' import { randomUUID } from 'node:crypto' @inject() export default class BenchmarkController { constructor(private benchmarkService: BenchmarkService) {} /** * Start a benchmark run (async via job queue, or sync if specified) */ async run({ request, response }: HttpContext) { const payload = await request.validateUsing(runBenchmarkValidator) const benchmarkType: BenchmarkType = payload.benchmark_type || 'full' const runSync = request.input('sync') === 'true' || request.input('sync') === true // Check if a benchmark is already running const status = this.benchmarkService.getStatus() if (status.status !== 'idle') { return response.status(409).send({ success: false, error: 'A benchmark is already running', current_benchmark_id: status.benchmarkId, }) } // Run synchronously if requested (useful for local dev without Redis) if (runSync) { try { let result switch (benchmarkType) { case 'full': result = await this.benchmarkService.runFullBenchmark() break case 'system': result = await this.benchmarkService.runSystemBenchmarks() break case 'ai': result = await this.benchmarkService.runAIBenchmark() break default: result = await this.benchmarkService.runFullBenchmark() } return response.send({ success: true, benchmark_id: result.benchmark_id, nomad_score: result.nomad_score, result, }) } catch (error) { return response.status(500).send({ success: false, error: error.message, }) } } // Generate benchmark ID and dispatch job (async) const benchmarkId = randomUUID() const { job, created } = await RunBenchmarkJob.dispatch({ benchmark_id: benchmarkId, benchmark_type: benchmarkType, include_ai: benchmarkType === 'full' || benchmarkType === 'ai', }) return response.status(201).send({ success: true, job_id: job?.id || benchmarkId, benchmark_id: benchmarkId, message: created ? 
`${benchmarkType} benchmark started` : 'Benchmark job already exists', }) } /** * Run a system-only benchmark (CPU, memory, disk) */ async runSystem({ response }: HttpContext) { const status = this.benchmarkService.getStatus() if (status.status !== 'idle') { return response.status(409).send({ success: false, error: 'A benchmark is already running', }) } const benchmarkId = randomUUID() await RunBenchmarkJob.dispatch({ benchmark_id: benchmarkId, benchmark_type: 'system', include_ai: false, }) return response.status(201).send({ success: true, benchmark_id: benchmarkId, message: 'System benchmark started', }) } /** * Run an AI-only benchmark */ async runAI({ response }: HttpContext) { const status = this.benchmarkService.getStatus() if (status.status !== 'idle') { return response.status(409).send({ success: false, error: 'A benchmark is already running', }) } const benchmarkId = randomUUID() await RunBenchmarkJob.dispatch({ benchmark_id: benchmarkId, benchmark_type: 'ai', include_ai: true, }) return response.status(201).send({ success: true, benchmark_id: benchmarkId, message: 'AI benchmark started', }) } /** * Get all benchmark results */ async results({}: HttpContext) { const results = await this.benchmarkService.getAllResults() return { results, total: results.length, } } /** * Get the latest benchmark result */ async latest({}: HttpContext) { const result = await this.benchmarkService.getLatestResult() if (!result) { return { result: null } } return { result } } /** * Get a specific benchmark result by ID */ async show({ params, response }: HttpContext) { const result = await this.benchmarkService.getResultById(params.id) if (!result) { return response.status(404).send({ error: 'Benchmark result not found', }) } return { result } } /** * Submit benchmark results to central repository */ async submit({ request, response }: HttpContext) { const payload = await request.validateUsing(submitBenchmarkValidator) const anonymous = request.input('anonymous') === true || request.input('anonymous') === 'true' try { const submitResult = await this.benchmarkService.submitToRepository(payload.benchmark_id, anonymous) return response.send({ success: true, repository_id: submitResult.repository_id, percentile: submitResult.percentile, }) } catch (error) { // Pass through the status code from the service if available, otherwise default to 400 const statusCode = (error as any).statusCode || 400 return response.status(statusCode).send({ success: false, error: error.message, }) } } /** * Update builder tag for a benchmark result */ async updateBuilderTag({ request, response }: HttpContext) { const benchmarkId = request.input('benchmark_id') const builderTag = request.input('builder_tag') if (!benchmarkId) { return response.status(400).send({ success: false, error: 'benchmark_id is required', }) } const result = await this.benchmarkService.getResultById(benchmarkId) if (!result) { return response.status(404).send({ success: false, error: 'Benchmark result not found', }) } // Validate builder tag format if provided if (builderTag) { const tagPattern = /^[A-Za-z]+-[A-Za-z]+-\d{4}$/ if (!tagPattern.test(builderTag)) { return response.status(400).send({ success: false, error: 'Invalid builder tag format. 
Expected: Word-Word-0000', }) } } result.builder_tag = builderTag || null await result.save() return response.send({ success: true, builder_tag: result.builder_tag, }) } /** * Get comparison stats from central repository */ async comparison({}: HttpContext) { const stats = await this.benchmarkService.getComparisonStats() return { stats } } /** * Get current benchmark status */ async status({}: HttpContext) { return this.benchmarkService.getStatus() } /** * Get benchmark settings */ async settings({}: HttpContext) { const { default: BenchmarkSetting } = await import('#models/benchmark_setting') return await BenchmarkSetting.getAllSettings() } /** * Update benchmark settings */ async updateSettings({ request, response }: HttpContext) { const { default: BenchmarkSetting } = await import('#models/benchmark_setting') const body = request.body() if (body.allow_anonymous_submission !== undefined) { await BenchmarkSetting.setValue( 'allow_anonymous_submission', body.allow_anonymous_submission ? 'true' : 'false' ) } return response.send({ success: true, settings: await BenchmarkSetting.getAllSettings(), }) } } ================================================ FILE: admin/app/controllers/chats_controller.ts ================================================ import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' import { ChatService } from '#services/chat_service' import { createSessionSchema, updateSessionSchema, addMessageSchema } from '#validators/chat' import KVStore from '#models/kv_store' import { SystemService } from '#services/system_service' import { SERVICE_NAMES } from '../../constants/service_names.js' @inject() export default class ChatsController { constructor(private chatService: ChatService, private systemService: SystemService) {} async inertia({ inertia, response }: HttpContext) { const aiAssistantInstalled = await this.systemService.checkServiceInstalled(SERVICE_NAMES.OLLAMA) if (!aiAssistantInstalled) { return response.status(404).json({ error: 'AI Assistant service not installed' }) } const chatSuggestionsEnabled = await KVStore.getValue('chat.suggestionsEnabled') return inertia.render('chat', { settings: { chatSuggestionsEnabled: chatSuggestionsEnabled ?? false, }, }) } async index({}: HttpContext) { return await this.chatService.getAllSessions() } async show({ params, response }: HttpContext) { const sessionId = parseInt(params.id) const session = await this.chatService.getSession(sessionId) if (!session) { return response.status(404).json({ error: 'Session not found' }) } return session } async store({ request, response }: HttpContext) { try { const data = await request.validateUsing(createSessionSchema) const session = await this.chatService.createSession(data.title, data.model) return response.status(201).json(session) } catch (error) { return response.status(500).json({ error: error instanceof Error ? error.message : 'Failed to create session', }) } } async suggestions({ response }: HttpContext) { try { const suggestions = await this.chatService.getChatSuggestions() return response.status(200).json({ suggestions }) } catch (error) { return response.status(500).json({ error: error instanceof Error ? 
error.message : 'Failed to get suggestions', }) } } async update({ params, request, response }: HttpContext) { try { const sessionId = parseInt(params.id) const data = await request.validateUsing(updateSessionSchema) const session = await this.chatService.updateSession(sessionId, data) return session } catch (error) { return response.status(500).json({ error: error instanceof Error ? error.message : 'Failed to update session', }) } } async destroy({ params, response }: HttpContext) { try { const sessionId = parseInt(params.id) await this.chatService.deleteSession(sessionId) return response.status(204) } catch (error) { return response.status(500).json({ error: error instanceof Error ? error.message : 'Failed to delete session', }) } } async addMessage({ params, request, response }: HttpContext) { try { const sessionId = parseInt(params.id) const data = await request.validateUsing(addMessageSchema) const message = await this.chatService.addMessage(sessionId, data.role, data.content) return response.status(201).json(message) } catch (error) { return response.status(500).json({ error: error instanceof Error ? error.message : 'Failed to add message', }) } } async destroyAll({ response }: HttpContext) { try { const result = await this.chatService.deleteAllSessions() return response.status(200).json(result) } catch (error) { return response.status(500).json({ error: error instanceof Error ? error.message : 'Failed to delete all sessions', }) } } } ================================================ FILE: admin/app/controllers/collection_updates_controller.ts ================================================ import { CollectionUpdateService } from '#services/collection_update_service' import { assertNotPrivateUrl, applyContentUpdateValidator, applyAllContentUpdatesValidator, } from '#validators/common' import type { HttpContext } from '@adonisjs/core/http' export default class CollectionUpdatesController { async checkForUpdates({}: HttpContext) { const service = new CollectionUpdateService() return await service.checkForUpdates() } async applyUpdate({ request }: HttpContext) { const update = await request.validateUsing(applyContentUpdateValidator) assertNotPrivateUrl(update.download_url) const service = new CollectionUpdateService() return await service.applyUpdate(update) } async applyAllUpdates({ request }: HttpContext) { const { updates } = await request.validateUsing(applyAllContentUpdatesValidator) for (const update of updates) { assertNotPrivateUrl(update.download_url) } const service = new CollectionUpdateService() return await service.applyAllUpdates(updates) } } ================================================ FILE: admin/app/controllers/docs_controller.ts ================================================ import { DocsService } from '#services/docs_service' import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' @inject() export default class DocsController { constructor( private docsService: DocsService ) { } async list({ }: HttpContext) { return await this.docsService.getDocs(); } async show({ params, inertia }: HttpContext) { const content = await this.docsService.parseFile(params.slug); return inertia.render('docs/show', { content, }); } } ================================================ FILE: admin/app/controllers/downloads_controller.ts ================================================ import type { HttpContext } from '@adonisjs/core/http' import { DownloadService } from '#services/download_service' import { downloadJobsByFiletypeSchema } from 
'#validators/download' import { inject } from '@adonisjs/core' @inject() export default class DownloadsController { constructor(private downloadService: DownloadService) {} async index() { return this.downloadService.listDownloadJobs() } async filetype({ request }: HttpContext) { const payload = await request.validateUsing(downloadJobsByFiletypeSchema) return this.downloadService.listDownloadJobs(payload.params.filetype) } async removeJob({ params }: HttpContext) { await this.downloadService.removeFailedJob(params.jobId) return { success: true } } } ================================================ FILE: admin/app/controllers/easy_setup_controller.ts ================================================ import { SystemService } from '#services/system_service' import { ZimService } from '#services/zim_service' import { CollectionManifestService } from '#services/collection_manifest_service' import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' @inject() export default class EasySetupController { constructor( private systemService: SystemService, private zimService: ZimService ) {} async index({ inertia }: HttpContext) { const services = await this.systemService.getServices({ installedOnly: false }) return inertia.render('easy-setup/index', { system: { services: services, }, }) } async complete({ inertia }: HttpContext) { return inertia.render('easy-setup/complete') } async listCuratedCategories({}: HttpContext) { return await this.zimService.listCuratedCategories() } async refreshManifests({}: HttpContext) { const manifestService = new CollectionManifestService() const [zimChanged, mapsChanged, wikiChanged] = await Promise.all([ manifestService.fetchAndCacheSpec('zim_categories'), manifestService.fetchAndCacheSpec('maps'), manifestService.fetchAndCacheSpec('wikipedia'), ]) return { success: true, changed: { zim_categories: zimChanged, maps: mapsChanged, wikipedia: wikiChanged, }, } } } ================================================ FILE: admin/app/controllers/home_controller.ts ================================================ import { SystemService } from '#services/system_service' import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' @inject() export default class HomeController { constructor( private systemService: SystemService, ) { } async index({ response }: HttpContext) { // Redirect / to /home return response.redirect().toPath('/home'); } async home({ inertia }: HttpContext) { const services = await this.systemService.getServices({ installedOnly: true }); return inertia.render('home', { system: { services } }) } } ================================================ FILE: admin/app/controllers/maps_controller.ts ================================================ import { MapService } from '#services/map_service' import { assertNotPrivateUrl, downloadCollectionValidator, filenameParamValidator, remoteDownloadValidator, remoteDownloadValidatorOptional, } from '#validators/common' import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' @inject() export default class MapsController { constructor(private mapService: MapService) {} async index({ inertia }: HttpContext) { const baseAssetsCheck = await this.mapService.ensureBaseAssets() const regionFiles = await this.mapService.listRegions() return inertia.render('maps', { maps: { baseAssetsExist: baseAssetsCheck, regionFiles: regionFiles.files, }, }) } async downloadBaseAssets({ request }: HttpContext) { const payload = await 
request.validateUsing(remoteDownloadValidatorOptional) if (payload.url) assertNotPrivateUrl(payload.url) await this.mapService.downloadBaseAssets(payload.url) return { success: true } } async downloadRemote({ request }: HttpContext) { const payload = await request.validateUsing(remoteDownloadValidator) assertNotPrivateUrl(payload.url) const filename = await this.mapService.downloadRemote(payload.url) return { message: 'Download started successfully', filename, url: payload.url, } } async downloadCollection({ request }: HttpContext) { const payload = await request.validateUsing(downloadCollectionValidator) const resources = await this.mapService.downloadCollection(payload.slug) return { message: 'Collection download started successfully', slug: payload.slug, resources, } } // For providing a "preflight" check in the UI before actually starting a background download async downloadRemotePreflight({ request }: HttpContext) { const payload = await request.validateUsing(remoteDownloadValidator) assertNotPrivateUrl(payload.url) const info = await this.mapService.downloadRemotePreflight(payload.url) return info } async fetchLatestCollections({}: HttpContext) { const success = await this.mapService.fetchLatestCollections() return { success } } async listCuratedCollections({}: HttpContext) { return await this.mapService.listCuratedCollections() } async listRegions({}: HttpContext) { return await this.mapService.listRegions() } async styles({ request, response }: HttpContext) { // Automatically ensure base assets are present before generating styles const baseAssetsExist = await this.mapService.ensureBaseAssets() if (!baseAssetsExist) { return response.status(500).send({ message: 'Base map assets are missing and could not be downloaded. Please check your connection and try again.', }) } const styles = await this.mapService.generateStylesJSON(request.host(), request.protocol()) return response.json(styles) } async delete({ request, response }: HttpContext) { const payload = await request.validateUsing(filenameParamValidator) try { await this.mapService.delete(payload.params.filename) } catch (error) { if (error.message === 'not_found') { return response.status(404).send({ message: `Map file with key ${payload.params.filename} not found`, }) } throw error // Re-throw any other errors and let the global error handler catch } return { message: 'Map file deleted successfully', } } } ================================================ FILE: admin/app/controllers/ollama_controller.ts ================================================ import { ChatService } from '#services/chat_service' import { OllamaService } from '#services/ollama_service' import { RagService } from '#services/rag_service' import { modelNameSchema } from '#validators/download' import { chatSchema, getAvailableModelsSchema } from '#validators/ollama' import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' import { DEFAULT_QUERY_REWRITE_MODEL, RAG_CONTEXT_LIMITS, SYSTEM_PROMPTS } from '../../constants/ollama.js' import logger from '@adonisjs/core/services/logger' import type { Message } from 'ollama' @inject() export default class OllamaController { constructor( private chatService: ChatService, private ollamaService: OllamaService, private ragService: RagService ) { } async availableModels({ request }: HttpContext) { const reqData = await request.validateUsing(getAvailableModelsSchema) return await this.ollamaService.getAvailableModels({ sort: reqData.sort, recommendedOnly: reqData.recommendedOnly, query: 
reqData.query || null, limit: reqData.limit || 15, force: reqData.force, }) } async chat({ request, response }: HttpContext) { const reqData = await request.validateUsing(chatSchema) // Flush SSE headers immediately so the client connection is open while // pre-processing (query rewriting, RAG lookup) runs in the background. if (reqData.stream) { response.response.setHeader('Content-Type', 'text/event-stream') response.response.setHeader('Cache-Control', 'no-cache') response.response.setHeader('Connection', 'keep-alive') response.response.flushHeaders() } try { // If there are no system messages in the chat inject system prompts const hasSystemMessage = reqData.messages.some((msg) => msg.role === 'system') if (!hasSystemMessage) { const systemPrompt = { role: 'system' as const, content: SYSTEM_PROMPTS.default, } logger.debug('[OllamaController] Injecting system prompt') reqData.messages.unshift(systemPrompt) } // Query rewriting for better RAG retrieval with manageable context // Will return user's latest message if no rewriting is needed const rewrittenQuery = await this.rewriteQueryWithContext(reqData.messages) logger.debug(`[OllamaController] Rewritten query for RAG: "${rewrittenQuery}"`) if (rewrittenQuery) { const relevantDocs = await this.ragService.searchSimilarDocuments( rewrittenQuery, 5, // Top 5 most relevant chunks 0.3 // Minimum similarity score of 0.3 ) logger.debug(`[RAG] Retrieved ${relevantDocs.length} relevant documents for query: "${rewrittenQuery}"`) // If relevant context is found, inject as a system message with adaptive limits if (relevantDocs.length > 0) { // Determine context budget based on model size const { maxResults, maxTokens } = this.getContextLimitsForModel(reqData.model) let trimmedDocs = relevantDocs.slice(0, maxResults) // Apply token cap if set (estimate ~4 chars per token) // Always include the first (most relevant) result — the cap only gates subsequent results if (maxTokens > 0) { const charCap = maxTokens * 4 let totalChars = 0 trimmedDocs = trimmedDocs.filter((doc, idx) => { totalChars += doc.text.length return idx === 0 || totalChars <= charCap }) } logger.debug( `[RAG] Injecting ${trimmedDocs.length}/${relevantDocs.length} results (model: ${reqData.model}, maxResults: ${maxResults}, maxTokens: ${maxTokens || 'unlimited'})` ) const contextText = trimmedDocs .map((doc, idx) => `[Context ${idx + 1}] (Relevance: ${(doc.score * 100).toFixed(1)}%)\n${doc.text}`) .join('\n\n') const systemMessage = { role: 'system' as const, content: SYSTEM_PROMPTS.rag_context(contextText), } // Insert system message at the beginning (after any existing system messages) const firstNonSystemIndex = reqData.messages.findIndex((msg) => msg.role !== 'system') const insertIndex = firstNonSystemIndex === -1 ? 0 : firstNonSystemIndex reqData.messages.splice(insertIndex, 0, systemMessage) } } // Check if the model supports "thinking" capability for enhanced response generation // If gpt-oss model, it requires a text param for "think" https://docs.ollama.com/api/chat const thinkingCapability = await this.ollamaService.checkModelHasThinking(reqData.model) const think: boolean | 'medium' = thinkingCapability ? (reqData.model.startsWith('gpt-oss') ? 
'medium' : true) : false // Separate sessionId from the Ollama request payload — Ollama rejects unknown fields const { sessionId, ...ollamaRequest } = reqData // Save user message to DB before streaming if sessionId provided let userContent: string | null = null if (sessionId) { const lastUserMsg = [...reqData.messages].reverse().find((m) => m.role === 'user') if (lastUserMsg) { userContent = lastUserMsg.content await this.chatService.addMessage(sessionId, 'user', userContent) } } if (reqData.stream) { logger.debug(`[OllamaController] Initiating streaming response for model: "${reqData.model}" with think: ${think}`) // Headers already flushed above const stream = await this.ollamaService.chatStream({ ...ollamaRequest, think }) let fullContent = '' for await (const chunk of stream) { if (chunk.message?.content) { fullContent += chunk.message.content } response.response.write(`data: ${JSON.stringify(chunk)}\n\n`) } response.response.end() // Save assistant message and optionally generate title if (sessionId && fullContent) { await this.chatService.addMessage(sessionId, 'assistant', fullContent) const messageCount = await this.chatService.getMessageCount(sessionId) if (messageCount <= 2 && userContent) { this.chatService.generateTitle(sessionId, userContent, fullContent).catch((err) => { logger.error(`[OllamaController] Title generation failed: ${err instanceof Error ? err.message : err}`) }) } } return } // Non-streaming (legacy) path const result = await this.ollamaService.chat({ ...ollamaRequest, think }) if (sessionId && result?.message?.content) { await this.chatService.addMessage(sessionId, 'assistant', result.message.content) const messageCount = await this.chatService.getMessageCount(sessionId) if (messageCount <= 2 && userContent) { this.chatService.generateTitle(sessionId, userContent, result.message.content).catch((err) => { logger.error(`[OllamaController] Title generation failed: ${err instanceof Error ? err.message : err}`) }) } } return result } catch (error) { if (reqData.stream) { response.response.write(`data: ${JSON.stringify({ error: true })}\n\n`) response.response.end() return } throw error } } async deleteModel({ request }: HttpContext) { const reqData = await request.validateUsing(modelNameSchema) await this.ollamaService.deleteModel(reqData.model) return { success: true, message: `Model deleted: ${reqData.model}`, } } async dispatchModelDownload({ request }: HttpContext) { const reqData = await request.validateUsing(modelNameSchema) await this.ollamaService.dispatchModelDownload(reqData.model) return { success: true, message: `Download job dispatched for model: ${reqData.model}`, } } async installedModels({ }: HttpContext) { return await this.ollamaService.getModels() } /** * Determines RAG context limits based on model size extracted from the model name. * Parses size indicators like "1b", "3b", "8b", "70b" from model names/tags. */ private getContextLimitsForModel(modelName: string): { maxResults: number; maxTokens: number } { // Extract parameter count from model name (e.g., "llama3.2:3b", "qwen2.5:1.5b", "gemma:7b") const sizeMatch = modelName.match(/(\d+\.?\d*)[bB]/) const paramBillions = sizeMatch ? 
parseFloat(sizeMatch[1]) : 8 // default to 8B if unknown for (const tier of RAG_CONTEXT_LIMITS) { if (paramBillions <= tier.maxParams) { return { maxResults: tier.maxResults, maxTokens: tier.maxTokens } } } // Fallback: no limits return { maxResults: 5, maxTokens: 0 } } private async rewriteQueryWithContext( messages: Message[] ): Promise<string | null> { try { // Get recent conversation history (last 6 messages for 3 turns) const recentMessages = messages.slice(-6) // Skip rewriting for short conversations. Rewriting adds latency with // little RAG benefit until there is enough context to matter. const userMessages = recentMessages.filter(msg => msg.role === 'user') if (userMessages.length <= 2) { return userMessages[userMessages.length - 1]?.content || null } const conversationContext = recentMessages .map(msg => { const role = msg.role === 'user' ? 'User' : 'Assistant' // Truncate assistant messages to first 200 chars to keep context manageable const content = msg.role === 'assistant' ? msg.content.slice(0, 200) + (msg.content.length > 200 ? '...' : '') : msg.content return `${role}: "${content}"` }) .join('\n') const installedModels = await this.ollamaService.getModels(true) const rewriteModelAvailable = installedModels?.some(model => model.name === DEFAULT_QUERY_REWRITE_MODEL) if (!rewriteModelAvailable) { logger.warn(`[RAG] Query rewrite model "${DEFAULT_QUERY_REWRITE_MODEL}" not available. Skipping query rewriting.`) const lastUserMessage = [...messages].reverse().find(msg => msg.role === 'user') return lastUserMessage?.content || null } // FUTURE ENHANCEMENT: allow the user to specify which model to use for rewriting const response = await this.ollamaService.chat({ model: DEFAULT_QUERY_REWRITE_MODEL, messages: [ { role: 'system', content: SYSTEM_PROMPTS.query_rewrite, }, { role: 'user', content: `Conversation:\n${conversationContext}\n\nRewritten Query:`, }, ], }) const rewrittenQuery = response.message.content.trim() logger.info(`[RAG] Query rewritten: "${rewrittenQuery}"`) return rewrittenQuery } catch (error) { logger.error( `[RAG] Query rewriting failed: ${error instanceof Error ?
error.message : error}` ) // Fallback to last user message if rewriting fails const lastUserMessage = [...messages].reverse().find(msg => msg.role === 'user') return lastUserMessage?.content || null } } } ================================================ FILE: admin/app/controllers/rag_controller.ts ================================================ import { RagService } from '#services/rag_service' import { EmbedFileJob } from '#jobs/embed_file_job' import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' import app from '@adonisjs/core/services/app' import { randomBytes } from 'node:crypto' import { sanitizeFilename } from '../utils/fs.js' import { deleteFileSchema, getJobStatusSchema } from '#validators/rag' @inject() export default class RagController { constructor(private ragService: RagService) { } public async upload({ request, response }: HttpContext) { const uploadedFile = request.file('file') if (!uploadedFile) { return response.status(400).json({ error: 'No file uploaded' }) } const randomSuffix = randomBytes(6).toString('hex') const sanitizedName = sanitizeFilename(uploadedFile.clientName) const fileName = `${sanitizedName}-${randomSuffix}.${uploadedFile.extname || 'txt'}` const fullPath = app.makePath(RagService.UPLOADS_STORAGE_PATH, fileName) await uploadedFile.move(app.makePath(RagService.UPLOADS_STORAGE_PATH), { name: fileName, }) // Dispatch background job for embedding const result = await EmbedFileJob.dispatch({ filePath: fullPath, fileName, }) return response.status(202).json({ message: result.message, jobId: result.jobId, fileName, filePath: `/${RagService.UPLOADS_STORAGE_PATH}/${fileName}`, alreadyProcessing: !result.created, }) } public async getActiveJobs({ response }: HttpContext) { const jobs = await EmbedFileJob.listActiveJobs() return response.status(200).json(jobs) } public async getJobStatus({ request, response }: HttpContext) { const reqData = await request.validateUsing(getJobStatusSchema) const fullPath = app.makePath(RagService.UPLOADS_STORAGE_PATH, reqData.filePath) const status = await EmbedFileJob.getStatus(fullPath) if (!status.exists) { return response.status(404).json({ error: 'Job not found for this file' }) } return response.status(200).json(status) } public async getStoredFiles({ response }: HttpContext) { const files = await this.ragService.getStoredFiles() return response.status(200).json({ files }) } public async deleteFile({ request, response }: HttpContext) { const { source } = await request.validateUsing(deleteFileSchema) const result = await this.ragService.deleteFileBySource(source) if (!result.success) { return response.status(500).json({ error: result.message }) } return response.status(200).json({ message: result.message }) } public async scanAndSync({ response }: HttpContext) { try { const syncResult = await this.ragService.scanAndSyncStorage() return response.status(200).json(syncResult) } catch (error) { return response.status(500).json({ error: 'Error scanning and syncing storage', details: error.message }) } } } ================================================ FILE: admin/app/controllers/settings_controller.ts ================================================ import KVStore from '#models/kv_store'; import { BenchmarkService } from '#services/benchmark_service'; import { MapService } from '#services/map_service'; import { OllamaService } from '#services/ollama_service'; import { SystemService } from '#services/system_service'; import { updateSettingSchema } from '#validators/settings'; import { 
inject } from '@adonisjs/core'; import type { HttpContext } from '@adonisjs/core/http' import type { KVStoreKey } from '../../types/kv_store.js'; @inject() export default class SettingsController { constructor( private systemService: SystemService, private mapService: MapService, private benchmarkService: BenchmarkService, private ollamaService: OllamaService ) { } async system({ inertia }: HttpContext) { const systemInfo = await this.systemService.getSystemInfo(); return inertia.render('settings/system', { system: { info: systemInfo } }); } async apps({ inertia }: HttpContext) { const services = await this.systemService.getServices({ installedOnly: false }); return inertia.render('settings/apps', { system: { services } }); } async legal({ inertia }: HttpContext) { return inertia.render('settings/legal'); } async support({ inertia }: HttpContext) { return inertia.render('settings/support'); } async maps({ inertia }: HttpContext) { const baseAssetsCheck = await this.mapService.ensureBaseAssets(); const regionFiles = await this.mapService.listRegions(); return inertia.render('settings/maps', { maps: { baseAssetsExist: baseAssetsCheck, regionFiles: regionFiles.files } }); } async models({ inertia }: HttpContext) { const availableModels = await this.ollamaService.getAvailableModels({ sort: 'pulls', recommendedOnly: false, query: null, limit: 15 }); const installedModels = await this.ollamaService.getModels(); const chatSuggestionsEnabled = await KVStore.getValue('chat.suggestionsEnabled') const aiAssistantCustomName = await KVStore.getValue('ai.assistantCustomName') return inertia.render('settings/models', { models: { availableModels: availableModels?.models || [], installedModels: installedModels || [], settings: { chatSuggestionsEnabled: chatSuggestionsEnabled ?? false, aiAssistantCustomName: aiAssistantCustomName ?? 
'', } } }); } async update({ inertia }: HttpContext) { const updateInfo = await this.systemService.checkLatestVersion(); return inertia.render('settings/update', { system: { updateAvailable: updateInfo.updateAvailable, latestVersion: updateInfo.latestVersion, currentVersion: updateInfo.currentVersion } }); } async zim({ inertia }: HttpContext) { return inertia.render('settings/zim/index') } async zimRemote({ inertia }: HttpContext) { return inertia.render('settings/zim/remote-explorer'); } async benchmark({ inertia }: HttpContext) { const latestResult = await this.benchmarkService.getLatestResult(); const status = this.benchmarkService.getStatus(); return inertia.render('settings/benchmark', { benchmark: { latestResult, status: status.status, currentBenchmarkId: status.benchmarkId } }); } async getSetting({ request, response }: HttpContext) { const key = request.qs().key; const value = await KVStore.getValue(key as KVStoreKey); return response.status(200).send({ key, value }); } async updateSetting({ request, response }: HttpContext) { const reqData = await request.validateUsing(updateSettingSchema); await this.systemService.updateSetting(reqData.key, reqData.value); return response.status(200).send({ success: true, message: 'Setting updated successfully' }); } } ================================================ FILE: admin/app/controllers/system_controller.ts ================================================ import { DockerService } from '#services/docker_service'; import { SystemService } from '#services/system_service' import { SystemUpdateService } from '#services/system_update_service' import { ContainerRegistryService } from '#services/container_registry_service' import { CheckServiceUpdatesJob } from '#jobs/check_service_updates_job' import { affectServiceValidator, checkLatestVersionValidator, installServiceValidator, subscribeToReleaseNotesValidator, updateServiceValidator } from '#validators/system'; import { inject } from '@adonisjs/core' import type { HttpContext } from '@adonisjs/core/http' @inject() export default class SystemController { constructor( private systemService: SystemService, private dockerService: DockerService, private systemUpdateService: SystemUpdateService, private containerRegistryService: ContainerRegistryService ) { } async getInternetStatus({ }: HttpContext) { return await this.systemService.getInternetStatus(); } async getSystemInfo({ }: HttpContext) { return await this.systemService.getSystemInfo(); } async getServices({ }: HttpContext) { return await this.systemService.getServices({ installedOnly: true }); } async installService({ request, response }: HttpContext) { const payload = await request.validateUsing(installServiceValidator); const result = await this.dockerService.createContainerPreflight(payload.service_name); if (result.success) { response.send({ success: true, message: result.message }); } else { response.status(400).send({ error: result.message }); } } async affectService({ request, response }: HttpContext) { const payload = await request.validateUsing(affectServiceValidator); const result = await this.dockerService.affectContainer(payload.service_name, payload.action); if (!result) { response.internalServerError({ error: 'Failed to affect service' }); return; } response.send({ success: result.success, message: result.message }); } async checkLatestVersion({ request }: HttpContext) { const payload = await request.validateUsing(checkLatestVersionValidator) return await this.systemService.checkLatestVersion(payload.force); } async 
forceReinstallService({ request, response }: HttpContext) { const payload = await request.validateUsing(installServiceValidator); const result = await this.dockerService.forceReinstall(payload.service_name); if (!result) { response.internalServerError({ error: 'Failed to force reinstall service' }); return; } response.send({ success: result.success, message: result.message }); } async requestSystemUpdate({ response }: HttpContext) { if (!this.systemUpdateService.isSidecarAvailable()) { response.status(503).send({ success: false, error: 'Update sidecar is not available. Ensure the updater container is running.', }); return; } const result = await this.systemUpdateService.requestUpdate(); if (result.success) { response.send({ success: true, message: result.message, note: 'Monitor update progress via GET /api/system/update/status. The connection may drop during container restart.', }); } else { response.status(409).send({ success: false, error: result.message, }); } } async getSystemUpdateStatus({ response }: HttpContext) { const status = this.systemUpdateService.getUpdateStatus(); if (!status) { response.status(500).send({ error: 'Failed to retrieve update status', }); return; } response.send(status); } async getSystemUpdateLogs({ response }: HttpContext) { const logs = this.systemUpdateService.getUpdateLogs(); response.send({ logs }); } async subscribeToReleaseNotes({ request }: HttpContext) { const reqData = await request.validateUsing(subscribeToReleaseNotesValidator); return await this.systemService.subscribeToReleaseNotes(reqData.email); } async getDebugInfo({}: HttpContext) { const debugInfo = await this.systemService.getDebugInfo() return { debugInfo } } async checkServiceUpdates({ response }: HttpContext) { await CheckServiceUpdatesJob.dispatch() response.send({ success: true, message: 'Service update check dispatched' }) } async getAvailableVersions({ params, response }: HttpContext) { const serviceName = params.name const service = await (await import('#models/service')).default .query() .where('service_name', serviceName) .where('installed', true) .first() if (!service) { return response.status(404).send({ error: `Service ${serviceName} not found or not installed` }) } try { const hostArch = await this.getHostArch() const updates = await this.containerRegistryService.getAvailableUpdates( service.container_image, hostArch, service.source_repo ) response.send({ versions: updates }) } catch (error) { response.status(500).send({ error: `Failed to fetch versions: ${error.message}` }) } } async updateService({ request, response }: HttpContext) { const payload = await request.validateUsing(updateServiceValidator) const result = await this.dockerService.updateContainer( payload.service_name, payload.target_version ) if (result.success) { response.send({ success: true, message: result.message }) } else { response.status(400).send({ error: result.message }) } } private async getHostArch(): Promise<string> { try { const info = await this.dockerService.docker.info() const arch = info.Architecture || '' const archMap: Record<string, string> = { x86_64: 'amd64', aarch64: 'arm64', armv7l: 'arm', amd64: 'amd64', arm64: 'arm64', } return archMap[arch] || arch.toLowerCase() } catch { return 'amd64' } } } ================================================ FILE: admin/app/controllers/zim_controller.ts ================================================ import { ZimService } from '#services/zim_service' import { assertNotPrivateUrl, downloadCategoryTierValidator, filenameParamValidator, remoteDownloadWithMetadataValidator,
================================================
FILE: admin/app/controllers/zim_controller.ts
================================================
import { ZimService } from '#services/zim_service'
import {
  assertNotPrivateUrl,
  downloadCategoryTierValidator,
  filenameParamValidator,
  remoteDownloadWithMetadataValidator,
  selectWikipediaValidator,
} from '#validators/common'
import { listRemoteZimValidator } from '#validators/zim'
import { inject } from '@adonisjs/core'
import type { HttpContext } from '@adonisjs/core/http'

@inject()
export default class ZimController {
  constructor(private zimService: ZimService) {}

  async list({}: HttpContext) {
    return await this.zimService.list()
  }

  async listRemote({ request }: HttpContext) {
    const payload = await request.validateUsing(listRemoteZimValidator)
    const { start = 0, count = 12, query } = payload
    return await this.zimService.listRemote({ start, count, query })
  }

  async downloadRemote({ request }: HttpContext) {
    const payload = await request.validateUsing(remoteDownloadWithMetadataValidator)
    assertNotPrivateUrl(payload.url)
    const { filename, jobId } = await this.zimService.downloadRemote(payload.url)
    return {
      message: 'Download started successfully',
      filename,
      jobId,
      url: payload.url,
    }
  }

  async listCuratedCategories({}: HttpContext) {
    return await this.zimService.listCuratedCategories()
  }

  async downloadCategoryTier({ request }: HttpContext) {
    const payload = await request.validateUsing(downloadCategoryTierValidator)
    const resources = await this.zimService.downloadCategoryTier(
      payload.categorySlug,
      payload.tierSlug
    )
    return {
      message: 'Download started successfully',
      categorySlug: payload.categorySlug,
      tierSlug: payload.tierSlug,
      resources,
    }
  }

  async delete({ request, response }: HttpContext) {
    const payload = await request.validateUsing(filenameParamValidator)
    try {
      await this.zimService.delete(payload.params.filename)
    } catch (error) {
      if (error.message === 'not_found') {
        return response.status(404).send({
          message: `ZIM file with key ${payload.params.filename} not found`,
        })
      }
      throw error // Re-throw any other errors and let the global error handler catch them
    }
    return {
      message: 'ZIM file deleted successfully',
    }
  }

  // Wikipedia selector endpoints
  async getWikipediaState({}: HttpContext) {
    return this.zimService.getWikipediaState()
  }

  async selectWikipedia({ request }: HttpContext) {
    const payload = await request.validateUsing(selectWikipediaValidator)
    return this.zimService.selectWikipedia(payload.optionId)
  }
}
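/*
 * Editor's note (illustrative, not in the original source): `assertNotPrivateUrl`
 * rejects download URLs that point at private/loopback address space before the
 * download job is queued, so a crafted request such as
 *
 *   { "url": "http://169.254.169.254/latest/meta-data" }
 *
 * is refused rather than fetched from inside the container (a basic SSRF guard).
 * The request body shown here is an assumption for illustration.
 */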
================================================
FILE: admin/app/exceptions/handler.ts
================================================
import app from '@adonisjs/core/services/app'
import { HttpContext, ExceptionHandler } from '@adonisjs/core/http'
import type { StatusPageRange, StatusPageRenderer } from '@adonisjs/core/types/http'

export default class HttpExceptionHandler extends ExceptionHandler {
  /**
   * In debug mode, the exception handler will display verbose errors
   * with pretty printed stack traces.
   */
  protected debug = !app.inProduction

  /**
   * Status pages are used to display custom HTML pages for certain error
   * codes. You might want to enable them in production only, but feel
   * free to enable them in development as well.
   */
  protected renderStatusPages = app.inProduction

  /**
   * Status pages are a collection of error code ranges and a callback
   * to return the HTML contents to send as a response.
   */
  protected statusPages: Record<StatusPageRange, StatusPageRenderer> = {
    '404': (error, { inertia }) => inertia.render('errors/not_found', { error }),
    '500..599': (error, { inertia }) => inertia.render('errors/server_error', { error }),
  }

  /**
   * The method is used for handling errors and returning
   * response to the client
   */
  async handle(error: unknown, ctx: HttpContext) {
    return super.handle(error, ctx)
  }

  /**
   * The method is used to report error to the logging service or
   * a third party error monitoring service.
   *
   * @note You should not attempt to send a response from this method.
   */
  async report(error: unknown, ctx: HttpContext) {
    return super.report(error, ctx)
  }
}

================================================
FILE: admin/app/exceptions/internal_server_error_exception.ts
================================================
import { Exception } from '@adonisjs/core/exceptions'

export default class InternalServerErrorException extends Exception {
  static status = 500
  static code = 'E_INTERNAL_SERVER_ERROR'
}
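/*
 * Editor's note (usage sketch, not in the original source): AdonisJS maps the
 * static `status` of an Exception subclass to the HTTP response code, so a
 * service can simply do (import path assumed to follow this repo's aliases):
 *
 *   import InternalServerErrorException from '#exceptions/internal_server_error_exception'
 *   throw new InternalServerErrorException('Docker daemon unreachable')
 *
 * and the handler above renders the '500..599' status page (or a JSON error,
 * behind ForceJsonResponseMiddleware) with code E_INTERNAL_SERVER_ERROR.
 */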
================================================
FILE: admin/app/jobs/check_service_updates_job.ts
================================================
import { Job } from 'bullmq'
import { QueueService } from '#services/queue_service'
import { DockerService } from '#services/docker_service'
import { ContainerRegistryService } from '#services/container_registry_service'
import Service from '#models/service'
import logger from '@adonisjs/core/services/logger'
import transmit from '@adonisjs/transmit/services/main'
import { BROADCAST_CHANNELS } from '../../constants/broadcast.js'
import { DateTime } from 'luxon'

export class CheckServiceUpdatesJob {
  static get queue() {
    return 'service-updates'
  }

  static get key() {
    return 'check-service-updates'
  }

  async handle(_job: Job) {
    logger.info('[CheckServiceUpdatesJob] Checking for service updates...')
    const dockerService = new DockerService()
    const registryService = new ContainerRegistryService()

    // Determine host architecture
    const hostArch = await this.getHostArch(dockerService)

    const installedServices = await Service.query().where('installed', true)
    let updatesFound = 0

    for (const service of installedServices) {
      try {
        const updates = await registryService.getAvailableUpdates(
          service.container_image,
          hostArch,
          service.source_repo
        )
        const latestUpdate = updates.length > 0 ? updates[0].tag : null
        service.available_update_version = latestUpdate
        service.update_checked_at = DateTime.now()
        await service.save()
        if (latestUpdate) {
          updatesFound++
          logger.info(
            `[CheckServiceUpdatesJob] Update available for ${service.service_name}: ${service.container_image} → ${latestUpdate}`
          )
        }
      } catch (error) {
        logger.error(
          `[CheckServiceUpdatesJob] Failed to check updates for ${service.service_name}: ${error.message}`
        )
        // Continue checking other services
      }
    }

    logger.info(
      `[CheckServiceUpdatesJob] Completed. ${updatesFound} update(s) found for ${installedServices.length} service(s).`
    )

    // Broadcast completion so the frontend can refresh
    transmit.broadcast(BROADCAST_CHANNELS.SERVICE_UPDATES, {
      status: 'completed',
      updatesFound,
      timestamp: new Date().toISOString(),
    })

    return { updatesFound }
  }

  private async getHostArch(dockerService: DockerService): Promise<string> {
    try {
      const info = await dockerService.docker.info()
      const arch = info.Architecture || ''
      // Map Docker architecture names to OCI names
      const archMap: Record<string, string> = {
        x86_64: 'amd64',
        aarch64: 'arm64',
        armv7l: 'arm',
        amd64: 'amd64',
        arm64: 'arm64',
      }
      return archMap[arch] || arch.toLowerCase()
    } catch (error) {
      logger.warn(
        `[CheckServiceUpdatesJob] Could not detect host architecture: ${error.message}. Defaulting to amd64.`
      )
      return 'amd64'
    }
  }

  static async scheduleNightly() {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    await queue.upsertJobScheduler(
      'nightly-service-update-check',
      { pattern: '0 3 * * *' },
      {
        name: this.key,
        opts: {
          removeOnComplete: { count: 7 },
          removeOnFail: { count: 5 },
        },
      }
    )
    logger.info('[CheckServiceUpdatesJob] Service update check scheduled with cron: 0 3 * * *')
  }

  static async dispatch() {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    const job = await queue.add(
      this.key,
      {},
      {
        attempts: 3,
        backoff: { type: 'exponential', delay: 60000 },
        removeOnComplete: { count: 7 },
        removeOnFail: { count: 5 },
      }
    )
    logger.info(`[CheckServiceUpdatesJob] Dispatched ad-hoc service update check job ${job.id}`)
    return job
  }
}

================================================
FILE: admin/app/jobs/check_update_job.ts
================================================
import { Job } from 'bullmq'
import { QueueService } from '#services/queue_service'
import { DockerService } from '#services/docker_service'
import { SystemService } from '#services/system_service'
import logger from '@adonisjs/core/services/logger'
import KVStore from '#models/kv_store'

export class CheckUpdateJob {
  static get queue() {
    return 'system'
  }

  static get key() {
    return 'check-update'
  }

  async handle(_job: Job) {
    logger.info('[CheckUpdateJob] Running update check...')
    const dockerService = new DockerService()
    const systemService = new SystemService(dockerService)
    try {
      const result = await systemService.checkLatestVersion()
      if (result.updateAvailable) {
        logger.info(
          `[CheckUpdateJob] Update available: ${result.currentVersion} → ${result.latestVersion}`
        )
      } else {
        await KVStore.setValue('system.updateAvailable', false)
        logger.info(`[CheckUpdateJob] System is up to date (${result.currentVersion})`)
      }
      return result
    } catch (error) {
      logger.error(`[CheckUpdateJob] Update check failed: ${error.message}`)
      throw error
    }
  }

  static async scheduleNightly() {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    await queue.upsertJobScheduler(
      'nightly-update-check',
      { pattern: '0 2,14 * * *' }, // Every 12 hours at 2am and 2pm
      {
        name: this.key,
        opts: {
          removeOnComplete: { count: 7 },
          removeOnFail: { count: 5 },
        },
      }
    )
    logger.info('[CheckUpdateJob] Update check scheduled with cron: 0 2,14 * * *')
  }

  static async dispatch() {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    const job = await queue.add(this.key, {}, {
      attempts: 3,
      backoff: { type: 'exponential', delay: 60000 },
      removeOnComplete: { count: 7 },
      removeOnFail: { count: 5 },
    })
    logger.info(`[CheckUpdateJob] Dispatched ad-hoc update check job ${job.id}`)
    return job
  }
}
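/*
 * Editor's note (sketch, not in the original source): `upsertJobScheduler` is
 * idempotent — calling these schedule methods on every boot replaces the
 * scheduler entry keyed by its ID instead of stacking duplicates. The patterns
 * are plain cron: '0 3 * * *' fires daily at 03:00, '0 2,14 * * *' fires at
 * 02:00 and 14:00. The ad-hoc dispatches retry up to 3 times with exponential
 * backoff (60s, 120s, 240s).
 */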
================================================
FILE: admin/app/jobs/download_model_job.ts
================================================
import { Job } from 'bullmq'
import { QueueService } from '#services/queue_service'
import { createHash } from 'crypto'
import logger from '@adonisjs/core/services/logger'
import { OllamaService } from '#services/ollama_service'

export interface DownloadModelJobParams {
  modelName: string
}

export class DownloadModelJob {
  static get queue() {
    return 'model-downloads'
  }

  static get key() {
    return 'download-model'
  }

  static getJobId(modelName: string): string {
    return createHash('sha256').update(modelName).digest('hex').slice(0, 16)
  }

  async handle(job: Job) {
    const { modelName } = job.data as DownloadModelJobParams
    logger.info(`[DownloadModelJob] Attempting to download model: ${modelName}`)
    const ollamaService = new OllamaService()

    // Even if no models are installed, this should return an empty array if ready
    const existingModels = await ollamaService.getModels()
    if (!existingModels) {
      logger.warn(
        `[DownloadModelJob] Ollama service not ready yet for model ${modelName}. Will retry...`
      )
      throw new Error('Ollama service not ready yet')
    }

    logger.info(`[DownloadModelJob] Ollama service is ready. Initiating download for ${modelName}`)

    // Services are ready, initiate the download with progress tracking
    const result = await ollamaService.downloadModel(modelName, (progressPercent) => {
      if (progressPercent) {
        job.updateProgress(Math.floor(progressPercent))
        logger.info(`[DownloadModelJob] Model ${modelName}: ${progressPercent}%`)
      }
      // Store detailed progress in job data for clients to query
      job.updateData({
        ...job.data,
        status: 'downloading',
        progress: progressPercent,
        progress_timestamp: new Date().toISOString(),
      })
    })

    if (!result.success) {
      logger.error(
        `[DownloadModelJob] Failed to initiate download for model ${modelName}: ${result.message}`
      )
      throw new Error(`Failed to initiate download for model: ${result.message}`)
    }

    logger.info(`[DownloadModelJob] Successfully completed download for model ${modelName}`)
    return {
      modelName,
      message: result.message,
    }
  }

  static async getByModelName(modelName: string): Promise<Job | undefined> {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    const jobId = this.getJobId(modelName)
    return await queue.getJob(jobId)
  }

  static async dispatch(params: DownloadModelJobParams) {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    const jobId = this.getJobId(params.modelName)
    try {
      const job = await queue.add(this.key, params, {
        jobId,
        attempts: 40, // Many attempts since services may take considerable time to install
        backoff: {
          type: 'fixed',
          delay: 60000, // Check every 60 seconds
        },
        removeOnComplete: false, // Keep for status checking
        removeOnFail: false, // Keep failed jobs for debugging
      })
      return {
        job,
        created: true,
        message: `Dispatched model download job for ${params.modelName}`,
      }
    } catch (error) {
      if (error.message.includes('job already exists')) {
        const existing = await queue.getJob(jobId)
        return {
          job: existing,
          created: false,
          message: `Job already exists for model ${params.modelName}`,
        }
      }
      throw error
    }
  }
}

================================================
FILE: admin/app/jobs/embed_file_job.ts
================================================
import { Job, UnrecoverableError } from 'bullmq'
import { QueueService } from '#services/queue_service'
import { EmbedJobWithProgress } from '../../types/rag.js'
import { RagService } from '#services/rag_service'
import {
DockerService } from '#services/docker_service' import { OllamaService } from '#services/ollama_service' import { createHash } from 'crypto' import logger from '@adonisjs/core/services/logger' export interface EmbedFileJobParams { filePath: string fileName: string fileSize?: number // Batch processing for large ZIM files batchOffset?: number // Current batch offset (for ZIM files) totalArticles?: number // Total articles in ZIM (for progress tracking) isFinalBatch?: boolean // Whether this is the last batch (prevents premature deletion) } export class EmbedFileJob { static get queue() { return 'file-embeddings' } static get key() { return 'embed-file' } static getJobId(filePath: string): string { return createHash('sha256').update(filePath).digest('hex').slice(0, 16) } async handle(job: Job) { const { filePath, fileName, batchOffset, totalArticles } = job.data as EmbedFileJobParams const isZimBatch = batchOffset !== undefined const batchInfo = isZimBatch ? ` (batch offset: ${batchOffset})` : '' logger.info(`[EmbedFileJob] Starting embedding process for: ${fileName}${batchInfo}`) const dockerService = new DockerService() const ollamaService = new OllamaService() const ragService = new RagService(dockerService, ollamaService) try { // Check if Ollama and Qdrant services are installed and ready // Use UnrecoverableError for "not installed" so BullMQ won't retry — // retrying 30x when the service doesn't exist just wastes Redis connections const ollamaUrl = await dockerService.getServiceURL('nomad_ollama') if (!ollamaUrl) { logger.warn('[EmbedFileJob] Ollama is not installed. Skipping embedding for: %s', fileName) throw new UnrecoverableError('Ollama service is not installed. Install AI Assistant to enable file embeddings.') } const existingModels = await ollamaService.getModels() if (!existingModels) { logger.warn('[EmbedFileJob] Ollama service not ready yet. Will retry...') throw new Error('Ollama service not ready yet') } const qdrantUrl = await dockerService.getServiceURL('nomad_qdrant') if (!qdrantUrl) { logger.warn('[EmbedFileJob] Qdrant is not installed. Skipping embedding for: %s', fileName) throw new UnrecoverableError('Qdrant service is not installed. Install AI Assistant to enable file embeddings.') } logger.info(`[EmbedFileJob] Services ready. Processing file: ${fileName}`) // Update progress starting await job.updateProgress(5) await job.updateData({ ...job.data, status: 'processing', startedAt: job.data.startedAt || Date.now(), }) logger.info(`[EmbedFileJob] Processing file: ${filePath}`) // Progress callback: maps service-reported 0-100% into the 5-95% job range const onProgress = async (percent: number) => { await job.updateProgress(Math.min(95, Math.round(5 + percent * 0.9))) } // Process and embed the file // Only allow deletion if explicitly marked as final batch const allowDeletion = job.data.isFinalBatch === true const result = await ragService.processAndEmbedFile( filePath, allowDeletion, batchOffset, onProgress ) if (!result.success) { logger.error(`[EmbedFileJob] Failed to process file ${fileName}: ${result.message}`) throw new Error(result.message) } // For ZIM files with batching, check if more batches are needed if (result.hasMoreBatches) { const nextOffset = (batchOffset || 0) + (result.articlesProcessed || 0) logger.info( `[EmbedFileJob] Batch complete. 
Dispatching next batch at offset ${nextOffset}`
      )

      // Dispatch next batch (not final yet)
      await EmbedFileJob.dispatch({
        filePath,
        fileName,
        batchOffset: nextOffset,
        totalArticles: totalArticles || result.totalArticles,
        isFinalBatch: false, // Explicitly not final
      })

      // Calculate progress based on articles processed
      const progress = totalArticles ? Math.round((nextOffset / totalArticles) * 100) : 50
      await job.updateProgress(progress)
      await job.updateData({
        ...job.data,
        status: 'batch_completed',
        lastBatchAt: Date.now(),
        chunks: (job.data.chunks || 0) + (result.chunks || 0),
      })

      return {
        success: true,
        fileName,
        filePath,
        chunks: result.chunks,
        hasMoreBatches: true,
        nextOffset,
        message: `Batch embedded ${result.chunks} chunks, next batch queued`,
      }
    }

    // Final batch or non-batched file - mark as complete
    const totalChunks = (job.data.chunks || 0) + (result.chunks || 0)
    await job.updateProgress(100)
    await job.updateData({
      ...job.data,
      status: 'completed',
      completedAt: Date.now(),
      chunks: totalChunks,
    })

    const batchMsg = isZimBatch ? ` (final batch, total chunks: ${totalChunks})` : ''
    logger.info(
      `[EmbedFileJob] Successfully embedded ${result.chunks} chunks from file: ${fileName}${batchMsg}`
    )
    return {
      success: true,
      fileName,
      filePath,
      chunks: result.chunks,
      message: `Successfully embedded ${result.chunks} chunks`,
    }
  } catch (error) {
    logger.error(`[EmbedFileJob] Error embedding file ${fileName}:`, error)
    await job.updateData({
      ...job.data,
      status: 'failed',
      failedAt: Date.now(),
      error: error instanceof Error ? error.message : 'Unknown error',
    })
    throw error
  }
  }
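  /*
   * Editor's note (design summary, not in the original source): large ZIM files
   * are embedded as a chain of jobs rather than one long-running job. Each run
   * processes a single batch, re-dispatches itself with the next offset, and
   * only a job dispatched with `isFinalBatch: true` may delete the source file,
   * so a crash mid-chain never destroys data that still has unembedded batches.
   */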
  static async listActiveJobs(): Promise<EmbedJobWithProgress[]> {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    const jobs = await queue.getJobs(['waiting', 'active', 'delayed'])
    return jobs.map((job) => ({
      jobId: job.id!.toString(),
      fileName: (job.data as EmbedFileJobParams).fileName,
      filePath: (job.data as EmbedFileJobParams).filePath,
      progress: typeof job.progress === 'number' ? job.progress : 0,
      status: ((job.data as any).status as string) ?? 'waiting',
    }))
  }

  static async getByFilePath(filePath: string): Promise<Job | undefined> {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    const jobId = this.getJobId(filePath)
    return await queue.getJob(jobId)
  }

  static async dispatch(params: EmbedFileJobParams) {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    const jobId = this.getJobId(params.filePath)
    try {
      const job = await queue.add(this.key, params, {
        jobId,
        attempts: 30,
        backoff: {
          type: 'fixed',
          delay: 60000, // Check every 60 seconds for service readiness
        },
        removeOnComplete: { count: 50 }, // Keep last 50 completed jobs for history
        removeOnFail: { count: 20 }, // Keep last 20 failed jobs for debugging
      })
      logger.info(`[EmbedFileJob] Dispatched embedding job for file: ${params.fileName}`)
      return {
        job,
        created: true,
        jobId,
        message: `File queued for embedding: ${params.fileName}`,
      }
    } catch (error) {
      if (error.message && error.message.includes('job already exists')) {
        const existing = await queue.getJob(jobId)
        logger.info(`[EmbedFileJob] Job already exists for file: ${params.fileName}`)
        return {
          job: existing,
          created: false,
          jobId,
          message: `Embedding job already exists for: ${params.fileName}`,
        }
      }
      throw error
    }
  }

  static async getStatus(filePath: string): Promise<{
    exists: boolean
    status?: string
    progress?: number
    chunks?: number
    error?: string
  }> {
    const job = await this.getByFilePath(filePath)
    if (!job) {
      return { exists: false }
    }
    const state = await job.getState()
    const data = job.data
    return {
      exists: true,
      status: data.status || state,
      progress: typeof job.progress === 'number' ? job.progress : undefined,
      chunks: data.chunks,
      error: data.error,
    }
  }
}
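/*
 * Editor's note (usage sketch, not in the original source): a client polling
 * embedding progress can combine the deterministic job ID with getStatus:
 *
 *   const s = await EmbedFileJob.getStatus('/data/zim/wikipedia_en_all.zim')
 *   if (!s.exists) {
 *     // never queued (or already pruned by removeOnComplete)
 *   } else if (s.status === 'failed') {
 *     console.error(s.error)
 *   } else {
 *     console.log(`${s.progress ?? 0}% — ${s.chunks ?? 0} chunks so far`)
 *   }
 *
 * The file path shown is illustrative.
 */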
================================================
FILE: admin/app/jobs/run_benchmark_job.ts
================================================
import { Job } from 'bullmq'
import { QueueService } from '#services/queue_service'
import { BenchmarkService } from '#services/benchmark_service'
import type { RunBenchmarkJobParams } from '../../types/benchmark.js'
import logger from '@adonisjs/core/services/logger'
import { DockerService } from '#services/docker_service'

export class RunBenchmarkJob {
  static get queue() {
    return 'benchmarks'
  }

  static get key() {
    return 'run-benchmark'
  }

  async handle(job: Job) {
    const { benchmark_id, benchmark_type } = job.data as RunBenchmarkJobParams
    logger.info(`[RunBenchmarkJob] Starting benchmark ${benchmark_id} of type ${benchmark_type}`)
    const dockerService = new DockerService()
    const benchmarkService = new BenchmarkService(dockerService)
    try {
      let result
      switch (benchmark_type) {
        case 'full':
          result = await benchmarkService.runFullBenchmark()
          break
        case 'system':
          result = await benchmarkService.runSystemBenchmarks()
          break
        case 'ai':
          result = await benchmarkService.runAIBenchmark()
          break
        default:
          throw new Error(`Unknown benchmark type: ${benchmark_type}`)
      }
      logger.info(`[RunBenchmarkJob] Benchmark ${benchmark_id} completed with NOMAD score: ${result.nomad_score}`)
      return {
        success: true,
        benchmark_id: result.benchmark_id,
        nomad_score: result.nomad_score,
      }
    } catch (error) {
      logger.error(`[RunBenchmarkJob] Benchmark ${benchmark_id} failed: ${error.message}`)
      throw error
    }
  }

  static async dispatch(params: RunBenchmarkJobParams) {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    try {
      const job = await queue.add(this.key, params, {
        jobId: params.benchmark_id,
        attempts: 1, // Benchmarks shouldn't be retried automatically
        removeOnComplete: {
          count: 10, // Keep last 10 completed jobs
        },
        removeOnFail: {
          count: 5, // Keep last 5 failed jobs
        },
      })
      logger.info(`[RunBenchmarkJob] Dispatched benchmark job ${params.benchmark_id}`)
      return {
        job,
        created: true,
        message: `Benchmark job ${params.benchmark_id} dispatched successfully`,
      }
    } catch (error) {
      if (error.message.includes('job already exists')) {
        const existing = await queue.getJob(params.benchmark_id)
        return {
          job: existing,
          created: false,
          message: `Benchmark job ${params.benchmark_id} already exists`,
        }
      }
      throw error
    }
  }

  static async getJob(benchmarkId: string): Promise<Job | undefined> {
    const queueService = new QueueService()
    const queue = queueService.getQueue(this.queue)
    return await queue.getJob(benchmarkId)
  }

  static async getJobState(benchmarkId: string): Promise<string | undefined> {
    const job = await this.getJob(benchmarkId)
    return job ? await job.getState() : undefined
  }
}

================================================
FILE: admin/app/jobs/run_download_job.ts
================================================
import { Job } from 'bullmq'
import { RunDownloadJobParams } from '../../types/downloads.js'
import { QueueService } from '#services/queue_service'
import { doResumableDownload } from '../utils/downloads.js'
import { createHash } from 'crypto'
import { DockerService } from '#services/docker_service'
import { ZimService } from '#services/zim_service'
import { MapService } from '#services/map_service'
import { EmbedFileJob } from './embed_file_job.js'

export class RunDownloadJob {
  static get queue() {
    return 'downloads'
  }

  static get key() {
    return 'run-download'
  }

  static getJobId(url: string): string {
    return createHash('sha256').update(url).digest('hex').slice(0, 16)
  }

  async handle(job: Job) {
    const { url, filepath, timeout, allowedMimeTypes, forceNew, filetype, resourceMetadata } =
      job.data as RunDownloadJobParams
    await doResumableDownload({
      url,
      filepath,
      timeout,
      allowedMimeTypes,
      forceNew,
      onProgress(progress) {
        const progressPercent = (progress.downloadedBytes / (progress.totalBytes || 1)) * 100
        job.updateProgress(Math.floor(progressPercent))
      },
      async onComplete(url) {
        try {
          // Create InstalledResource entry if metadata was provided
          if (resourceMetadata) {
            const { default: InstalledResource } = await import('#models/installed_resource')
            const { DateTime } = await import('luxon')
            const { getFileStatsIfExists, deleteFileIfExists } = await import('../utils/fs.js')
            const stats = await getFileStatsIfExists(filepath)

            // Look up the old entry so we can clean up the previous file after updating
            const oldEntry = await InstalledResource.query()
              .where('resource_id', resourceMetadata.resource_id)
              .where('resource_type', filetype as 'zim' | 'map')
              .first()
            const oldFilePath = oldEntry?.file_path ?? null

            await InstalledResource.updateOrCreate(
              { resource_id: resourceMetadata.resource_id, resource_type: filetype as 'zim' | 'map' },
              {
                version: resourceMetadata.version,
                collection_ref: resourceMetadata.collection_ref,
                url: url,
                file_path: filepath,
                file_size_bytes: stats ?
Number(stats.size) : null, installed_at: DateTime.now(), } ) // Delete the old file if it differs from the new one if (oldFilePath && oldFilePath !== filepath) { try { await deleteFileIfExists(oldFilePath) console.log(`[RunDownloadJob] Deleted old file: ${oldFilePath}`) } catch (deleteError) { console.warn( `[RunDownloadJob] Failed to delete old file ${oldFilePath}:`, deleteError ) } } } if (filetype === 'zim') { const dockerService = new DockerService() const zimService = new ZimService(dockerService) await zimService.downloadRemoteSuccessCallback([url], true) // Only dispatch embedding job if AI Assistant (Ollama) is installed const ollamaUrl = await dockerService.getServiceURL('nomad_ollama') if (ollamaUrl) { try { await EmbedFileJob.dispatch({ fileName: url.split('/').pop() || '', filePath: filepath, }) } catch (error) { console.error(`[RunDownloadJob] Error dispatching EmbedFileJob for URL ${url}:`, error) } } } else if (filetype === 'map') { const mapsService = new MapService() await mapsService.downloadRemoteSuccessCallback([url], false) } } catch (error) { console.error( `[RunDownloadJob] Error in download success callback for URL ${url}:`, error ) } job.updateProgress(100) }, }) return { url, filepath, } } static async getByUrl(url: string): Promise { const queueService = new QueueService() const queue = queueService.getQueue(this.queue) const jobId = this.getJobId(url) return await queue.getJob(jobId) } static async dispatch(params: RunDownloadJobParams) { const queueService = new QueueService() const queue = queueService.getQueue(this.queue) const jobId = this.getJobId(params.url) try { const job = await queue.add(this.key, params, { jobId, attempts: 3, backoff: { type: 'exponential', delay: 2000 }, removeOnComplete: true, }) return { job, created: true, message: `Dispatched download job for URL ${params.url}`, } } catch (error) { if (error.message.includes('job already exists')) { const existing = await queue.getJob(jobId) return { job: existing, created: false, message: `Job already exists for URL ${params.url}`, } } throw error } } } ================================================ FILE: admin/app/middleware/container_bindings_middleware.ts ================================================ import { Logger } from '@adonisjs/core/logger' import { HttpContext } from '@adonisjs/core/http' import { NextFn } from '@adonisjs/core/types/http' /** * The container bindings middleware binds classes to their request * specific value using the container resolver. * * - We bind "HttpContext" class to the "ctx" object * - And bind "Logger" class to the "ctx.logger" object */ export default class ContainerBindingsMiddleware { handle(ctx: HttpContext, next: NextFn) { ctx.containerResolver.bindValue(HttpContext, ctx) ctx.containerResolver.bindValue(Logger, ctx.logger) return next() } } ================================================ FILE: admin/app/middleware/force_json_response_middleware.ts ================================================ import type { HttpContext } from '@adonisjs/core/http' import type { NextFn } from '@adonisjs/core/types/http' /** * Updating the "Accept" header to always accept "application/json" response * from the server. This will force the internals of the framework like * validator errors or auth errors to return a JSON response. 
*/
export default class ForceJsonResponseMiddleware {
  async handle({ request }: HttpContext, next: NextFn) {
    const headers = request.headers()
    headers.accept = 'application/json'
    return next()
  }
}

================================================
FILE: admin/app/middleware/maps_static_middleware.ts
================================================
import type { HttpContext } from '@adonisjs/core/http'
import type { NextFn } from '@adonisjs/core/types/http'
import StaticMiddleware from '@adonisjs/static/static_middleware'
import { AssetsConfig } from '@adonisjs/static/types'

/**
 * See #providers/map_static_provider.ts for explanation
 * of why this middleware exists.
 */
export default class MapsStaticMiddleware {
  constructor(
    private path: string,
    private config: AssetsConfig
  ) {}

  async handle(ctx: HttpContext, next: NextFn) {
    const staticMiddleware = new StaticMiddleware(this.path, this.config)
    return staticMiddleware.handle(ctx, next)
  }
}

================================================
FILE: admin/app/models/benchmark_result.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'
import type { BenchmarkType, DiskType } from '../../types/benchmark.js'

export default class BenchmarkResult extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare benchmark_id: string
  @column() declare benchmark_type: BenchmarkType

  // Hardware information
  @column() declare cpu_model: string
  @column() declare cpu_cores: number
  @column() declare cpu_threads: number
  @column() declare ram_bytes: number
  @column() declare disk_type: DiskType
  @column() declare gpu_model: string | null

  // System benchmark scores
  @column() declare cpu_score: number
  @column() declare memory_score: number
  @column() declare disk_read_score: number
  @column() declare disk_write_score: number

  // AI benchmark scores (nullable for system-only benchmarks)
  @column() declare ai_tokens_per_second: number | null
  @column() declare ai_model_used: string | null
  @column() declare ai_time_to_first_token: number | null

  // Composite NOMAD score (0-100)
  @column() declare nomad_score: number

  // Repository submission tracking
  @column({
    serialize(value) {
      return Boolean(value)
    },
  })
  declare submitted_to_repository: boolean

  @column.dateTime() declare submitted_at: DateTime | null
  @column() declare repository_id: string | null
  @column() declare builder_tag: string | null

  @column.dateTime({ autoCreate: true }) declare created_at: DateTime
  @column.dateTime({ autoCreate: true, autoUpdate: true }) declare updated_at: DateTime
}
================================================
FILE: admin/app/models/benchmark_setting.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'
import type { BenchmarkSettingKey } from '../../types/benchmark.js'

export default class BenchmarkSetting extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare key: BenchmarkSettingKey
  @column() declare value: string | null
  @column.dateTime({ autoCreate: true }) declare created_at: DateTime
  @column.dateTime({ autoCreate: true, autoUpdate: true }) declare updated_at: DateTime

  /**
   * Get a setting value by key
   */
  static async getValue(key: BenchmarkSettingKey): Promise<string | null> {
    const setting = await this.findBy('key', key)
    return setting?.value ?? null
  }

  /**
   * Set a setting value by key (creates if not exists)
   */
  static async setValue(key: BenchmarkSettingKey, value: string | null): Promise<BenchmarkSetting> {
    const setting = await this.firstOrCreate({ key }, { key, value })
    if (setting.value !== value) {
      setting.value = value
      await setting.save()
    }
    return setting
  }

  /**
   * Get all benchmark settings as a typed object
   */
  static async getAllSettings(): Promise<{
    allow_anonymous_submission: boolean
    installation_id: string | null
    last_benchmark_run: string | null
  }> {
    const settings = await this.all()
    const map = new Map(settings.map((s) => [s.key, s.value]))
    return {
      allow_anonymous_submission: map.get('allow_anonymous_submission') === 'true',
      installation_id: map.get('installation_id') ?? null,
      last_benchmark_run: map.get('last_benchmark_run') ?? null,
    }
  }
}

================================================
FILE: admin/app/models/chat_message.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, belongsTo, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'
import type { BelongsTo } from '@adonisjs/lucid/types/relations'
import ChatSession from './chat_session.js'

export default class ChatMessage extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare session_id: number
  @column() declare role: 'system' | 'user' | 'assistant'
  @column() declare content: string

  // foreignKey is the column on this model; localKey is the primary key on ChatSession
  @belongsTo(() => ChatSession, { foreignKey: 'session_id', localKey: 'id' })
  declare session: BelongsTo<typeof ChatSession>

  @column.dateTime({ autoCreate: true }) declare created_at: DateTime
  @column.dateTime({ autoCreate: true, autoUpdate: true }) declare updated_at: DateTime
}

================================================
FILE: admin/app/models/chat_session.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, hasMany, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'
import type { HasMany } from '@adonisjs/lucid/types/relations'
import ChatMessage from './chat_message.js'

export default class ChatSession extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare title: string
  @column() declare model: string | null

  @hasMany(() => ChatMessage, {
    foreignKey: 'session_id',
    localKey: 'id',
  })
  declare messages: HasMany<typeof ChatMessage>

  @column.dateTime({ autoCreate: true }) declare created_at: DateTime
  @column.dateTime({ autoCreate: true, autoUpdate: true }) declare updated_at: DateTime
}
================================================
FILE: admin/app/models/collection_manifest.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'
import type { ManifestType } from '../../types/collections.js'

export default class CollectionManifest extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare type: ManifestType
  @column() declare spec_version: string

  @column({
    consume: (value: string) => (typeof value === 'string' ? JSON.parse(value) : value),
    prepare: (value: any) => JSON.stringify(value),
  })
  declare spec_data: any

  @column.dateTime() declare fetched_at: DateTime
}

================================================
FILE: admin/app/models/installed_resource.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'

export default class InstalledResource extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare resource_id: string
  @column() declare resource_type: 'zim' | 'map'
  @column() declare collection_ref: string | null
  @column() declare version: string
  @column() declare url: string
  @column() declare file_path: string
  @column() declare file_size_bytes: number | null
  @column.dateTime() declare installed_at: DateTime
}

================================================
FILE: admin/app/models/kv_store.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'
import { KV_STORE_SCHEMA, type KVStoreKey, type KVStoreValue } from '../../types/kv_store.js'
import { parseBoolean } from '../utils/misc.js'

/**
 * Generic key-value store model for storing various settings
 * that don't necessitate their own dedicated models.
 */
export default class KVStore extends BaseModel {
  static table = 'kv_store'
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare key: KVStoreKey
  @column() declare value: string | null
  @column.dateTime({ autoCreate: true }) declare created_at: DateTime
  @column.dateTime({ autoCreate: true, autoUpdate: true }) declare updated_at: DateTime

  /**
   * Get a setting value by key, automatically deserializing to the correct type.
   */
  static async getValue<K extends KVStoreKey>(key: K): Promise<KVStoreValue<K> | null> {
    const setting = await this.findBy('key', key)
    if (!setting || setting.value === undefined || setting.value === null) {
      return null
    }
    const raw = String(setting.value)
    return (KV_STORE_SCHEMA[key] === 'boolean' ? parseBoolean(raw) : raw) as KVStoreValue<K>
  }

  /**
   * Set a setting value by key (creates if not exists), automatically serializing to string.
   */
  static async setValue<K extends KVStoreKey>(key: K, value: KVStoreValue<K>): Promise<KVStore> {
    const serialized = String(value)
    const setting = await this.firstOrCreate({ key }, { key, value: serialized })
    if (setting.value !== serialized) {
      setting.value = serialized
      await setting.save()
    }
    return setting
  }

  /**
   * Clear a setting value by key, storing null so getValue returns null.
   */
  static async clearValue<K extends KVStoreKey>(key: K): Promise<void> {
    const setting = await this.findBy('key', key)
    if (setting && setting.value !== null) {
      setting.value = null
      await setting.save()
    }
  }
}
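/*
 * Editor's note (usage sketch, not in the original source): the generics tie a
 * key to its schema-declared type, so callers get typed values back without
 * casting. Assuming 'system.updateAvailable' is declared as 'boolean' in
 * KV_STORE_SCHEMA (it is written elsewhere in this codebase):
 *
 *   await KVStore.setValue('system.updateAvailable', true)        // stored as 'true'
 *   const flag = await KVStore.getValue('system.updateAvailable') // boolean | null
 *   await KVStore.clearValue('system.updateAvailable')            // getValue → null
 */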
================================================
FILE: admin/app/models/service.ts
================================================
import { BaseModel, belongsTo, column, hasMany, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'
import type { BelongsTo, HasMany } from '@adonisjs/lucid/types/relations'
import { DateTime } from 'luxon'

export default class Service extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare service_name: string
  @column() declare container_image: string
  @column() declare container_command: string | null
  @column() declare container_config: string | null
  @column() declare friendly_name: string | null
  @column() declare description: string | null
  @column() declare powered_by: string | null
  @column() declare display_order: number | null
  @column() declare icon: string | null // must be a TablerIcons name to be properly rendered in the UI (e.g. "IconBrandDocker")

  @column({
    serialize(value) {
      return Boolean(value)
    },
  })
  declare installed: boolean

  @column() declare installation_status: 'idle' | 'installing' | 'error'
  @column() declare depends_on: string | null

  // For services that are dependencies for other services - not intended to be installed directly by users
  @column({
    serialize(value) {
      return Boolean(value)
    },
  })
  declare is_dependency_service: boolean

  @column() declare ui_location: string | null
  @column() declare metadata: string | null
  @column() declare source_repo: string | null
  @column() declare available_update_version: string | null
  @column.dateTime() declare update_checked_at: DateTime | null
  @column.dateTime({ autoCreate: true }) declare created_at: DateTime
  @column.dateTime({ autoCreate: true, autoUpdate: true }) declare updated_at: DateTime | null

  // Define a self-referential relationship for dependencies
  @belongsTo(() => Service, {
    foreignKey: 'depends_on',
  })
  declare dependency: BelongsTo<typeof Service>

  @hasMany(() => Service, {
    foreignKey: 'depends_on',
  })
  declare dependencies: HasMany<typeof Service>
}

================================================
FILE: admin/app/models/wikipedia_selection.ts
================================================
import { DateTime } from 'luxon'
import { BaseModel, column, SnakeCaseNamingStrategy } from '@adonisjs/lucid/orm'

export default class WikipediaSelection extends BaseModel {
  static namingStrategy = new SnakeCaseNamingStrategy()

  @column({ isPrimary: true }) declare id: number
  @column() declare option_id: string
  @column() declare url: string | null
  @column() declare filename: string | null
  @column() declare status: 'none' | 'downloading' | 'installed' | 'failed'
  @column.dateTime({ autoCreate: true }) declare created_at: DateTime
  @column.dateTime({ autoCreate: true, autoUpdate: true }) declare updated_at: DateTime
}
================================================
FILE: admin/app/services/benchmark_service.ts
================================================
import { inject } from '@adonisjs/core'
import logger from '@adonisjs/core/services/logger'
import transmit from '@adonisjs/transmit/services/main'
import si from 'systeminformation'
import axios from 'axios'
import { DateTime } from 'luxon'
import BenchmarkResult from '#models/benchmark_result'
import BenchmarkSetting from '#models/benchmark_setting'
import { SystemService } from '#services/system_service'
import type {
  BenchmarkType,
  BenchmarkStatus,
  BenchmarkProgress,
  HardwareInfo,
  DiskType,
  SystemScores,
  AIScores,
  SysbenchCpuResult,
  SysbenchMemoryResult,
  SysbenchDiskResult,
  RepositorySubmission,
  RepositorySubmitResponse,
  RepositoryStats,
} from '../../types/benchmark.js'
import { randomUUID, createHmac } from 'node:crypto'
import { DockerService } from './docker_service.js'
import { SERVICE_NAMES } from '../../constants/service_names.js'
import { BROADCAST_CHANNELS } from '../../constants/broadcast.js'
import Dockerode from 'dockerode'

// HMAC secret for signing submissions to the benchmark repository
// This provides basic protection against casual API abuse.
// Note: Since NOMAD is open source, a determined attacker could extract this.
// For stronger protection, see challenge-response authentication.
const BENCHMARK_HMAC_SECRET = '778ba65d0bc0e23119e5ffce4b3716648a7d071f0a47ec3f'

// Re-export default weights for use in service
const SCORE_WEIGHTS = {
  ai_tokens_per_second: 0.30,
  cpu: 0.25,
  memory: 0.15,
  ai_ttft: 0.10,
  disk_read: 0.10,
  disk_write: 0.10,
}

// Benchmark configuration constants
const SYSBENCH_IMAGE = 'severalnines/sysbench:latest'
const SYSBENCH_CONTAINER_NAME = 'nomad_benchmark_sysbench'

// Reference model for AI benchmark - small but meaningful
const AI_BENCHMARK_MODEL = 'llama3.2:1b'
const AI_BENCHMARK_PROMPT = 'Explain recursion in programming in exactly 100 words.'

// Reference scores for normalization (calibrated to 0-100 scale)
// These represent "expected" scores for a mid-range system (score ~50)
const REFERENCE_SCORES = {
  cpu_events_per_second: 5000, // sysbench cpu events/sec for ~50 score
  memory_ops_per_second: 5000000, // sysbench memory ops/sec for ~50 score
  disk_read_mb_per_sec: 500, // 500 MB/s read for ~50 score
  disk_write_mb_per_sec: 400, // 400 MB/s write for ~50 score
  ai_tokens_per_second: 30, // 30 tok/s for ~50 score
  ai_ttft_ms: 500, // 500ms time to first token for ~50 score (lower is better)
}

@inject()
export class BenchmarkService {
  private currentBenchmarkId: string | null = null
  private currentStatus: BenchmarkStatus = 'idle'

  constructor(private dockerService: DockerService) {}

  /**
   * Run a full benchmark suite
   */
  async runFullBenchmark(): Promise<BenchmarkResult> {
    return this._runBenchmark('full', true)
  }

  /**
   * Run system benchmarks only (CPU, memory, disk)
   */
  async runSystemBenchmarks(): Promise<BenchmarkResult> {
    return this._runBenchmark('system', false)
  }

  /**
   * Run AI benchmark only
   */
  async runAIBenchmark(): Promise<BenchmarkResult> {
    return this._runBenchmark('ai', true)
  }

  /**
   * Get the latest benchmark result
   */
  async getLatestResult(): Promise<BenchmarkResult | null> {
    return await BenchmarkResult.query().orderBy('created_at', 'desc').first()
  }

  /**
   * Get all benchmark results
   */
  async getAllResults(): Promise<BenchmarkResult[]> {
    return await BenchmarkResult.query().orderBy('created_at', 'desc')
  }

  /**
   * Get a specific benchmark result by ID
   */
  async getResultById(benchmarkId: string): Promise<BenchmarkResult | null> {
    return await BenchmarkResult.findBy('benchmark_id', benchmarkId)
  }

  /**
   * Submit benchmark results to central repository
   */
  async submitToRepository(benchmarkId?: string, anonymous?: boolean): Promise<RepositorySubmitResponse> {
    const result = benchmarkId ? await this.getResultById(benchmarkId) : await this.getLatestResult()
    if (!result) {
      throw new Error('No benchmark result found to submit')
    }
    // Only allow full benchmarks with AI data to be submitted to repository
    if (result.benchmark_type !== 'full') {
      throw new Error('Only full benchmarks can be shared with the community. Run a Full Benchmark to share your results.')
    }
    if (!result.ai_tokens_per_second || result.ai_tokens_per_second <= 0) {
      throw new Error('Benchmark must include AI performance data. Ensure AI Assistant is installed and run a Full Benchmark.')
    }
    if (result.submitted_to_repository) {
      throw new Error('Benchmark result has already been submitted')
    }

    const submission: RepositorySubmission = {
      cpu_model: result.cpu_model,
      cpu_cores: result.cpu_cores,
      cpu_threads: result.cpu_threads,
      ram_gb: Math.round(result.ram_bytes / (1024 * 1024 * 1024)),
      disk_type: result.disk_type,
      gpu_model: result.gpu_model,
      cpu_score: result.cpu_score,
      memory_score: result.memory_score,
      disk_read_score: result.disk_read_score,
      disk_write_score: result.disk_write_score,
      ai_tokens_per_second: result.ai_tokens_per_second,
      ai_time_to_first_token: result.ai_time_to_first_token,
      nomad_score: result.nomad_score,
      nomad_version: SystemService.getAppVersion(),
      benchmark_version: '1.0.0',
      builder_tag: anonymous ? null : result.builder_tag,
    }

    try {
      // Generate HMAC signature for submission verification
      const timestamp = Date.now().toString()
      const payload = timestamp + JSON.stringify(submission)
      const signature = createHmac('sha256', BENCHMARK_HMAC_SECRET)
        .update(payload)
        .digest('hex')

      const response = await axios.post(
        'https://benchmark.projectnomad.us/api/v1/submit',
        submission,
        {
          timeout: 30000,
          headers: {
            'X-NOMAD-Timestamp': timestamp,
            'X-NOMAD-Signature': signature,
          },
        }
      )

      if (response.data.success) {
        result.submitted_to_repository = true
        result.submitted_at = DateTime.now()
        result.repository_id = response.data.repository_id
        await result.save()
        await BenchmarkSetting.setValue('last_benchmark_run', new Date().toISOString())
      }
      return response.data as RepositorySubmitResponse
    } catch (error) {
      const detail = error.response?.data?.error || error.message || 'Unknown error'
      const statusCode = error.response?.status
      logger.error(`Failed to submit benchmark to repository: ${detail} (Status: ${statusCode})`)
      // Create an error with the status code attached for proper handling upstream
      const err: any = new Error(`Failed to submit benchmark: ${detail}`)
      err.statusCode = statusCode
      throw err
    }
  }
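  /*
   * Editor's note (sketch, not in the original source): the receiving end can
   * verify a submission by recomputing the same HMAC over timestamp + raw JSON
   * body and comparing digests, plus a freshness window on the timestamp:
   *
   *   const expected = createHmac('sha256', BENCHMARK_HMAC_SECRET)
   *     .update(timestampHeader + rawBody)
   *     .digest('hex')
   *   const ok = timingSafeEqual(Buffer.from(expected), Buffer.from(signatureHeader))
   *
   * `timingSafeEqual` (node:crypto) avoids timing side channels; the server-side
   * variable names here are hypothetical.
   */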
  /**
   * Get comparison stats from central repository
   */
  async getComparisonStats(): Promise<RepositoryStats | null> {
    try {
      const response = await axios.get('https://benchmark.projectnomad.us/api/v1/stats', {
        timeout: 10000,
      })
      return response.data as RepositoryStats
    } catch (error) {
      logger.warn(`Failed to fetch comparison stats: ${error.message}`)
      return null
    }
  }

  /**
   * Get current benchmark status
   */
  getStatus(): { status: BenchmarkStatus; benchmarkId: string | null } {
    return {
      status: this.currentStatus,
      benchmarkId: this.currentBenchmarkId,
    }
  }
  /**
   * Detect system hardware information
   */
  async getHardwareInfo(): Promise<HardwareInfo> {
    this._updateStatus('detecting_hardware', 'Detecting system hardware...')
    try {
      const [cpu, mem, diskLayout, graphics] = await Promise.all([
        si.cpu(),
        si.mem(),
        si.diskLayout(),
        si.graphics(),
      ])

      // Determine disk type from primary disk
      let diskType: DiskType = 'unknown'
      if (diskLayout.length > 0) {
        const primaryDisk = diskLayout[0]
        if (primaryDisk.type?.toLowerCase().includes('nvme')) {
          diskType = 'nvme'
        } else if (primaryDisk.type?.toLowerCase().includes('ssd')) {
          diskType = 'ssd'
        } else if (primaryDisk.type?.toLowerCase().includes('hdd') || primaryDisk.interfaceType === 'SATA') {
          // SATA could be SSD or HDD, check if it's rotational
          diskType = 'hdd'
        }
      }

      // Get GPU model (prefer discrete GPU with dedicated VRAM)
      let gpuModel: string | null = null
      if (graphics.controllers && graphics.controllers.length > 0) {
        // First, look for discrete GPUs (NVIDIA, AMD discrete, or any with significant VRAM)
        const discreteGpu = graphics.controllers.find((g) => {
          const vendor = g.vendor?.toLowerCase() || ''
          const model = g.model?.toLowerCase() || ''
          // NVIDIA GPUs are always discrete
          if (vendor.includes('nvidia') || model.includes('geforce') || model.includes('rtx') || model.includes('quadro')) {
            return true
          }
          // AMD discrete GPUs (Radeon, not integrated APU graphics)
          if ((vendor.includes('amd') || vendor.includes('ati')) && (model.includes('radeon') || model.includes('rx ') || model.includes('vega')) && !model.includes('graphics')) {
            return true
          }
          // Any GPU with dedicated VRAM > 512MB is likely discrete
          if (g.vram && g.vram > 512) {
            return true
          }
          return false
        })
        gpuModel = discreteGpu?.model || graphics.controllers[0]?.model || null
      }

      // Fallback: Check Docker for nvidia runtime and query GPU model via nvidia-smi
      if (!gpuModel) {
        try {
          const dockerInfo = await this.dockerService.docker.info()
          const runtimes = dockerInfo.Runtimes || {}
          if ('nvidia' in runtimes) {
            logger.info('[BenchmarkService] NVIDIA container runtime detected, querying GPU model via nvidia-smi')
            const systemService = new (await import('./system_service.js')).SystemService(this.dockerService)
            const nvidiaInfo = await systemService.getNvidiaSmiInfo()
            if (Array.isArray(nvidiaInfo) && nvidiaInfo.length > 0) {
              gpuModel = nvidiaInfo[0].model
            } else {
              logger.warn(`[BenchmarkService] NVIDIA runtime detected but failed to get GPU info: ${typeof nvidiaInfo === 'string' ? nvidiaInfo : JSON.stringify(nvidiaInfo)}`)
            }
          }
        } catch (dockerError) {
          logger.warn(`[BenchmarkService] Could not query Docker info for GPU detection: ${dockerError.message}`)
        }
      }

      // Fallback: Extract integrated GPU from CPU model name
      if (!gpuModel) {
        const cpuFullName = `${cpu.manufacturer} ${cpu.brand}`
        // AMD APUs: e.g., "AMD Ryzen AI 9 HX 370 w/ Radeon 890M" -> "Radeon 890M"
        const radeonMatch = cpuFullName.match(/w\/\s*(Radeon\s+\d+\w*)/i)
        if (radeonMatch) {
          gpuModel = radeonMatch[1]
        }
        // Intel Core Ultra: These have Intel Arc Graphics integrated
        // e.g., "Intel Core Ultra 9 285HX" -> "Intel Arc Graphics (Integrated)"
        if (!gpuModel && cpu.manufacturer?.toLowerCase().includes('intel')) {
          if (cpu.brand?.toLowerCase().includes('core ultra')) {
            gpuModel = 'Intel Arc Graphics (Integrated)'
          }
        }
      }

      return {
        cpu_model: `${cpu.manufacturer} ${cpu.brand}`,
        cpu_cores: cpu.physicalCores,
        cpu_threads: cpu.cores,
        ram_bytes: mem.total,
        disk_type: diskType,
        gpu_model: gpuModel,
      }
    } catch (error) {
      logger.error(`Error detecting hardware: ${error.message}`)
      throw new Error(`Failed to detect hardware: ${error.message}`)
    }
  }
  /**
   * Main benchmark execution method
   */
  private async _runBenchmark(type: BenchmarkType, includeAI: boolean): Promise<BenchmarkResult> {
    if (this.currentStatus !== 'idle') {
      throw new Error('A benchmark is already running')
    }
    this.currentBenchmarkId = randomUUID()
    this._updateStatus('starting', 'Starting benchmark...')
    try {
      // Detect hardware
      const hardware = await this.getHardwareInfo()

      // Run system benchmarks
      let systemScores: SystemScores = {
        cpu_score: 0,
        memory_score: 0,
        disk_read_score: 0,
        disk_write_score: 0,
      }
      if (type === 'full' || type === 'system') {
        systemScores = await this._runSystemBenchmarks()
      }

      // Run AI benchmark if requested and Ollama is available
      let aiScores: Partial<AIScores> = {}
      if (includeAI && (type === 'full' || type === 'ai')) {
        try {
          aiScores = await this._runAIBenchmark()
        } catch (error) {
          // For AI-only benchmarks, failing is fatal - don't save useless results with all zeros
          if (type === 'ai') {
            throw new Error(`AI benchmark failed: ${error.message}. Make sure AI Assistant is installed and running.`)
          }
          // For full benchmarks, AI is optional - continue without it
          logger.warn(`AI benchmark skipped: ${error.message}`)
        }
      }

      // Calculate NOMAD score
      this._updateStatus('calculating_score', 'Calculating NOMAD score...')
      const nomadScore = this._calculateNomadScore(systemScores, aiScores)

      // Save result
      const result = await BenchmarkResult.create({
        benchmark_id: this.currentBenchmarkId,
        benchmark_type: type,
        cpu_model: hardware.cpu_model,
        cpu_cores: hardware.cpu_cores,
        cpu_threads: hardware.cpu_threads,
        ram_bytes: hardware.ram_bytes,
        disk_type: hardware.disk_type,
        gpu_model: hardware.gpu_model,
        cpu_score: systemScores.cpu_score,
        memory_score: systemScores.memory_score,
        disk_read_score: systemScores.disk_read_score,
        disk_write_score: systemScores.disk_write_score,
        ai_tokens_per_second: aiScores.ai_tokens_per_second || null,
        ai_model_used: aiScores.ai_model_used || null,
        ai_time_to_first_token: aiScores.ai_time_to_first_token || null,
        nomad_score: nomadScore,
        submitted_to_repository: false,
      })

      this._updateStatus('completed', 'Benchmark completed successfully')
      this.currentStatus = 'idle'
      this.currentBenchmarkId = null
      return result
    } catch (error) {
      this._updateStatus('error', `Benchmark failed: ${error.message}`)
      this.currentStatus = 'idle'
      this.currentBenchmarkId = null
      throw error
    }
  }

  /**
   * Run system benchmarks using sysbench in Docker
   */
  private async _runSystemBenchmarks(): Promise<SystemScores> {
    // Ensure sysbench image is available
    await this._ensureSysbenchImage()

    // Run CPU benchmark
    this._updateStatus('running_cpu', 'Running CPU benchmark...')
    const cpuResult = await this._runSysbenchCpu()

    // Run memory benchmark
    this._updateStatus('running_memory', 'Running memory benchmark...')
    const memoryResult = await this._runSysbenchMemory()

    // Run disk benchmarks
    this._updateStatus('running_disk_read', 'Running disk read benchmark...')
    const diskReadResult = await this._runSysbenchDiskRead()
    this._updateStatus('running_disk_write', 'Running disk write benchmark...')
    const diskWriteResult = await this._runSysbenchDiskWrite()

    // Normalize scores to the 0-1 scale used by the composite calculation
    return {
      cpu_score: this._normalizeScore(cpuResult.events_per_second, REFERENCE_SCORES.cpu_events_per_second),
      memory_score: this._normalizeScore(memoryResult.operations_per_second, REFERENCE_SCORES.memory_ops_per_second),
      disk_read_score: this._normalizeScore(diskReadResult.read_mb_per_sec, REFERENCE_SCORES.disk_read_mb_per_sec),
      disk_write_score: this._normalizeScore(diskWriteResult.write_mb_per_sec, REFERENCE_SCORES.disk_write_mb_per_sec),
    }
  }
  /**
   * Run AI benchmark using Ollama
   */
  private async _runAIBenchmark(): Promise<AIScores> {
    try {
      this._updateStatus('running_ai', 'Running AI benchmark...')
      const ollamaAPIURL = await this.dockerService.getServiceURL(SERVICE_NAMES.OLLAMA)
      if (!ollamaAPIURL) {
        throw new Error('AI Assistant service location could not be determined. Ensure AI Assistant is installed and running.')
      }

      // Check if Ollama is available
      try {
        await axios.get(`${ollamaAPIURL}/api/tags`, { timeout: 5000 })
      } catch (error) {
        const errorCode = error.code || error.response?.status || 'unknown'
        throw new Error(`Ollama is not running or not accessible (${errorCode}). Ensure AI Assistant is installed and running.`)
      }

      // Check if the benchmark model is available, pull if not
      const ollamaService = new (await import('./ollama_service.js')).OllamaService()
      const modelResponse = await ollamaService.downloadModel(AI_BENCHMARK_MODEL)
      if (!modelResponse.success) {
        throw new Error(`Model does not exist and failed to download: ${modelResponse.message}`)
      }

      // Run inference benchmark
      const startTime = Date.now()
      const response = await axios.post(
        `${ollamaAPIURL}/api/generate`,
        {
          model: AI_BENCHMARK_MODEL,
          prompt: AI_BENCHMARK_PROMPT,
          stream: false,
        },
        { timeout: 120000 }
      )
      const endTime = Date.now()
      const totalTime = (endTime - startTime) / 1000 // seconds

      // Ollama returns eval_count (tokens generated) and eval_duration (nanoseconds)
      if (response.data.eval_count && response.data.eval_duration) {
        const tokenCount = response.data.eval_count
        const evalDurationSeconds = response.data.eval_duration / 1e9
        const tokensPerSecond = tokenCount / evalDurationSeconds
        // Time to first token from prompt_eval_duration
        const ttft = response.data.prompt_eval_duration
          ? response.data.prompt_eval_duration / 1e6 // Convert to ms
          : (totalTime * 1000) / 2 // Estimate if not available
        return {
          ai_tokens_per_second: Math.round(tokensPerSecond * 100) / 100,
          ai_model_used: AI_BENCHMARK_MODEL,
          ai_time_to_first_token: Math.round(ttft * 100) / 100,
        }
      }

      // Fallback calculation
      const estimatedTokens = response.data.response?.split(' ').length * 1.3 || 100
      const tokensPerSecond = estimatedTokens / totalTime
      return {
        ai_tokens_per_second: Math.round(tokensPerSecond * 100) / 100,
        ai_model_used: AI_BENCHMARK_MODEL,
        ai_time_to_first_token: Math.round((totalTime * 1000) / 2),
      }
    } catch (error) {
      throw new Error(`AI benchmark failed: ${error.message}`)
    }
  }

  /**
   * Calculate weighted NOMAD score
   */
  private _calculateNomadScore(systemScores: SystemScores, aiScores: Partial<AIScores>): number {
    let totalWeight = 0
    let weightedSum = 0

    // CPU score
    weightedSum += systemScores.cpu_score * SCORE_WEIGHTS.cpu
    totalWeight += SCORE_WEIGHTS.cpu

    // Memory score
    weightedSum += systemScores.memory_score * SCORE_WEIGHTS.memory
    totalWeight += SCORE_WEIGHTS.memory

    // Disk scores
    weightedSum += systemScores.disk_read_score * SCORE_WEIGHTS.disk_read
    totalWeight += SCORE_WEIGHTS.disk_read
    weightedSum += systemScores.disk_write_score * SCORE_WEIGHTS.disk_write
    totalWeight += SCORE_WEIGHTS.disk_write

    // AI scores (if available)
    if (aiScores.ai_tokens_per_second !== undefined && aiScores.ai_tokens_per_second !== null) {
      const aiScore = this._normalizeScore(
        aiScores.ai_tokens_per_second,
        REFERENCE_SCORES.ai_tokens_per_second
      )
      weightedSum += aiScore * SCORE_WEIGHTS.ai_tokens_per_second
      totalWeight += SCORE_WEIGHTS.ai_tokens_per_second
    }
    if (aiScores.ai_time_to_first_token !== undefined && aiScores.ai_time_to_first_token !== null) {
      // For TTFT, lower is better, so we invert the score
      const ttftScore = this._normalizeScoreInverse(
        aiScores.ai_time_to_first_token,
        REFERENCE_SCORES.ai_ttft_ms
      )
      weightedSum += ttftScore * SCORE_WEIGHTS.ai_ttft
      totalWeight += SCORE_WEIGHTS.ai_ttft
    }

    // Normalize by actual weight used (in case AI benchmarks were skipped)
    const nomadScore = totalWeight > 0 ? (weightedSum / totalWeight) * 100 : 0
    return Math.round(Math.min(100, Math.max(0, nomadScore)) * 100) / 100
  }
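  /*
   * Editor's worked example (not in the original source): with AI skipped, only
   * cpu (0.25) + memory (0.15) + disk_read (0.10) + disk_write (0.10) contribute,
   * so totalWeight = 0.60. If all four normalized sub-scores are 0.5 (exactly
   * reference hardware), weightedSum = 0.5 * 0.60 = 0.30 and
   * nomad_score = (0.30 / 0.60) * 100 = 50 — renormalizing by totalWeight keeps
   * a "reference" machine at 50 whether or not the AI tests ran.
   */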
(weightedSum / totalWeight) * 100 : 0 return Math.round(Math.min(100, Math.max(0, nomadScore)) * 100) / 100 } /** * Normalize a raw score to a 0-1 scale using log scaling (the combined NOMAD score is rescaled to 0-100 in _calculateNomadScore) * This provides diminishing returns for very high scores */ private _normalizeScore(value: number, reference: number): number { if (value <= 0) return 0 // Log scale: score = 50 * (1 + log2(value/reference)) // This gives 50 at the reference value (0.5 after the final division by 100), scales logarithmically const ratio = value / reference const score = 50 * (1 + Math.log2(Math.max(0.01, ratio))) return Math.min(100, Math.max(0, score)) / 100 } /** * Normalize a score where lower is better (like latency), also on a 0-1 scale */ private _normalizeScoreInverse(value: number, reference: number): number { if (value <= 0) return 1 // Inverse: lower values = higher scores const ratio = reference / value const score = 50 * (1 + Math.log2(Math.max(0.01, ratio))) return Math.min(100, Math.max(0, score)) / 100 } /** * Ensure sysbench Docker image is available */ private async _ensureSysbenchImage(): Promise<void> { try { await this.dockerService.docker.getImage(SYSBENCH_IMAGE).inspect() } catch { this._updateStatus('starting', `Pulling sysbench image...`) const pullStream = await this.dockerService.docker.pull(SYSBENCH_IMAGE) await new Promise((resolve) => this.dockerService.docker.modem.followProgress(pullStream, resolve)) } } /** * Run sysbench CPU benchmark */ private async _runSysbenchCpu(): Promise<{ events_per_second: number; total_time: number; total_events: number }> { const output = await this._runSysbenchCommand([ 'sysbench', 'cpu', '--cpu-max-prime=20000', '--threads=4', '--time=30', 'run', ]) // Parse output for events per second const eventsMatch = output.match(/events per second:\s*([\d.]+)/i) const totalTimeMatch = output.match(/total time:\s*([\d.]+)s/i) const totalEventsMatch = output.match(/total number of events:\s*(\d+)/i) return { events_per_second: eventsMatch ? parseFloat(eventsMatch[1]) : 0, total_time: totalTimeMatch ? parseFloat(totalTimeMatch[1]) : 30, total_events: totalEventsMatch ? parseInt(totalEventsMatch[1]) : 0, } } /** * Run sysbench memory benchmark */ private async _runSysbenchMemory(): Promise<{ operations_per_second: number; transfer_rate_mb_per_sec: number; total_time: number }> { const output = await this._runSysbenchCommand([ 'sysbench', 'memory', '--memory-block-size=1K', '--memory-total-size=10G', '--threads=4', 'run', ]) // Parse output const opsMatch = output.match(/Total operations:\s*\d+\s*\(([\d.]+)\s*per second\)/i) const transferMatch = output.match(/([\d.]+)\s*MiB\/sec/i) const timeMatch = output.match(/total time:\s*([\d.]+)s/i) return { operations_per_second: opsMatch ? parseFloat(opsMatch[1]) : 0, transfer_rate_mb_per_sec: transferMatch ? parseFloat(transferMatch[1]) : 0, total_time: timeMatch ? parseFloat(timeMatch[1]) : 0, } } /** * Run sysbench disk read benchmark */ private async _runSysbenchDiskRead(): Promise<{ reads_per_second: number; writes_per_second: number; read_mb_per_sec: number; write_mb_per_sec: number; total_time: number }> { // Run prepare, test, and cleanup in a single container // This is necessary because each container has its own filesystem const output = await this._runSysbenchCommand([ 'sh', '-c', 'sysbench fileio --file-total-size=1G --file-num=4 prepare && ' + 'sysbench fileio --file-total-size=1G --file-num=4 --file-test-mode=seqrd --time=30 run && ' + 'sysbench fileio --file-total-size=1G --file-num=4 cleanup', ]) // Parse output - look for the Throughput section const readMatch = output.match(/read,\s*MiB\/s:\s*([\d.]+)/i) const readsPerSecMatch = output.match(/reads\/s:\s*([\d.]+)/i) logger.debug(`[BenchmarkService] Disk read output parsing - read: ${readMatch?.[1]}, reads/s: ${readsPerSecMatch?.[1]}`) return { reads_per_second: readsPerSecMatch ?
parseFloat(readsPerSecMatch[1]) : 0, writes_per_second: 0, read_mb_per_sec: readMatch ? parseFloat(readMatch[1]) : 0, write_mb_per_sec: 0, total_time: 30, } } /** * Run sysbench disk write benchmark */ private async _runSysbenchDiskWrite(): Promise<{ reads_per_second: number; writes_per_second: number; read_mb_per_sec: number; write_mb_per_sec: number; total_time: number }> { // Run prepare, test, and cleanup in a single container // This is necessary because each container has its own filesystem const output = await this._runSysbenchCommand([ 'sh', '-c', 'sysbench fileio --file-total-size=1G --file-num=4 prepare && ' + 'sysbench fileio --file-total-size=1G --file-num=4 --file-test-mode=seqwr --time=30 run && ' + 'sysbench fileio --file-total-size=1G --file-num=4 cleanup', ]) // Parse output - look for the Throughput section const writeMatch = output.match(/written,\s*MiB\/s:\s*([\d.]+)/i) const writesPerSecMatch = output.match(/writes\/s:\s*([\d.]+)/i) logger.debug(`[BenchmarkService] Disk write output parsing - written: ${writeMatch?.[1]}, writes/s: ${writesPerSecMatch?.[1]}`) return { reads_per_second: 0, writes_per_second: writesPerSecMatch ? parseFloat(writesPerSecMatch[1]) : 0, read_mb_per_sec: 0, write_mb_per_sec: writeMatch ? parseFloat(writeMatch[1]) : 0, total_time: 30, } } /** * Run a sysbench command in a Docker container */ private async _runSysbenchCommand(cmd: string[]): Promise<string> { let container: Dockerode.Container | null = null try { // Create container with TTY to avoid multiplexed output container = await this.dockerService.docker.createContainer({ Image: SYSBENCH_IMAGE, Cmd: cmd, name: `${SYSBENCH_CONTAINER_NAME}_${Date.now()}`, Tty: true, // Important: prevents multiplexed stdout/stderr headers HostConfig: { AutoRemove: false, // Don't auto-remove to avoid race condition with fetching logs }, }) // Start container await container.start() // Wait for completion await container.wait() // Get logs after container has finished const logs = await container.logs({ stdout: true, stderr: true, }) // Parse logs (Docker logs include header bytes) const output = logs.toString('utf8') .replace(/[\x00-\x08]/g, '') // Remove control characters .trim() // Manually remove the container after getting logs try { await container.remove() } catch (removeError) { // Log but don't fail if removal fails (container might already be gone) logger.warn(`Failed to remove sysbench container: ${removeError.message}`) } return output } catch (error) { // Clean up container on error if it exists if (container) { try { await container.remove({ force: true }) } catch (removeError) { // Ignore removal errors } } logger.error(`Sysbench command failed: ${error.message}`) throw new Error(`Sysbench command failed: ${error.message}`) } } /** * Broadcast benchmark progress update */ private _updateStatus(status: BenchmarkStatus, message: string) { this.currentStatus = status const progress: BenchmarkProgress = { status, progress: this._getProgressPercent(status), message, current_stage: this._getStageLabel(status), timestamp: new Date().toISOString(), } transmit.broadcast(BROADCAST_CHANNELS.BENCHMARK_PROGRESS, { benchmark_id: this.currentBenchmarkId, ...progress, }) logger.info(`[BenchmarkService] ${status}: ${message}`) } /** * Get progress percentage for a given status */ private _getProgressPercent(status: BenchmarkStatus): number { const progressMap: Record<BenchmarkStatus, number> = { idle: 0, starting: 5, detecting_hardware: 10, running_cpu: 25, running_memory: 40, running_disk_read: 55, running_disk_write: 70, downloading_ai_model: 80, running_ai: 85, calculating_score: 95, completed: 100, error: 0, } return progressMap[status] || 0 } /** * Get
human-readable stage label */ private _getStageLabel(status: BenchmarkStatus): string { const labelMap: Record<BenchmarkStatus, string> = { idle: 'Idle', starting: 'Starting', detecting_hardware: 'Detecting Hardware', running_cpu: 'CPU Benchmark', running_memory: 'Memory Benchmark', running_disk_read: 'Disk Read Test', running_disk_write: 'Disk Write Test', downloading_ai_model: 'Downloading AI Model', running_ai: 'AI Inference Test', calculating_score: 'Calculating Score', completed: 'Complete', error: 'Error', } return labelMap[status] || status } } ================================================ FILE: admin/app/services/chat_service.ts ================================================ import ChatSession from '#models/chat_session' import ChatMessage from '#models/chat_message' import logger from '@adonisjs/core/services/logger' import { DateTime } from 'luxon' import { inject } from '@adonisjs/core' import { OllamaService } from './ollama_service.js' import { DEFAULT_QUERY_REWRITE_MODEL, SYSTEM_PROMPTS } from '../../constants/ollama.js' import { toTitleCase } from '../utils/misc.js' @inject() export class ChatService { constructor(private ollamaService: OllamaService) {} async getAllSessions() { try { const sessions = await ChatSession.query().orderBy('updated_at', 'desc') return sessions.map((session) => ({ id: session.id.toString(), title: session.title, model: session.model, timestamp: session.updated_at.toJSDate(), lastMessage: null, // Will be populated from messages if needed })) } catch (error) { logger.error( `[ChatService] Failed to get sessions: ${error instanceof Error ? error.message : error}` ) return [] } } async getChatSuggestions() { try { const models = await this.ollamaService.getModels() if (!models || models.length === 0) { return [] // If no models are available, return empty suggestions } // Larger models generally give "better" responses, so pick the largest one const largestModel = models.reduce((prev, current) => { return prev.size > current.size ? prev : current }) if (!largestModel) { return [] } const response = await this.ollamaService.chat({ model: largestModel.name, messages: [ { role: 'user', content: SYSTEM_PROMPTS.chat_suggestions, } ], stream: false, }) if (response && response.message && response.message.content) { const content = response.message.content.trim() // Handle both comma-separated and newline-separated formats let suggestions: string[] = [] // Try splitting by commas first if (content.includes(',')) { suggestions = content.split(',').map((s) => s.trim()) } // Fall back to newline separation else { suggestions = content .split(/\r?\n/) .map((s) => s.trim()) // Remove numbered list markers (1., 2., 3., etc.) and bullet points .map((s) => s.replace(/^\d+\.\s*/, '').replace(/^[-*•]\s*/, '')) // Remove surrounding quotes if present .map((s) => s.replace(/^["']|["']$/g, '')) } // Filter out empty strings and limit to 3 suggestions const filtered = suggestions .filter((s) => s.length > 0) .slice(0, 3) return filtered.map((s) => toTitleCase(s)) } else { return [] } } catch (error) { logger.error( `[ChatService] Failed to get chat suggestions: ${ error instanceof Error ?
error.message : error }` ) return [] } } async getSession(sessionId: number) { try { const session = await ChatSession.query().where('id', sessionId).preload('messages').first() if (!session) { return null } return { id: session.id.toString(), title: session.title, model: session.model, timestamp: session.updated_at.toJSDate(), messages: session.messages.map((msg) => ({ id: msg.id.toString(), role: msg.role, content: msg.content, timestamp: msg.created_at.toJSDate(), })), } } catch (error) { logger.error( `[ChatService] Failed to get session ${sessionId}: ${ error instanceof Error ? error.message : error }` ) return null } } async createSession(title: string, model?: string) { try { const session = await ChatSession.create({ title, model: model || null, }) return { id: session.id.toString(), title: session.title, model: session.model, timestamp: session.created_at.toJSDate(), } } catch (error) { logger.error( `[ChatService] Failed to create session: ${error instanceof Error ? error.message : error}` ) throw new Error('Failed to create chat session') } } async updateSession(sessionId: number, data: { title?: string; model?: string }) { try { const session = await ChatSession.findOrFail(sessionId) if (data.title) { session.title = data.title } if (data.model !== undefined) { session.model = data.model } await session.save() return { id: session.id.toString(), title: session.title, model: session.model, timestamp: session.updated_at.toJSDate(), } } catch (error) { logger.error( `[ChatService] Failed to update session ${sessionId}: ${ error instanceof Error ? error.message : error }` ) throw new Error('Failed to update chat session') } } async addMessage(sessionId: number, role: 'system' | 'user' | 'assistant', content: string) { try { const message = await ChatMessage.create({ session_id: sessionId, role, content, }) // Update session's updated_at timestamp const session = await ChatSession.findOrFail(sessionId) session.updated_at = DateTime.now() await session.save() return { id: message.id.toString(), role: message.role, content: message.content, timestamp: message.created_at.toJSDate(), } } catch (error) { logger.error( `[ChatService] Failed to add message to session ${sessionId}: ${ error instanceof Error ? error.message : error }` ) throw new Error('Failed to add message') } } async deleteSession(sessionId: number) { try { const session = await ChatSession.findOrFail(sessionId) await session.delete() return { success: true } } catch (error) { logger.error( `[ChatService] Failed to delete session ${sessionId}: ${ error instanceof Error ? error.message : error }` ) throw new Error('Failed to delete chat session') } } async getMessageCount(sessionId: number): Promise { try { const count = await ChatMessage.query().where('session_id', sessionId).count('* as total') return Number(count[0].$extras.total) } catch (error) { logger.error( `[ChatService] Failed to get message count for session ${sessionId}: ${error instanceof Error ? error.message : error}` ) return 0 } } async generateTitle(sessionId: number, userMessage: string, assistantMessage: string) { try { const models = await this.ollamaService.getModels() const titleModelAvailable = models?.some((m) => m.name === DEFAULT_QUERY_REWRITE_MODEL) let title: string if (!titleModelAvailable) { title = userMessage.slice(0, 57) + (userMessage.length > 57 ? '...' 
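// Fallback title: 57 chars plus '...' caps the generated title at 60 characters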
: '') } else { const response = await this.ollamaService.chat({ model: DEFAULT_QUERY_REWRITE_MODEL, messages: [ { role: 'system', content: SYSTEM_PROMPTS.title_generation }, { role: 'user', content: userMessage }, { role: 'assistant', content: assistantMessage }, ], }) title = response?.message?.content?.trim() if (!title) { title = userMessage.slice(0, 57) + (userMessage.length > 57 ? '...' : '') } } await this.updateSession(sessionId, { title }) logger.info(`[ChatService] Generated title for session ${sessionId}: "${title}"`) } catch (error) { logger.error( `[ChatService] Failed to generate title for session ${sessionId}: ${error instanceof Error ? error.message : error}` ) // Fall back to truncated user message try { const fallbackTitle = userMessage.slice(0, 57) + (userMessage.length > 57 ? '...' : '') await this.updateSession(sessionId, { title: fallbackTitle }) } catch { // Silently fail - session keeps "New Chat" title } } } async deleteAllSessions() { try { await ChatSession.query().delete() return { success: true, message: 'All chat sessions deleted' } } catch (error) { logger.error( `[ChatService] Failed to delete all sessions: ${ error instanceof Error ? error.message : error }` ) throw new Error('Failed to delete all chat sessions') } } } ================================================ FILE: admin/app/services/collection_manifest_service.ts ================================================ import axios from 'axios' import vine from '@vinejs/vine' import logger from '@adonisjs/core/services/logger' import { DateTime } from 'luxon' import { join } from 'path' import CollectionManifest from '#models/collection_manifest' import InstalledResource from '#models/installed_resource' import { zimCategoriesSpecSchema, mapsSpecSchema, wikipediaSpecSchema } from '#validators/curated_collections' import { ensureDirectoryExists, listDirectoryContents, getFileStatsIfExists, ZIM_STORAGE_PATH, } from '../utils/fs.js' import type { ManifestType, ZimCategoriesSpec, MapsSpec, CategoryWithStatus, CollectionWithStatus, SpecResource, SpecTier, } from '../../types/collections.js' const SPEC_URLS: Record = { zim_categories: 'https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/collections/kiwix-categories.json', maps: 'https://github.com/Crosstalk-Solutions/project-nomad/raw/refs/heads/main/collections/maps.json', wikipedia: 'https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/collections/wikipedia.json', } const VALIDATORS: Record = { zim_categories: zimCategoriesSpecSchema, maps: mapsSpecSchema, wikipedia: wikipediaSpecSchema, } export class CollectionManifestService { private readonly mapStoragePath = '/storage/maps' // ---- Spec management ---- async fetchAndCacheSpec(type: ManifestType): Promise { try { const response = await axios.get(SPEC_URLS[type], { timeout: 15000 }) const validated = await vine.validate({ schema: VALIDATORS[type], data: response.data, }) const existing = await CollectionManifest.find(type) const specVersion = validated.spec_version if (existing) { const changed = existing.spec_version !== specVersion existing.spec_version = specVersion existing.spec_data = validated existing.fetched_at = DateTime.now() await existing.save() return changed } await CollectionManifest.create({ type, spec_version: specVersion, spec_data: validated, fetched_at: DateTime.now(), }) return true } catch (error) { logger.error(`[CollectionManifestService] Failed to fetch spec for ${type}:`, error?.message || error) return false } } async 
getCachedSpec<T>(type: ManifestType): Promise<T | null> { const manifest = await CollectionManifest.find(type) if (!manifest) return null return manifest.spec_data as T } async getSpecWithFallback<T>(type: ManifestType): Promise<T | null> { try { await this.fetchAndCacheSpec(type) } catch { // Fetch failed, will fall back to cache } return this.getCachedSpec<T>(type) } // ---- Status computation ---- async getCategoriesWithStatus(): Promise<CategoryWithStatus[]> { const spec = await this.getSpecWithFallback<ZimCategoriesSpec>('zim_categories') if (!spec) return [] const installedResources = await InstalledResource.query().where('resource_type', 'zim') const installedMap = new Map(installedResources.map((r) => [r.resource_id, r])) return spec.categories.map((category) => ({ ...category, installedTierSlug: this.getInstalledTierForCategory(category.tiers, installedMap), })) } async getMapCollectionsWithStatus(): Promise<CollectionWithStatus[]> { const spec = await this.getSpecWithFallback<MapsSpec>('maps') if (!spec) return [] const installedResources = await InstalledResource.query().where('resource_type', 'map') const installedIds = new Set(installedResources.map((r) => r.resource_id)) return spec.collections.map((collection) => { const installedCount = collection.resources.filter((r) => installedIds.has(r.id)).length return { ...collection, all_installed: installedCount === collection.resources.length, installed_count: installedCount, total_count: collection.resources.length, } }) } // ---- Tier resolution ---- static resolveTierResources(tier: SpecTier, allTiers: SpecTier[]): SpecResource[] { const visited = new Set<string>() return CollectionManifestService._resolveTierResourcesInner(tier, allTiers, visited) } private static _resolveTierResourcesInner( tier: SpecTier, allTiers: SpecTier[], visited: Set<string> ): SpecResource[] { if (visited.has(tier.slug)) return [] // cycle detection visited.add(tier.slug) const resources: SpecResource[] = [] if (tier.includesTier) { const included = allTiers.find((t) => t.slug === tier.includesTier) if (included) { resources.push(...CollectionManifestService._resolveTierResourcesInner(included, allTiers, visited)) } } resources.push(...tier.resources) return resources } getInstalledTierForCategory( tiers: SpecTier[], installedMap: Map<string, InstalledResource> ): string | undefined { // Check from highest tier to lowest (tiers are ordered low to high in spec) const reversedTiers = [...tiers].reverse() for (const tier of reversedTiers) { const resolved = CollectionManifestService.resolveTierResources(tier, tiers) if (resolved.length === 0) continue const allInstalled = resolved.every((r) => installedMap.has(r.id)) if (allInstalled) { return tier.slug } } return undefined } // ---- Filename parsing ---- static parseZimFilename(filename: string): { resource_id: string; version: string } | null { const name = filename.replace(/\.zim$/, '') const match = name.match(/^(.+)_(\d{4}-\d{2})$/) if (!match) return null return { resource_id: match[1], version: match[2] } } static parseMapFilename(filename: string): { resource_id: string; version: string } | null { const name = filename.replace(/\.pmtiles$/, '') const match = name.match(/^(.+)_(\d{4}-\d{2})$/) if (!match) return null return { resource_id: match[1], version: match[2] } } // ---- Filesystem reconciliation ---- async reconcileFromFilesystem(): Promise<{ zim: number; map: number }> { let zimCount = 0 let mapCount = 0 console.log("RECONCILING FILESYSTEM MANIFESTS...") // Reconcile ZIM files try { const zimDir = join(process.cwd(), ZIM_STORAGE_PATH) await ensureDirectoryExists(zimDir) const zimItems = await listDirectoryContents(zimDir) const 
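// Example (hypothetical filename): 'devdocs_en_javascript_2025-01.zim' reconciles to
// resource_id 'devdocs_en_javascript' and version '2025-01' via parseZimFilename above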
zimFiles = zimItems.filter((f) => f.name.endsWith('.zim')) console.log(`Found ${zimFiles.length} ZIM files on disk. Reconciling with database...`) // Get spec for URL lookup const zimSpec = await this.getCachedSpec('zim_categories') const specResourceMap = new Map() if (zimSpec) { for (const cat of zimSpec.categories) { for (const tier of cat.tiers) { for (const res of tier.resources) { specResourceMap.set(res.id, res) } } } } const seenZimIds = new Set() for (const file of zimFiles) { console.log(`Processing ZIM file: ${file.name}`) // Skip Wikipedia files (managed by WikipediaSelection model) if (file.name.startsWith('wikipedia_en_')) continue const parsed = CollectionManifestService.parseZimFilename(file.name) console.log(`Parsed ZIM filename:`, parsed) if (!parsed) continue seenZimIds.add(parsed.resource_id) const specRes = specResourceMap.get(parsed.resource_id) const filePath = join(zimDir, file.name) const stats = await getFileStatsIfExists(filePath) await InstalledResource.updateOrCreate( { resource_id: parsed.resource_id, resource_type: 'zim' }, { version: parsed.version, url: specRes?.url || '', file_path: filePath, file_size_bytes: stats ? Number(stats.size) : null, installed_at: DateTime.now(), } ) zimCount++ } // Remove entries for ZIM files no longer on disk const existingZim = await InstalledResource.query().where('resource_type', 'zim') for (const entry of existingZim) { if (!seenZimIds.has(entry.resource_id)) { await entry.delete() } } } catch (error) { logger.error('[CollectionManifestService] Error reconciling ZIM files:', error) } // Reconcile map files try { const mapDir = join(process.cwd(), this.mapStoragePath, 'pmtiles') await ensureDirectoryExists(mapDir) const mapItems = await listDirectoryContents(mapDir) const mapFiles = mapItems.filter((f) => f.name.endsWith('.pmtiles')) // Get spec for URL/version lookup const mapSpec = await this.getCachedSpec('maps') const mapResourceMap = new Map() if (mapSpec) { for (const col of mapSpec.collections) { for (const res of col.resources) { mapResourceMap.set(res.id, res) } } } const seenMapIds = new Set() for (const file of mapFiles) { const parsed = CollectionManifestService.parseMapFilename(file.name) if (!parsed) continue seenMapIds.add(parsed.resource_id) const specRes = mapResourceMap.get(parsed.resource_id) const filePath = join(mapDir, file.name) const stats = await getFileStatsIfExists(filePath) await InstalledResource.updateOrCreate( { resource_id: parsed.resource_id, resource_type: 'map' }, { version: parsed.version, url: specRes?.url || '', file_path: filePath, file_size_bytes: stats ? 
Number(stats.size) : null, installed_at: DateTime.now(), } ) mapCount++ } // Remove entries for map files no longer on disk const existingMaps = await InstalledResource.query().where('resource_type', 'map') for (const entry of existingMaps) { if (!seenMapIds.has(entry.resource_id)) { await entry.delete() } } } catch (error) { logger.error('[CollectionManifestService] Error reconciling map files:', error) } logger.info(`[CollectionManifestService] Reconciled ${zimCount} ZIM files, ${mapCount} map files`) return { zim: zimCount, map: mapCount } } } ================================================ FILE: admin/app/services/collection_update_service.ts ================================================ import logger from '@adonisjs/core/services/logger' import env from '#start/env' import axios from 'axios' import InstalledResource from '#models/installed_resource' import { RunDownloadJob } from '../jobs/run_download_job.js' import { ZIM_STORAGE_PATH } from '../utils/fs.js' import { join } from 'path' import type { ResourceUpdateCheckRequest, ResourceUpdateInfo, ContentUpdateCheckResult, } from '../../types/collections.js' import { NOMAD_API_DEFAULT_BASE_URL } from '../../constants/misc.js' const MAP_STORAGE_PATH = '/storage/maps' const ZIM_MIME_TYPES = ['application/x-zim', 'application/x-openzim', 'application/octet-stream'] const PMTILES_MIME_TYPES = ['application/vnd.pmtiles', 'application/octet-stream'] export class CollectionUpdateService { async checkForUpdates(): Promise { const nomadAPIURL = env.get('NOMAD_API_URL') || NOMAD_API_DEFAULT_BASE_URL if (!nomadAPIURL) { return { updates: [], checked_at: new Date().toISOString(), error: 'Nomad API is not configured. Set the NOMAD_API_URL environment variable.', } } const installed = await InstalledResource.all() if (installed.length === 0) { return { updates: [], checked_at: new Date().toISOString(), } } const requestBody: ResourceUpdateCheckRequest = { resources: installed.map((r) => ({ resource_id: r.resource_id, resource_type: r.resource_type, installed_version: r.version, })), } try { const response = await axios.post(`${nomadAPIURL}/api/v1/resources/check-updates`, requestBody, { timeout: 15000, }) logger.info( `[CollectionUpdateService] Update check complete: ${response.data.length} update(s) available` ) return { updates: response.data, checked_at: new Date().toISOString(), } } catch (error) { if (axios.isAxiosError(error) && error.response) { logger.error( `[CollectionUpdateService] Nomad API returned ${error.response.status}: ${JSON.stringify(error.response.data)}` ) return { updates: [], checked_at: new Date().toISOString(), error: `Nomad API returned status ${error.response.status}`, } } const message = error instanceof Error ? 
error.message : 'Unknown error contacting Nomad API' logger.error(`[CollectionUpdateService] Failed to check for updates: ${message}`) return { updates: [], checked_at: new Date().toISOString(), error: `Failed to contact Nomad API: ${message}`, } } } async applyUpdate( update: ResourceUpdateInfo ): Promise<{ success: boolean; jobId?: string; error?: string }> { // Check if a download is already in progress for this URL const existingJob = await RunDownloadJob.getByUrl(update.download_url) if (existingJob) { const state = await existingJob.getState() if (state === 'active' || state === 'waiting' || state === 'delayed') { return { success: false, error: `A download is already in progress for ${update.resource_id}`, } } } const filename = this.buildFilename(update) const filepath = this.buildFilepath(update, filename) const result = await RunDownloadJob.dispatch({ url: update.download_url, filepath, timeout: 30000, allowedMimeTypes: update.resource_type === 'zim' ? ZIM_MIME_TYPES : PMTILES_MIME_TYPES, forceNew: true, filetype: update.resource_type, resourceMetadata: { resource_id: update.resource_id, version: update.latest_version, collection_ref: null, }, }) if (!result || !result.job) { return { success: false, error: 'Failed to dispatch download job' } } logger.info( `[CollectionUpdateService] Dispatched update download for ${update.resource_id}: ${update.installed_version} → ${update.latest_version}` ) return { success: true, jobId: result.job.id } } async applyAllUpdates( updates: ResourceUpdateInfo[] ): Promise<{ results: Array<{ resource_id: string; success: boolean; jobId?: string; error?: string }> }> { const results: Array<{ resource_id: string success: boolean jobId?: string error?: string }> = [] for (const update of updates) { const result = await this.applyUpdate(update) results.push({ resource_id: update.resource_id, ...result }) } return { results } } private buildFilename(update: ResourceUpdateInfo): string { if (update.resource_type === 'zim') { return `${update.resource_id}_${update.latest_version}.zim` } return `${update.resource_id}_${update.latest_version}.pmtiles` } private buildFilepath(update: ResourceUpdateInfo, filename: string): string { if (update.resource_type === 'zim') { return join(process.cwd(), ZIM_STORAGE_PATH, filename) } return join(process.cwd(), MAP_STORAGE_PATH, 'pmtiles', filename) } } ================================================ FILE: admin/app/services/container_registry_service.ts ================================================ import logger from '@adonisjs/core/services/logger' import { isNewerVersion, parseMajorVersion } from '../utils/version.js' export interface ParsedImageReference { registry: string namespace: string repo: string tag: string /** Full name for registry API calls: namespace/repo */ fullName: string } export interface AvailableUpdate { tag: string isLatest: boolean releaseUrl?: string } interface TokenCacheEntry { token: string expiresAt: number } const SEMVER_TAG_PATTERN = /^v?(\d+\.\d+(?:\.\d+)?)$/ const PLATFORM_SUFFIXES = ['-arm64', '-amd64', '-alpine', '-slim', '-cuda', '-rocm'] const REJECTED_TAGS = new Set(['latest', 'nightly', 'edge', 'dev', 'beta', 'alpha', 'canary', 'rc', 'test', 'debug']) export class ContainerRegistryService { private tokenCache = new Map() private sourceUrlCache = new Map() private releaseTagPrefixCache = new Map() /** * Parse a Docker image reference string into its components. 
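 * @example
 * // Illustrative inputs (tags hypothetical), mirroring the three cases handled below:
 * //   'nginx'                           → registry 'registry-1.docker.io', fullName 'library/nginx', tag 'latest'
 * //   'ollama/ollama:0.5.4'             → registry 'registry-1.docker.io', fullName 'ollama/ollama', tag '0.5.4'
 * //   'ghcr.io/kiwix/kiwix-serve:3.7.0' → registry 'ghcr.io', fullName 'kiwix/kiwix-serve', tag '3.7.0'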
*/ parseImageReference(image: string): ParsedImageReference { let registry: string let remainder: string let tag = 'latest' // Split off the tag const lastColon = image.lastIndexOf(':') if (lastColon > -1 && !image.substring(lastColon).includes('/')) { tag = image.substring(lastColon + 1) image = image.substring(0, lastColon) } // Determine registry vs image path const parts = image.split('/') if (parts.length === 1) { // e.g. "nginx" → Docker Hub library image registry = 'registry-1.docker.io' remainder = `library/${parts[0]}` } else if (parts.length === 2 && !parts[0].includes('.') && !parts[0].includes(':')) { // e.g. "ollama/ollama" → Docker Hub user image registry = 'registry-1.docker.io' remainder = image } else { // e.g. "ghcr.io/kiwix/kiwix-serve" → custom registry registry = parts[0] remainder = parts.slice(1).join('/') } const namespaceParts = remainder.split('/') const repo = namespaceParts.pop()! const namespace = namespaceParts.join('/') return { registry, namespace, repo, tag, fullName: remainder, } } /** * Get an anonymous auth token for the given registry and repository. * NOTE: This could be expanded in the future to support private repo authentication */ private async getToken(registry: string, fullName: string): Promise { const cacheKey = `${registry}/${fullName}` const cached = this.tokenCache.get(cacheKey) if (cached && cached.expiresAt > Date.now()) { return cached.token } let tokenUrl: string if (registry === 'registry-1.docker.io') { tokenUrl = `https://auth.docker.io/token?service=registry.docker.io&scope=repository:${fullName}:pull` } else if (registry === 'ghcr.io') { tokenUrl = `https://ghcr.io/token?service=ghcr.io&scope=repository:${fullName}:pull` } else { // For other registries, try the standard v2 token endpoint tokenUrl = `https://${registry}/token?service=${registry}&scope=repository:${fullName}:pull` } const response = await this.fetchWithRetry(tokenUrl) if (!response.ok) { throw new Error(`Failed to get auth token from ${registry}: ${response.status}`) } const data = (await response.json()) as { token?: string; access_token?: string } const token = data.token || data.access_token || '' if (!token) { throw new Error(`No token returned from ${registry}`) } // Cache for 5 minutes (tokens usually last longer, but be conservative) this.tokenCache.set(cacheKey, { token, expiresAt: Date.now() + 5 * 60 * 1000, }) return token } /** * List all tags for a given image from the registry. */ async listTags(parsed: ParsedImageReference): Promise { const token = await this.getToken(parsed.registry, parsed.fullName) const allTags: string[] = [] let url = `https://${parsed.registry}/v2/${parsed.fullName}/tags/list?n=1000` while (url) { const response = await this.fetchWithRetry(url, { headers: { Authorization: `Bearer ${token}` }, }) if (!response.ok) { throw new Error(`Failed to list tags for ${parsed.fullName}: ${response.status}`) } const data = (await response.json()) as { tags?: string[] } if (data.tags) { allTags.push(...data.tags) } // Handle pagination via Link header const linkHeader = response.headers.get('link') if (linkHeader) { const match = linkHeader.match(/<([^>]+)>;\s*rel="next"/) url = match ? match[1] : '' } else { url = '' } } return allTags } /** * Check if a specific tag supports the given architecture by fetching its manifest. 
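 * For a multi-arch index (manifest list) this returns true only if some entry's
 * platform.architecture equals hostArch, e.g. 'arm64'; on any lookup failure it
 * assumes compatibility rather than blocking an otherwise valid update.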
*/ async checkArchSupport(parsed: ParsedImageReference, tag: string, hostArch: string): Promise { try { const token = await this.getToken(parsed.registry, parsed.fullName) const url = `https://${parsed.registry}/v2/${parsed.fullName}/manifests/${tag}` const response = await this.fetchWithRetry(url, { headers: { Authorization: `Bearer ${token}`, Accept: [ 'application/vnd.oci.image.index.v1+json', 'application/vnd.docker.distribution.manifest.list.v2+json', 'application/vnd.oci.image.manifest.v1+json', 'application/vnd.docker.distribution.manifest.v2+json', ].join(', '), }, }) if (!response.ok) return true // If we can't check, assume it's compatible const manifest = (await response.json()) as { mediaType?: string manifests?: Array<{ platform?: { architecture?: string } }> } const mediaType = manifest.mediaType || response.headers.get('content-type') || '' // Manifest list — check if any platform matches if ( mediaType.includes('manifest.list') || mediaType.includes('image.index') || manifest.manifests ) { const manifests = manifest.manifests || [] return manifests.some( (m: any) => m.platform && m.platform.architecture === hostArch ) } // Single manifest — assume compatible (can't easily determine arch without fetching config blob) return true } catch (error) { logger.warn(`[ContainerRegistryService] Error checking arch for ${tag}: ${error.message}`) return true // Assume compatible on error } } /** * Extract the source repository URL from an image's OCI labels. * Uses the standardized `org.opencontainers.image.source` label. * Result is cached per image (not per tag). */ async getSourceUrl(parsed: ParsedImageReference): Promise { const cacheKey = `${parsed.registry}/${parsed.fullName}` if (this.sourceUrlCache.has(cacheKey)) { return this.sourceUrlCache.get(cacheKey)! 
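// null is a meaningful cached value here: images without the OCI source label are
// recorded as null so they are not re-probed on every update check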
} try { const token = await this.getToken(parsed.registry, parsed.fullName) // First get the manifest to find the config blob digest const manifestUrl = `https://${parsed.registry}/v2/${parsed.fullName}/manifests/${parsed.tag}` const manifestRes = await this.fetchWithRetry(manifestUrl, { headers: { Authorization: `Bearer ${token}`, Accept: [ 'application/vnd.oci.image.manifest.v1+json', 'application/vnd.docker.distribution.manifest.v2+json', 'application/vnd.oci.image.index.v1+json', 'application/vnd.docker.distribution.manifest.list.v2+json', ].join(', '), }, }) if (!manifestRes.ok) { this.sourceUrlCache.set(cacheKey, null) return null } const manifest = (await manifestRes.json()) as { config?: { digest?: string } manifests?: Array<{ digest?: string; mediaType?: string; platform?: { architecture?: string } }> } // If this is a manifest list, pick the first manifest to get the config let configDigest = manifest.config?.digest if (!configDigest && manifest.manifests?.length) { const firstManifest = manifest.manifests[0] if (firstManifest.digest) { const childRes = await this.fetchWithRetry( `https://${parsed.registry}/v2/${parsed.fullName}/manifests/${firstManifest.digest}`, { headers: { Authorization: `Bearer ${token}`, Accept: 'application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json', }, } ) if (childRes.ok) { const childManifest = (await childRes.json()) as { config?: { digest?: string } } configDigest = childManifest.config?.digest } } } if (!configDigest) { this.sourceUrlCache.set(cacheKey, null) return null } // Fetch the config blob to read labels const blobUrl = `https://${parsed.registry}/v2/${parsed.fullName}/blobs/${configDigest}` const blobRes = await this.fetchWithRetry(blobUrl, { headers: { Authorization: `Bearer ${token}` }, }) if (!blobRes.ok) { this.sourceUrlCache.set(cacheKey, null) return null } const config = (await blobRes.json()) as { config?: { Labels?: Record } } const sourceUrl = config.config?.Labels?.['org.opencontainers.image.source'] || null this.sourceUrlCache.set(cacheKey, sourceUrl) return sourceUrl } catch (error) { logger.warn(`[ContainerRegistryService] Failed to get source URL for ${cacheKey}: ${error.message}`) this.sourceUrlCache.set(cacheKey, null) return null } } /** * Detect whether a GitHub/GitLab repo uses a 'v' prefix on release tags. * Probes the GitHub API with the current tag to determine the convention, * then caches the result per source URL. */ async detectReleaseTagPrefix(sourceUrl: string, sampleTag: string): Promise { if (this.releaseTagPrefixCache.has(sourceUrl)) { return this.releaseTagPrefixCache.get(sourceUrl)! 
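// Caching per source URL matters: the probes below hit the GitHub API anonymously,
// where rate limits are tight, so the tag convention is resolved at most once per repo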
} try { const url = new URL(sourceUrl) if (url.hostname !== 'github.com') { this.releaseTagPrefixCache.set(sourceUrl, '') return '' } const cleanPath = url.pathname.replace(/\.git$/, '').replace(/\/$/, '') const strippedTag = sampleTag.replace(/^v/, '') const vTag = `v${strippedTag}` // Try both variants against GitHub's API — the one that 200s tells us the convention // Try v-prefixed first since it's more common const vRes = await this.fetchWithRetry( `https://api.github.com/repos${cleanPath}/releases/tags/${vTag}`, { headers: { Accept: 'application/vnd.github.v3+json', 'User-Agent': 'ProjectNomad' } }, 1 ) if (vRes.ok) { this.releaseTagPrefixCache.set(sourceUrl, 'v') return 'v' } const plainRes = await this.fetchWithRetry( `https://api.github.com/repos${cleanPath}/releases/tags/${strippedTag}`, { headers: { Accept: 'application/vnd.github.v3+json', 'User-Agent': 'ProjectNomad' } }, 1 ) if (plainRes.ok) { this.releaseTagPrefixCache.set(sourceUrl, '') return '' } } catch { // On error, fall through to default } // Default: no prefix modification this.releaseTagPrefixCache.set(sourceUrl, '') return '' } /** * Build a release URL for a specific tag given a source repository URL and * the detected release tag prefix convention. * Supports GitHub and GitLab URL patterns. */ buildReleaseUrl(sourceUrl: string, tag: string, releaseTagPrefix: string): string | undefined { try { const url = new URL(sourceUrl) if (url.hostname === 'github.com' || url.hostname.includes('gitlab')) { const cleanPath = url.pathname.replace(/\.git$/, '').replace(/\/$/, '') const strippedTag = tag.replace(/^v/, '') const releaseTag = releaseTagPrefix ? `${releaseTagPrefix}${strippedTag}` : strippedTag return `${url.origin}${cleanPath}/releases/tag/${releaseTag}` } } catch { // Invalid URL, skip } return undefined } /** * Filter and sort tags to find compatible updates for a service. */ filterCompatibleUpdates( tags: string[], currentTag: string, majorVersion: number ): string[] { return tags .filter((tag) => { // Must match semver pattern if (!SEMVER_TAG_PATTERN.test(tag)) return false // Reject known non-version tags if (REJECTED_TAGS.has(tag.toLowerCase())) return false // Reject platform suffixes if (PLATFORM_SUFFIXES.some((suffix) => tag.toLowerCase().endsWith(suffix))) return false // Must be same major version if (parseMajorVersion(tag) !== majorVersion) return false // Must be newer than current return isNewerVersion(tag, currentTag) }) .sort((a, b) => (isNewerVersion(a, b) ? -1 : 1)) // Newest first } /** * High-level method to get available updates for a service. * Returns a sorted list of compatible newer versions (newest first). */ async getAvailableUpdates( containerImage: string, hostArch: string, fallbackSourceRepo?: string | null ): Promise { const parsed = this.parseImageReference(containerImage) const currentTag = parsed.tag if (currentTag === 'latest') { logger.warn( `[ContainerRegistryService] Cannot check updates for ${containerImage} — using :latest tag` ) return [] } const majorVersion = parseMajorVersion(currentTag) // Fetch tags and source URL in parallel const [tags, ociSourceUrl] = await Promise.all([ this.listTags(parsed), this.getSourceUrl(parsed), ]) // OCI label takes precedence, fall back to DB-stored source_repo const sourceUrl = ociSourceUrl || fallbackSourceRepo || null const compatible = this.filterCompatibleUpdates(tags, currentTag, majorVersion) // Detect release tag prefix convention (e.g. 
'v' vs no prefix) if we have a source URL let releaseTagPrefix = '' if (sourceUrl) { releaseTagPrefix = await this.detectReleaseTagPrefix(sourceUrl, currentTag) } // Check architecture support for the top candidates (limit checks to save API calls) const maxArchChecks = 10 const results: AvailableUpdate[] = [] for (const tag of compatible.slice(0, maxArchChecks)) { const supported = await this.checkArchSupport(parsed, tag, hostArch) if (supported) { results.push({ tag, isLatest: results.length === 0, releaseUrl: sourceUrl ? this.buildReleaseUrl(sourceUrl, tag, releaseTagPrefix) : undefined, }) } } // For remaining tags (beyond arch check limit), include them but mark as not latest for (const tag of compatible.slice(maxArchChecks)) { results.push({ tag, isLatest: false, releaseUrl: sourceUrl ? this.buildReleaseUrl(sourceUrl, tag, releaseTagPrefix) : undefined, }) } return results } /** * Fetch with retry and exponential backoff for rate limiting. */ private async fetchWithRetry( url: string, init?: RequestInit, maxRetries = 3 ): Promise { for (let attempt = 0; attempt <= maxRetries; attempt++) { const response = await fetch(url, init) if (response.status === 429 && attempt < maxRetries) { const retryAfter = response.headers.get('retry-after') const delay = retryAfter ? parseInt(retryAfter, 10) * 1000 : Math.pow(2, attempt) * 1000 logger.warn( `[ContainerRegistryService] Rate limited on ${url}, retrying in ${delay}ms` ) await new Promise((resolve) => setTimeout(resolve, delay)) continue } return response } throw new Error(`Failed to fetch ${url} after ${maxRetries} retries`) } } ================================================ FILE: admin/app/services/docker_service.ts ================================================ import Service from '#models/service' import Docker from 'dockerode' import logger from '@adonisjs/core/services/logger' import { inject } from '@adonisjs/core' import transmit from '@adonisjs/transmit/services/main' import { doResumableDownloadWithRetry } from '../utils/downloads.js' import { join } from 'path' import { ZIM_STORAGE_PATH } from '../utils/fs.js' import { SERVICE_NAMES } from '../../constants/service_names.js' import { exec } from 'child_process' import { promisify } from 'util' // import { readdir } from 'fs/promises' import KVStore from '#models/kv_store' import { BROADCAST_CHANNELS } from '../../constants/broadcast.js' @inject() export class DockerService { public docker: Docker private activeInstallations: Set = new Set() public static NOMAD_NETWORK = 'project-nomad_default' constructor() { // Support both Linux (production) and Windows (development with Docker Desktop) const isWindows = process.platform === 'win32' if (isWindows) { // Windows Docker Desktop uses named pipe this.docker = new Docker({ socketPath: '//./pipe/docker_engine' }) } else { // Linux uses Unix socket this.docker = new Docker({ socketPath: '/var/run/docker.sock' }) } } async affectContainer( serviceName: string, action: 'start' | 'stop' | 'restart' ): Promise<{ success: boolean; message: string }> { try { const service = await Service.query().where('service_name', serviceName).first() if (!service || !service.installed) { return { success: false, message: `Service ${serviceName} not found or not installed`, } } const containers = await this.docker.listContainers({ all: true }) const container = containers.find((c) => c.Names.includes(`/${serviceName}`)) if (!container) { return { success: false, message: `Container for service ${serviceName} not found`, } } const dockerContainer = 
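// Note: the Docker API reports container Names with a leading slash, which is why
// the lookup above matches `/${serviceName}`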
this.docker.getContainer(container.Id) if (action === 'stop') { await dockerContainer.stop() return { success: true, message: `Service ${serviceName} stopped successfully`, } } if (action === 'restart') { await dockerContainer.restart() return { success: true, message: `Service ${serviceName} restarted successfully`, } } if (action === 'start') { if (container.State === 'running') { return { success: true, message: `Service ${serviceName} is already running`, } } await dockerContainer.start() return { success: true, message: `Service ${serviceName} started successfully`, } } return { success: false, message: `Invalid action: ${action}. Use 'start', 'stop', or 'restart'.`, } } catch (error) { logger.error(`Error performing '${action}' on service ${serviceName}: ${error.message}`) return { success: false, message: `Failed to ${action} service ${serviceName}: ${error.message}`, } } } /** * Fetches the status of all Docker containers related to Nomad services (those prefixed with 'nomad_') */ async getServicesStatus(): Promise<{ service_name: string; status: string }[]> { try { const containers = await this.docker.listContainers({ all: true }) const containerMap = new Map() containers.forEach((container) => { const name = container.Names[0]?.replace('/', '') if (name && name.startsWith('nomad_')) { containerMap.set(name, container) } }) return Array.from(containerMap.entries()).map(([name, container]) => ({ service_name: name, status: container.State, })) } catch (error) { logger.error(`Error fetching services status: ${error.message}`) return [] } } /** * Get the URL to access a service based on its configuration. * Attempts to return a docker-internal URL using the service name and exposed port. * @param serviceName - The name of the service to get the URL for. * @returns - The URL as a string, or null if it cannot be determined. */ async getServiceURL(serviceName: string): Promise<string | null> { if (!serviceName || serviceName.trim() === '') { return null } const service = await Service.query() .where('service_name', serviceName) .andWhere('installed', true) .first() if (!service) { return null } const hostname = process.env.NODE_ENV === 'production' ? 
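// In production the admin container and services share the project-nomad_default
// network, so the bare service name should resolve via Docker's embedded DNS; in
// development we assume published ports on localhost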
serviceName : 'localhost' // First, check if ui_location is set and is a valid port number if (service.ui_location && parseInt(service.ui_location, 10)) { return `http://${hostname}:${service.ui_location}` } // Next, try to extract a host port from container_config const parsedConfig = this._parseContainerConfig(service.container_config) if (parsedConfig?.HostConfig?.PortBindings) { const portBindings = parsedConfig.HostConfig.PortBindings const hostPorts = Object.values(portBindings) if (!hostPorts || !Array.isArray(hostPorts) || hostPorts.length === 0) { return null } const hostPortsArray = hostPorts.flat() as { HostPort: string }[] const hostPortsStrings = hostPortsArray.map((binding) => binding.HostPort) if (hostPortsStrings.length > 0) { return `http://${hostname}:${hostPortsStrings[0]}` } } // Otherwise, return null if we can't determine a URL return null } async createContainerPreflight( serviceName: string ): Promise<{ success: boolean; message: string }> { const service = await Service.query().where('service_name', serviceName).first() if (!service) { return { success: false, message: `Service ${serviceName} not found`, } } if (service.installed) { return { success: false, message: `Service ${serviceName} is already installed`, } } // Check if installation is already in progress (database-level) if (service.installation_status === 'installing') { return { success: false, message: `Service ${serviceName} installation is already in progress`, } } // Double-check with in-memory tracking (race condition protection) if (this.activeInstallations.has(serviceName)) { return { success: false, message: `Service ${serviceName} installation is already in progress`, } } // Mark installation as in progress this.activeInstallations.add(serviceName) service.installation_status = 'installing' await service.save() // Check if a service wasn't marked as installed but has an existing container // This can happen if the service was created but not properly installed // or if the container was removed manually without updating the service status. // if (await this._checkIfServiceContainerExists(serviceName)) { // const removeResult = await this._removeServiceContainer(serviceName); // if (!removeResult.success) { // return { // success: false, // message: `Failed to remove existing container for service ${serviceName}: ${removeResult.message}`, // }; // } // } const containerConfig = this._parseContainerConfig(service.container_config) // Execute installation asynchronously and handle cleanup this._createContainer(service, containerConfig).catch(async (error) => { logger.error(`Installation failed for ${serviceName}: ${error.message}`) await this._cleanupFailedInstallation(serviceName) }) return { success: true, message: `Service ${serviceName} installation initiated successfully. You can receive updates via server-sent events.`, } } /** * Force reinstall a service by stopping, removing, and recreating its container. * This method will also clear any associated volumes/data. * Handles edge cases gracefully (e.g., container not running, container not found). 
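 * Sequence, as implemented below: stop the container if running, remove it, remove
 * matching volumes, mark the service uninstalled, then recreate it via _createContainer(),
 * broadcasting a progress event at each step.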
*/ async forceReinstall(serviceName: string): Promise<{ success: boolean; message: string }> { try { const service = await Service.query().where('service_name', serviceName).first() if (!service) { return { success: false, message: `Service ${serviceName} not found`, } } // Check if installation is already in progress if (this.activeInstallations.has(serviceName)) { return { success: false, message: `Service ${serviceName} installation is already in progress`, } } // Mark as installing to prevent concurrent operations this.activeInstallations.add(serviceName) service.installation_status = 'installing' await service.save() this._broadcast( serviceName, 'reinstall-starting', `Starting force reinstall for ${serviceName}...` ) // Step 1: Try to stop and remove the container if it exists try { const containers = await this.docker.listContainers({ all: true }) const container = containers.find((c) => c.Names.includes(`/${serviceName}`)) if (container) { const dockerContainer = this.docker.getContainer(container.Id) // Only try to stop if it's running if (container.State === 'running') { this._broadcast(serviceName, 'stopping', `Stopping container...`) await dockerContainer.stop({ t: 10 }).catch((error) => { // If already stopped, continue if (!error.message.includes('already stopped')) { logger.warn(`Error stopping container: ${error.message}`) } }) } // Step 2: Remove the container this._broadcast(serviceName, 'removing', `Removing container...`) await dockerContainer.remove({ force: true }).catch((error) => { logger.warn(`Error removing container: ${error.message}`) }) } else { this._broadcast( serviceName, 'no-container', `No existing container found, proceeding with installation...` ) } } catch (error) { logger.warn(`Error during container cleanup: ${error.message}`) this._broadcast(serviceName, 'cleanup-warning', `Warning during cleanup: ${error.message}`) } // Step 3: Clear volumes/data if needed try { this._broadcast(serviceName, 'clearing-volumes', `Checking for volumes to clear...`) const volumes = await this.docker.listVolumes() const serviceVolumes = volumes.Volumes?.filter( (v) => v.Name.includes(serviceName) || v.Labels?.service === serviceName ) || [] for (const vol of serviceVolumes) { try { const volume = this.docker.getVolume(vol.Name) await volume.remove({ force: true }) this._broadcast(serviceName, 'volume-removed', `Removed volume: ${vol.Name}`) } catch (error) { logger.warn(`Failed to remove volume ${vol.Name}: ${error.message}`) } } if (serviceVolumes.length === 0) { this._broadcast(serviceName, 'no-volumes', `No volumes found to clear`) } } catch (error) { logger.warn(`Error during volume cleanup: ${error.message}`) this._broadcast( serviceName, 'volume-cleanup-warning', `Warning during volume cleanup: ${error.message}` ) } // Step 4: Mark service as uninstalled service.installed = false service.installation_status = 'installing' await service.save() // Step 5: Recreate the container this._broadcast(serviceName, 'recreating', `Recreating container...`) const containerConfig = this._parseContainerConfig(service.container_config) // Execute installation asynchronously and handle cleanup this._createContainer(service, containerConfig).catch(async (error) => { logger.error(`Reinstallation failed for ${serviceName}: ${error.message}`) await this._cleanupFailedInstallation(serviceName) }) return { success: true, message: `Service ${serviceName} force reinstall initiated successfully. 
You can receive updates via server-sent events.`, } } catch (error) { logger.error(`Force reinstall failed for ${serviceName}: ${error.message}`) await this._cleanupFailedInstallation(serviceName) return { success: false, message: `Failed to force reinstall service ${serviceName}: ${error.message}`, } } } /** * Handles the long-running process of creating a Docker container for a service. * NOTE: This method should not be called directly. Instead, use `createContainerPreflight` to check prerequisites first * This method will also transmit server-sent events to the client to notify of progress. * @param serviceName * @returns */ async _createContainer( service: Service & { dependencies?: Service[] }, containerConfig: any ): Promise { try { this._broadcast(service.service_name, 'initializing', '') let dependencies = [] if (service.depends_on) { const dependency = await Service.query().where('service_name', service.depends_on).first() if (dependency) { dependencies.push(dependency) } } // First, check if the service has any dependencies that need to be installed first if (dependencies && dependencies.length > 0) { this._broadcast( service.service_name, 'checking-dependencies', `Checking dependencies for service ${service.service_name}...` ) for (const dependency of dependencies) { if (!dependency.installed) { this._broadcast( service.service_name, 'dependency-not-installed', `Dependency service ${dependency.service_name} is not installed. Installing it first...` ) await this._createContainer( dependency, this._parseContainerConfig(dependency.container_config) ) } else { this._broadcast( service.service_name, 'dependency-installed', `Dependency service ${dependency.service_name} is already installed.` ) } } } const imageExists = await this._checkImageExists(service.container_image) if (imageExists) { this._broadcast( service.service_name, 'image-exists', `Docker image ${service.container_image} already exists locally. Skipping pull...` ) } else { // Start pulling the Docker image and wait for it to complete const pullStream = await this.docker.pull(service.container_image) this._broadcast( service.service_name, 'pulling', `Pulling Docker image ${service.container_image}...` ) await new Promise((res) => this.docker.modem.followProgress(pullStream, res)) } if (service.service_name === SERVICE_NAMES.KIWIX) { await this._runPreinstallActions__KiwixServe() this._broadcast( service.service_name, 'preinstall-complete', `Pre-install actions for Kiwix Serve completed successfully.` ) } // GPU-aware configuration for Ollama let finalImage = service.container_image let gpuHostConfig = containerConfig?.HostConfig || {} if (service.service_name === SERVICE_NAMES.OLLAMA) { const gpuResult = await this._detectGPUType() if (gpuResult.type === 'nvidia') { this._broadcast( service.service_name, 'gpu-config', `NVIDIA container runtime detected. Configuring container with GPU support...` ) // Add GPU support for NVIDIA gpuHostConfig = { ...gpuHostConfig, DeviceRequests: [ { Driver: 'nvidia', Count: -1, // -1 means all GPUs Capabilities: [['gpu']], }, ], } } else if (gpuResult.type === 'amd') { this._broadcast( service.service_name, 'gpu-config', `AMD GPU detected. ROCm GPU acceleration is not yet supported in this version — proceeding with CPU-only configuration. GPU support for AMD will be available in a future update.` ) logger.warn('[DockerService] AMD GPU detected but ROCm support is not yet enabled. 
Using CPU-only configuration.') // TODO: Re-enable AMD GPU support once ROCm image and device discovery are validated. // When re-enabling: // 1. Switch image to 'ollama/ollama:rocm' // 2. Restore _discoverAMDDevices() to map /dev/kfd and /dev/dri/* into the container } else if (gpuResult.toolkitMissing) { this._broadcast( service.service_name, 'gpu-config', `NVIDIA GPU detected but NVIDIA Container Toolkit is not installed. Using CPU-only configuration. Install the toolkit and reinstall AI Assistant for GPU acceleration: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html` ) } else { this._broadcast( service.service_name, 'gpu-config', `No GPU detected. Using CPU-only configuration...` ) } } this._broadcast( service.service_name, 'creating', `Creating Docker container for service ${service.service_name}...` ) const container = await this.docker.createContainer({ Image: finalImage, name: service.service_name, ...(containerConfig?.User && { User: containerConfig.User }), HostConfig: gpuHostConfig, ...(containerConfig?.WorkingDir && { WorkingDir: containerConfig.WorkingDir }), ...(containerConfig?.ExposedPorts && { ExposedPorts: containerConfig.ExposedPorts }), ...(containerConfig?.Env && { Env: containerConfig.Env }), ...(service.container_command ? { Cmd: service.container_command.split(' ') } : {}), // Ensure container is attached to the Nomad docker network in production ...(process.env.NODE_ENV === 'production' && { NetworkingConfig: { EndpointsConfig: { [DockerService.NOMAD_NETWORK]: {}, }, }, }), }) this._broadcast( service.service_name, 'starting', `Starting Docker container for service ${service.service_name}...` ) await container.start() this._broadcast( service.service_name, 'finalizing', `Finalizing installation of service ${service.service_name}...` ) service.installed = true service.installation_status = 'idle' await service.save() // Remove from active installs tracking this.activeInstallations.delete(service.service_name) // If Ollama was just installed, trigger Nomad docs discovery and embedding if (service.service_name === SERVICE_NAMES.OLLAMA) { logger.info('[DockerService] Ollama installation complete. Default behavior is to not enable chat suggestions.') await KVStore.setValue('chat.suggestionsEnabled', false) logger.info('[DockerService] Ollama installation complete. 
Triggering Nomad docs discovery...') // Need to use dynamic imports here to avoid circular dependency const ollamaService = new (await import('./ollama_service.js')).OllamaService() const ragService = new (await import('./rag_service.js')).RagService(this, ollamaService) ragService.discoverNomadDocs().catch((error) => { logger.error('[DockerService] Failed to discover Nomad docs:', error) }) } this._broadcast( service.service_name, 'completed', `Service ${service.service_name} installation completed successfully.` ) } catch (error) { this._broadcast( service.service_name, 'error', `Error installing service ${service.service_name}: ${error.message}` ) // Mark install as failed and cleanup await this._cleanupFailedInstallation(service.service_name) throw new Error(`Failed to install service ${service.service_name}: ${error.message}`) } } async _checkIfServiceContainerExists(serviceName: string): Promise<boolean> { try { const containers = await this.docker.listContainers({ all: true }) return containers.some((container) => container.Names.includes(`/${serviceName}`)) } catch (error) { logger.error(`Error checking if service container exists: ${error.message}`) return false } } async _removeServiceContainer( serviceName: string ): Promise<{ success: boolean; message: string }> { try { const containers = await this.docker.listContainers({ all: true }) const container = containers.find((c) => c.Names.includes(`/${serviceName}`)) if (!container) { return { success: false, message: `Container for service ${serviceName} not found` } } const dockerContainer = this.docker.getContainer(container.Id) await dockerContainer.remove({ force: true }) return { success: true, message: `Service ${serviceName} container removed successfully` } } catch (error) { logger.error(`Error removing service container: ${error.message}`) return { success: false, message: `Failed to remove service ${serviceName} container: ${error.message}`, } } } private async _runPreinstallActions__KiwixServe(): Promise<void> { /** * At least one .zim file must be available before we can start the kiwix container. * We'll download the lightweight mini Wikipedia Top 100 zim file for this purpose. **/ const WIKIPEDIA_ZIM_URL = 'https://github.com/Crosstalk-Solutions/project-nomad/raw/refs/heads/main/install/wikipedia_en_100_mini_2025-06.zim' const filename = 'wikipedia_en_100_mini_2025-06.zim' const filepath = join(process.cwd(), ZIM_STORAGE_PATH, filename) logger.info(`[DockerService] Kiwix Serve pre-install: Downloading ZIM file to ${filepath}`) this._broadcast( SERVICE_NAMES.KIWIX, 'preinstall', `Running pre-install actions for Kiwix Serve...` ) this._broadcast( SERVICE_NAMES.KIWIX, 'preinstall', `Downloading Wikipedia ZIM file from ${WIKIPEDIA_ZIM_URL}. 
This may take some time...` ) try { await doResumableDownloadWithRetry({ url: WIKIPEDIA_ZIM_URL, filepath, timeout: 60000, allowedMimeTypes: [ 'application/x-zim', 'application/x-openzim', 'application/octet-stream', ], }) this._broadcast( SERVICE_NAMES.KIWIX, 'preinstall', `Downloaded Wikipedia ZIM file to ${filepath}` ) } catch (error) { this._broadcast( SERVICE_NAMES.KIWIX, 'preinstall-error', `Failed to download Wikipedia ZIM file: ${error.message}` ) throw new Error(`Pre-install action failed: ${error.message}`) } } private async _cleanupFailedInstallation(serviceName: string): Promise { try { const service = await Service.query().where('service_name', serviceName).first() if (service) { service.installation_status = 'error' await service.save() } this.activeInstallations.delete(serviceName) // Ensure any partially created container is removed await this._removeServiceContainer(serviceName) logger.info(`[DockerService] Cleaned up failed installation for ${serviceName}`) } catch (error) { logger.error( `[DockerService] Failed to cleanup installation for ${serviceName}: ${error.message}` ) } } /** * Detect GPU type and toolkit availability. * Primary: Check Docker runtimes via docker.info() (works from inside containers). * Fallback: lspci for host-based installs and AMD detection. */ private async _detectGPUType(): Promise<{ type: 'nvidia' | 'amd' | 'none'; toolkitMissing?: boolean }> { try { // Primary: Check Docker daemon for nvidia runtime (works from inside containers) try { const dockerInfo = await this.docker.info() const runtimes = dockerInfo.Runtimes || {} if ('nvidia' in runtimes) { logger.info('[DockerService] NVIDIA container runtime detected via Docker API') await this._persistGPUType('nvidia') return { type: 'nvidia' } } } catch (error) { logger.warn(`[DockerService] Could not query Docker info for GPU runtimes: ${error.message}`) } // Fallback: lspci for host-based installs (not available inside Docker) const execAsync = promisify(exec) // Check for NVIDIA GPU via lspci try { const { stdout: nvidiaCheck } = await execAsync( 'lspci 2>/dev/null | grep -i nvidia || true' ) if (nvidiaCheck.trim()) { // GPU hardware found but no nvidia runtime — toolkit not installed logger.warn('[DockerService] NVIDIA GPU detected via lspci but NVIDIA Container Toolkit is not installed') return { type: 'none', toolkitMissing: true } } } catch (error) { // lspci not available (likely inside Docker container), continue } // Check for AMD GPU via lspci — restrict to display controller classes to avoid // false positives from AMD CPU host bridges, PCI bridges, and chipset devices. try { const { stdout: amdCheck } = await execAsync( 'lspci 2>/dev/null | grep -iE "VGA|3D controller|Display" | grep -iE "amd|radeon" || true' ) if (amdCheck.trim()) { logger.info('[DockerService] AMD GPU detected via lspci') await this._persistGPUType('amd') return { type: 'amd' } } } catch (error) { // lspci not available, continue } // Last resort: check if we previously detected a GPU and it's likely still present. // This handles cases where live detection fails transiently (e.g., Docker daemon // hiccup, runtime temporarily unavailable) but the hardware hasn't changed. try { const savedType = await KVStore.getValue('gpu.type') if (savedType === 'nvidia' || savedType === 'amd') { logger.info(`[DockerService] No GPU detected live, but KV store has '${savedType}' from previous detection. 
Using saved value.`) return { type: savedType as 'nvidia' | 'amd' } } } catch { // KV store not available, continue } logger.info('[DockerService] No GPU detected') return { type: 'none' } } catch (error) { logger.warn(`[DockerService] Error detecting GPU type: ${error.message}`) return { type: 'none' } } } private async _persistGPUType(type: 'nvidia' | 'amd'): Promise { try { await KVStore.setValue('gpu.type', type) logger.info(`[DockerService] Persisted GPU type '${type}' to KV store`) } catch (error) { logger.warn(`[DockerService] Failed to persist GPU type: ${error.message}`) } } /** * Discover AMD GPU DRI devices dynamically. * Returns an array of device configurations for Docker. */ // private async _discoverAMDDevices(): Promise< // Array<{ PathOnHost: string; PathInContainer: string; CgroupPermissions: string }> // > { // try { // const devices: Array<{ // PathOnHost: string // PathInContainer: string // CgroupPermissions: string // }> = [] // // Always add /dev/kfd (Kernel Fusion Driver) // devices.push({ // PathOnHost: '/dev/kfd', // PathInContainer: '/dev/kfd', // CgroupPermissions: 'rwm', // }) // // Discover DRI devices in /dev/dri/ // try { // const driDevices = await readdir('/dev/dri') // for (const device of driDevices) { // const devicePath = `/dev/dri/${device}` // devices.push({ // PathOnHost: devicePath, // PathInContainer: devicePath, // CgroupPermissions: 'rwm', // }) // } // logger.info( // `[DockerService] Discovered ${driDevices.length} DRI devices: ${driDevices.join(', ')}` // ) // } catch (error) { // logger.warn(`[DockerService] Could not read /dev/dri directory: ${error.message}`) // // Fallback to common device names if directory read fails // const fallbackDevices = ['card0', 'renderD128'] // for (const device of fallbackDevices) { // devices.push({ // PathOnHost: `/dev/dri/${device}`, // PathInContainer: `/dev/dri/${device}`, // CgroupPermissions: 'rwm', // }) // } // logger.info(`[DockerService] Using fallback DRI devices: ${fallbackDevices.join(', ')}`) // } // return devices // } catch (error) { // logger.error(`[DockerService] Error discovering AMD devices: ${error.message}`) // return [] // } // } /** * Update a service container to a new image version while preserving volumes and data. * Includes automatic rollback if the new container fails health checks. */ async updateContainer( serviceName: string, targetVersion: string ): Promise<{ success: boolean; message: string }> { try { const service = await Service.query().where('service_name', serviceName).first() if (!service) { return { success: false, message: `Service ${serviceName} not found` } } if (!service.installed) { return { success: false, message: `Service ${serviceName} is not installed` } } if (this.activeInstallations.has(serviceName)) { return { success: false, message: `Service ${serviceName} already has an operation in progress` } } this.activeInstallations.add(serviceName) // Compute new image string const currentImage = service.container_image const imageBase = currentImage.includes(':') ? 
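// e.g. an image reference like 'ollama/ollama:0.6.2' (illustrative tag) yields base 'ollama/ollama', which is retagged with targetVersion below; references without a tag keep the full name as the base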
currentImage.substring(0, currentImage.lastIndexOf(':')) : currentImage const newImage = `${imageBase}:${targetVersion}` // Step 1: Pull new image this._broadcast(serviceName, 'update-pulling', `Pulling image ${newImage}...`) const pullStream = await this.docker.pull(newImage) await new Promise((res) => this.docker.modem.followProgress(pullStream, res)) // Step 2: Find and stop existing container this._broadcast(serviceName, 'update-stopping', `Stopping current container...`) const containers = await this.docker.listContainers({ all: true }) const existingContainer = containers.find((c) => c.Names.includes(`/${serviceName}`)) if (!existingContainer) { this.activeInstallations.delete(serviceName) return { success: false, message: `Container for ${serviceName} not found` } } const oldContainer = this.docker.getContainer(existingContainer.Id) // Inspect to capture full config before stopping const inspectData = await oldContainer.inspect() if (existingContainer.State === 'running') { await oldContainer.stop({ t: 15 }) } // Step 3: Rename old container as safety net const oldName = `${serviceName}_old` await oldContainer.rename({ name: oldName }) // Step 4: Create new container with inspected config + new image this._broadcast(serviceName, 'update-creating', `Creating updated container...`) const hostConfig = inspectData.HostConfig || {} // Re-run GPU detection for Ollama so updates always reflect the current GPU environment. // This handles cases where the NVIDIA Container Toolkit was installed after the initial // Ollama setup, and ensures DeviceRequests are always built fresh rather than relying on // round-tripping the Docker inspect format back into the create API. let updatedDeviceRequests: any[] | undefined = undefined if (serviceName === SERVICE_NAMES.OLLAMA) { const gpuResult = await this._detectGPUType() if (gpuResult.type === 'nvidia') { this._broadcast( serviceName, 'update-gpu-config', `NVIDIA container runtime detected. Configuring updated container with GPU support...` ) updatedDeviceRequests = [ { Driver: 'nvidia', Count: -1, Capabilities: [['gpu']], }, ] } else if (gpuResult.type === 'amd') { this._broadcast( serviceName, 'update-gpu-config', `AMD GPU detected. ROCm GPU acceleration is not yet supported — using CPU-only configuration.` ) } else if (gpuResult.toolkitMissing) { this._broadcast( serviceName, 'update-gpu-config', `NVIDIA GPU detected but NVIDIA Container Toolkit is not installed. Using CPU-only configuration. Install the toolkit and reinstall AI Assistant for GPU acceleration: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html` ) } else { this._broadcast(serviceName, 'update-gpu-config', `No GPU detected. Using CPU-only configuration.`) } } const newContainerConfig: any = { Image: newImage, name: serviceName, Env: inspectData.Config?.Env || undefined, Cmd: inspectData.Config?.Cmd || undefined, ExposedPorts: inspectData.Config?.ExposedPorts || undefined, WorkingDir: inspectData.Config?.WorkingDir || undefined, User: inspectData.Config?.User || undefined, HostConfig: { Binds: hostConfig.Binds || undefined, PortBindings: hostConfig.PortBindings || undefined, RestartPolicy: hostConfig.RestartPolicy || undefined, DeviceRequests: serviceName === SERVICE_NAMES.OLLAMA ? updatedDeviceRequests : (hostConfig.DeviceRequests || undefined), Devices: hostConfig.Devices || undefined, }, NetworkingConfig: inspectData.NetworkSettings?.Networks ? 
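// Reattach the new container to every network the old one was on; the empty endpoint configs let Docker recreate default settings (custom aliases/static IPs are not carried over)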
{ EndpointsConfig: Object.fromEntries( Object.keys(inspectData.NetworkSettings.Networks).map((net) => [net, {}]) ), } : undefined, } // Remove undefined values from HostConfig Object.keys(newContainerConfig.HostConfig).forEach((key) => { if (newContainerConfig.HostConfig[key] === undefined) { delete newContainerConfig.HostConfig[key] } }) let newContainer: any try { newContainer = await this.docker.createContainer(newContainerConfig) } catch (createError) { // Rollback: rename old container back this._broadcast(serviceName, 'update-rollback', `Failed to create new container: ${createError.message}. Rolling back...`) const rollbackContainer = this.docker.getContainer((await this.docker.listContainers({ all: true })).find((c) => c.Names.includes(`/${oldName}`))!.Id) await rollbackContainer.rename({ name: serviceName }) await rollbackContainer.start() this.activeInstallations.delete(serviceName) return { success: false, message: `Failed to create updated container: ${createError.message}` } } // Step 5: Start new container this._broadcast(serviceName, 'update-starting', `Starting updated container...`) await newContainer.start() // Step 6: Health check — verify container stays running for 5 seconds await new Promise((resolve) => setTimeout(resolve, 5000)) const newContainerInfo = await newContainer.inspect() if (newContainerInfo.State?.Running) { // Healthy — clean up old container try { const oldContainerRef = this.docker.getContainer( (await this.docker.listContainers({ all: true })).find((c) => c.Names.includes(`/${oldName}`) )?.Id || '' ) await oldContainerRef.remove({ force: true }) } catch { // Old container may already be gone } // Update DB service.container_image = newImage service.available_update_version = null await service.save() this.activeInstallations.delete(serviceName) this._broadcast( serviceName, 'update-complete', `Successfully updated ${serviceName} to ${targetVersion}` ) return { success: true, message: `Service ${serviceName} updated to ${targetVersion}` } } else { // Unhealthy — rollback this._broadcast( serviceName, 'update-rollback', `New container failed health check. Rolling back to previous version...` ) try { await newContainer.stop({ t: 5 }).catch(() => {}) await newContainer.remove({ force: true }) } catch { // Best effort cleanup } // Restore old container const oldContainers = await this.docker.listContainers({ all: true }) const oldRef = oldContainers.find((c) => c.Names.includes(`/${oldName}`)) if (oldRef) { const rollbackContainer = this.docker.getContainer(oldRef.Id) await rollbackContainer.rename({ name: serviceName }) await rollbackContainer.start() } this.activeInstallations.delete(serviceName) return { success: false, message: `Update failed: new container did not stay running. 
Rolled back to previous version.`, } } } catch (error) { this.activeInstallations.delete(serviceName) this._broadcast( serviceName, 'update-rollback', `Update failed: ${error.message}` ) logger.error(`[DockerService] Update failed for ${serviceName}: ${error.message}`) return { success: false, message: `Update failed: ${error.message}` } } } private _broadcast(service: string, status: string, message: string) { transmit.broadcast(BROADCAST_CHANNELS.SERVICE_INSTALLATION, { service_name: service, timestamp: new Date().toISOString(), status, message, }) logger.info(`[DockerService] [${service}] ${status}: ${message}`) } private _parseContainerConfig(containerConfig: any): any { if (!containerConfig) { return {} } try { // Handle the case where containerConfig is returned as an object by DB instead of a string let toParse = containerConfig if (typeof containerConfig === 'object') { toParse = JSON.stringify(containerConfig) } return JSON.parse(toParse) } catch (error) { logger.error(`Failed to parse container configuration: ${error.message}`) throw new Error(`Invalid container configuration: ${error.message}`) } } /** * Check if a Docker image exists locally. * @param imageName - The name and tag of the image (e.g., "nginx:latest") * @returns - True if the image exists locally, false otherwise */ private async _checkImageExists(imageName: string): Promise { try { const images = await this.docker.listImages() // Check if any image has a RepoTag that matches the requested image return images.some((image) => image.RepoTags && image.RepoTags.includes(imageName)) } catch (error) { logger.warn(`Error checking if image exists: ${error.message}`) // If run into an error, assume the image does not exist return false } } } ================================================ FILE: admin/app/services/docs_service.ts ================================================ import Markdoc from '@markdoc/markdoc' import { streamToString } from '../../util/docs.js' import { getFile, getFileStatsIfExists, listDirectoryContentsRecursive } from '../utils/fs.js' import path from 'path' import InternalServerErrorException from '#exceptions/internal_server_error_exception' import logger from '@adonisjs/core/services/logger' export class DocsService { private docsPath = path.join(process.cwd(), 'docs') private static readonly DOC_ORDER: Record = { 'home': 1, 'getting-started': 2, 'use-cases': 3, 'faq': 4, 'about': 5, 'release-notes': 6, } async getDocs() { const contents = await listDirectoryContentsRecursive(this.docsPath) const files: Array<{ title: string; slug: string }> = [] for (const item of contents) { if (item.type === 'file' && item.name.endsWith('.md')) { const cleaned = this.prettify(item.name) files.push({ title: cleaned, slug: item.name.replace(/\.md$/, ''), }) } } return files.sort((a, b) => { const orderA = DocsService.DOC_ORDER[a.slug] ?? 999 const orderB = DocsService.DOC_ORDER[b.slug] ?? 
999 return orderA - orderB }) } parse(content: string) { try { const ast = Markdoc.parse(content) const config = this.getConfig() const errors = Markdoc.validate(ast, config) // Filter out attribute-undefined errors which may be caused by emojis and special characters const criticalErrors = errors.filter((e) => e.error.id !== 'attribute-undefined') if (criticalErrors.length > 0) { logger.error('Markdoc validation errors:', errors.map((e) => JSON.stringify(e.error)).join(', ')) throw new Error('Markdoc validation failed') } return Markdoc.transform(ast, config) } catch (error) { logger.error('Error parsing Markdoc content:', error) throw new InternalServerErrorException(`Error parsing content: ${(error as Error).message}`) } } async parseFile(_filename: string) { try { if (!_filename) { throw new Error('Filename is required') } const filename = _filename.endsWith('.md') ? _filename : `${_filename}.md` // Prevent path traversal — resolved path must stay within the docs directory const basePath = path.resolve(this.docsPath) const fullPath = path.resolve(path.join(this.docsPath, filename)) if (!fullPath.startsWith(basePath + path.sep)) { throw new Error('Invalid document slug') } const fileExists = await getFileStatsIfExists(fullPath) if (!fileExists) { throw new Error(`File not found: ${filename}`) } const fileStream = await getFile(fullPath, 'stream') if (!fileStream) { throw new Error(`Failed to read file stream: ${filename}`) } const content = await streamToString(fileStream) return this.parse(content) } catch (error) { throw new InternalServerErrorException(`Error parsing file: ${(error as Error).message}`) } } private static readonly TITLE_OVERRIDES: Record = { 'faq': 'FAQ', } private prettify(filename: string) { const slug = filename.replace(/\.md$/, '') if (DocsService.TITLE_OVERRIDES[slug]) { return DocsService.TITLE_OVERRIDES[slug] } // Remove hyphens, underscores, and file extension const cleaned = slug.replace(/_/g, ' ').replace(/-/g, ' ') // Convert to Title Case const titleCased = cleaned.replace(/\b\w/g, (char) => char.toUpperCase()) return titleCased.charAt(0).toUpperCase() + titleCased.slice(1) } private getConfig() { return { tags: { callout: { render: 'Callout', attributes: { type: { type: String, default: 'info', matches: ['info', 'warning', 'error', 'success'], }, title: { type: String, }, }, }, }, nodes: { heading: { render: 'Heading', attributes: { level: { type: Number, required: true }, id: { type: String }, }, }, list: { render: 'List', attributes: { ordered: { type: Boolean }, start: { type: Number }, }, }, list_item: { render: 'ListItem', attributes: { marker: { type: String }, className: { type: String }, class: { type: String } } }, table: { render: 'Table', }, thead: { render: 'TableHead', }, tbody: { render: 'TableBody', }, tr: { render: 'TableRow', }, th: { render: 'TableHeader', }, td: { render: 'TableCell', }, paragraph: { render: 'Paragraph', }, image: { render: 'Image', attributes: { src: { type: String, required: true }, alt: { type: String }, title: { type: String }, }, }, link: { render: 'Link', attributes: { href: { type: String, required: true }, title: { type: String }, }, }, fence: { render: 'CodeBlock', attributes: { content: { type: String }, language: { type: String }, }, }, code: { render: 'InlineCode', attributes: { content: { type: String }, }, }, hr: { render: 'HorizontalRule', }, }, } } } ================================================ FILE: admin/app/services/download_service.ts ================================================ import { 
inject } from '@adonisjs/core' import { QueueService } from './queue_service.js' import { RunDownloadJob } from '#jobs/run_download_job' import { DownloadModelJob } from '#jobs/download_model_job' import { DownloadJobWithProgress } from '../../types/downloads.js' import { normalize } from 'path' @inject() export class DownloadService { constructor(private queueService: QueueService) {} async listDownloadJobs(filetype?: string): Promise<DownloadJobWithProgress[]> { // Get regular file download jobs (zim, map, etc.) const queue = this.queueService.getQueue(RunDownloadJob.queue) const fileJobs = await queue.getJobs(['waiting', 'active', 'delayed', 'failed']) const fileDownloads = fileJobs.map((job) => ({ jobId: job.id!.toString(), url: job.data.url, progress: parseInt(job.progress.toString(), 10), filepath: normalize(job.data.filepath), filetype: job.data.filetype, status: (job.failedReason ? 'failed' : 'active') as 'active' | 'failed', failedReason: job.failedReason || undefined, })) // Get Ollama model download jobs const modelQueue = this.queueService.getQueue(DownloadModelJob.queue) const modelJobs = await modelQueue.getJobs(['waiting', 'active', 'delayed', 'failed']) const modelDownloads = modelJobs.map((job) => ({ jobId: job.id!.toString(), url: job.data.modelName || 'Unknown Model', // Use model name as url progress: parseInt(job.progress.toString(), 10), filepath: job.data.modelName || 'Unknown Model', // Use model name as filepath filetype: 'model', status: (job.failedReason ? 'failed' : 'active') as 'active' | 'failed', failedReason: job.failedReason || undefined, })) const allDownloads = [...fileDownloads, ...modelDownloads] // Filter by filetype if specified const filtered = allDownloads.filter((job) => !filetype || job.filetype === filetype) // Sort: active downloads first (by progress desc), then failed at the bottom return filtered.sort((a, b) => { if (a.status === 'failed' && b.status !== 'failed') return 1 if (a.status !== 'failed' && b.status === 'failed') return -1 return b.progress - a.progress }) } async removeFailedJob(jobId: string): Promise<void> { for (const queueName of [RunDownloadJob.queue, DownloadModelJob.queue]) { const queue = this.queueService.getQueue(queueName) const job = await queue.getJob(jobId) if (job) { await job.remove() return } } } } ================================================ FILE: admin/app/services/map_service.ts ================================================ import { BaseStylesFile, MapLayer } from '../../types/maps.js' import { DownloadRemoteSuccessCallback, FileEntry, } from '../../types/files.js' import { doResumableDownloadWithRetry } from '../utils/downloads.js' import { extract } from 'tar' import env from '#start/env' import { listDirectoryContentsRecursive, getFileStatsIfExists, deleteFileIfExists, getFile, ensureDirectoryExists, } from '../utils/fs.js' import { join, resolve, sep } from 'path' import urlJoin from 'url-join' import { RunDownloadJob } from '#jobs/run_download_job' import logger from '@adonisjs/core/services/logger' import InstalledResource from '#models/installed_resource' import { CollectionManifestService } from './collection_manifest_service.js' import type { CollectionWithStatus, MapsSpec } from '../../types/collections.js' const BASE_ASSETS_MIME_TYPES = [ 'application/gzip', 'application/x-gzip', 'application/octet-stream', ] const PMTILES_ATTRIBUTION = 'Protomaps © OpenStreetMap' const PMTILES_MIME_TYPES = ['application/vnd.pmtiles', 'application/octet-stream'] interface IMapService { downloadRemoteSuccessCallback: 
DownloadRemoteSuccessCallback } export class MapService implements IMapService { private readonly mapStoragePath = '/storage/maps' private readonly baseStylesFile = 'nomad-base-styles.json' private readonly basemapsAssetsDir = 'basemaps-assets' private readonly baseAssetsTarFile = 'base-assets.tar.gz' private readonly baseDirPath = join(process.cwd(), this.mapStoragePath) private baseAssetsExistCache: boolean | null = null async listRegions() { const files = (await this.listAllMapStorageItems()).filter( (item) => item.type === 'file' && item.name.endsWith('.pmtiles') ) return { files, } } async downloadBaseAssets(url?: string) { const tempTarPath = join(this.baseDirPath, this.baseAssetsTarFile) const defaultTarFileURL = new URL( this.baseAssetsTarFile, 'https://github.com/Crosstalk-Solutions/project-nomad-maps/raw/refs/heads/master/' ) const resolvedURL = url ? new URL(url) : defaultTarFileURL await doResumableDownloadWithRetry({ url: resolvedURL.toString(), filepath: tempTarPath, timeout: 30000, max_retries: 2, allowedMimeTypes: BASE_ASSETS_MIME_TYPES, onAttemptError(error, attempt) { console.error(`Attempt ${attempt} to download tar file failed: ${error.message}`) }, }) const tarFileBuffer = await getFileStatsIfExists(tempTarPath) if (!tarFileBuffer) { throw new Error(`Failed to download tar file`) } await extract({ cwd: join(process.cwd(), this.mapStoragePath), file: tempTarPath, strip: 1, }) await deleteFileIfExists(tempTarPath) // Invalidate cache since we just downloaded new assets this.baseAssetsExistCache = true return true } async downloadCollection(slug: string): Promise { const manifestService = new CollectionManifestService() const spec = await manifestService.getSpecWithFallback('maps') if (!spec) return null const collection = spec.collections.find((c) => c.slug === slug) if (!collection) return null // Filter out already installed const installed = await InstalledResource.query().where('resource_type', 'map') const installedIds = new Set(installed.map((r) => r.resource_id)) const toDownload = collection.resources.filter((r) => !installedIds.has(r.id)) if (toDownload.length === 0) return null const downloadFilenames: string[] = [] for (const resource of toDownload) { const existing = await RunDownloadJob.getByUrl(resource.url) if (existing) { logger.warn(`[MapService] Download already in progress for URL ${resource.url}, skipping.`) continue } const filename = resource.url.split('/').pop() if (!filename) { logger.warn(`[MapService] Could not determine filename from URL ${resource.url}, skipping.`) continue } downloadFilenames.push(filename) const filepath = join(process.cwd(), this.mapStoragePath, 'pmtiles', filename) await RunDownloadJob.dispatch({ url: resource.url, filepath, timeout: 30000, allowedMimeTypes: PMTILES_MIME_TYPES, forceNew: true, filetype: 'map', resourceMetadata: { resource_id: resource.id, version: resource.version, collection_ref: slug, }, }) } return downloadFilenames.length > 0 ? 
downloadFilenames : null } async downloadRemoteSuccessCallback(urls: string[], _: boolean) { // Create InstalledResource entries for downloaded map files for (const url of urls) { const filename = url.split('/').pop() if (!filename) continue const parsed = CollectionManifestService.parseMapFilename(filename) if (!parsed) continue const filepath = join(process.cwd(), this.mapStoragePath, 'pmtiles', filename) const stats = await getFileStatsIfExists(filepath) try { const { DateTime } = await import('luxon') await InstalledResource.updateOrCreate( { resource_id: parsed.resource_id, resource_type: 'map' }, { version: parsed.version, url: url, file_path: filepath, file_size_bytes: stats ? Number(stats.size) : null, installed_at: DateTime.now(), } ) logger.info(`[MapService] Created InstalledResource entry for: ${parsed.resource_id}`) } catch (error) { logger.error(`[MapService] Failed to create InstalledResource for ${filename}:`, error) } } } async downloadRemote(url: string): Promise<{ filename: string; jobId?: string }> { const parsed = new URL(url) if (!parsed.pathname.endsWith('.pmtiles')) { throw new Error(`Invalid PMTiles file URL: ${url}. URL must end with .pmtiles`) } const existing = await RunDownloadJob.getByUrl(url) if (existing) { throw new Error(`Download already in progress for URL ${url}`) } const filename = url.split('/').pop() if (!filename) { throw new Error('Could not determine filename from URL') } const filepath = join(process.cwd(), this.mapStoragePath, 'pmtiles', filename) // First, ensure base assets are present - regions depend on them const baseAssetsExist = await this.ensureBaseAssets() if (!baseAssetsExist) { throw new Error( 'Base map assets are missing and could not be downloaded. Please check your connection and try again.' ) } // Parse resource metadata const parsedFilename = CollectionManifestService.parseMapFilename(filename) const resourceMetadata = parsedFilename ? { resource_id: parsedFilename.resource_id, version: parsedFilename.version, collection_ref: null } : undefined // Dispatch background job const result = await RunDownloadJob.dispatch({ url, filepath, timeout: 30000, allowedMimeTypes: PMTILES_MIME_TYPES, forceNew: true, filetype: 'map', resourceMetadata, }) if (!result.job) { throw new Error('Failed to dispatch download job') } logger.info(`[MapService] Dispatched download job ${result.job.id} for URL ${url}`) return { filename, jobId: result.job?.id, } } async downloadRemotePreflight( url: string ): Promise<{ filename: string; size: number } | { message: string }> { try { const parsed = new URL(url) if (!parsed.pathname.endsWith('.pmtiles')) { throw new Error(`Invalid PMTiles file URL: ${url}. URL must end with .pmtiles`) } const filename = url.split('/').pop() if (!filename) { throw new Error('Could not determine filename from URL') } // Perform a HEAD request to get the content length const { default: axios } = await import('axios') const response = await axios.head(url) if (response.status !== 200) { throw new Error(`Failed to fetch file info: ${response.status} ${response.statusText}`) } const contentLength = response.headers['content-length'] const size = contentLength ? 
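// Servers that omit the Content-Length header yield a size of 0, which callers can treat as "size unknown"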
parseInt(contentLength, 10) : 0 return { filename, size } } catch (error: any) { return { message: `Preflight check failed: ${error.message}` } } } async generateStylesJSON(host: string | null = null, protocol: string = 'http'): Promise<BaseStylesFile> { if (!(await this.checkBaseAssetsExist())) { throw new Error('Base map assets are missing from storage/maps') } const baseStylePath = join(this.baseDirPath, this.baseStylesFile) const baseStyle = await getFile(baseStylePath, 'string') if (!baseStyle) { throw new Error('Base styles file not found in storage/maps') } const rawStyles = JSON.parse(baseStyle.toString()) as BaseStylesFile const regions = (await this.listRegions()).files /** If we have the host, use it to build public URLs, otherwise we'll fall back to defaults * This is mainly useful because we need to know what host the user is accessing from in order to * properly generate URLs in the styles file * e.g. user is accessing from "example.com", but we would by default generate "localhost:8080/..." so maps would * fail to load. */ const sources = this.generateSourcesArray(host, regions, protocol) const baseUrl = this.getPublicFileBaseUrl(host, this.basemapsAssetsDir, protocol) const styles = await this.generateStylesFile( rawStyles, sources, urlJoin(baseUrl, 'sprites/v4/light'), urlJoin(baseUrl, 'fonts/{fontstack}/{range}.pbf') ) return styles } async listCuratedCollections(): Promise<CollectionWithStatus[]> { const manifestService = new CollectionManifestService() return manifestService.getMapCollectionsWithStatus() } async fetchLatestCollections(): Promise<MapsSpec | null> { const manifestService = new CollectionManifestService() return manifestService.fetchAndCacheSpec('maps') } async ensureBaseAssets(): Promise<boolean> { const exists = await this.checkBaseAssetsExist() if (exists) { return true } return await this.downloadBaseAssets() } private async checkBaseAssetsExist(useCache: boolean = true): Promise<boolean> { // Return cached result if available and caching is enabled if (useCache && this.baseAssetsExistCache !== null) { return this.baseAssetsExistCache } await ensureDirectoryExists(this.baseDirPath) const baseStylePath = join(this.baseDirPath, this.baseStylesFile) const basemapsAssetsPath = join(this.baseDirPath, this.basemapsAssetsDir) const [baseStyleExists, basemapsAssetsExists] = await Promise.all([ getFileStatsIfExists(baseStylePath), getFileStatsIfExists(basemapsAssetsPath), ]) const exists = !!baseStyleExists && !!basemapsAssetsExists // update cache this.baseAssetsExistCache = exists return exists } private async listAllMapStorageItems(): Promise<FileEntry[]> { await ensureDirectoryExists(this.baseDirPath) return await listDirectoryContentsRecursive(this.baseDirPath) } private generateSourcesArray(host: string | null, regions: FileEntry[], protocol: string = 'http'): BaseStylesFile['sources'][] { const sources: BaseStylesFile['sources'][] = [] const baseUrl = this.getPublicFileBaseUrl(host, 'pmtiles', protocol) for (const region of regions) { if (region.type === 'file' && region.name.endsWith('.pmtiles')) { // Strip .pmtiles and date suffix (e.g. "alaska_2025-12" -> "alaska") for stable source names const parsed = CollectionManifestService.parseMapFilename(region.name) const regionName = parsed ? 
parsed.resource_id : region.name.replace('.pmtiles', '') const source: BaseStylesFile['sources'] = {} const sourceUrl = urlJoin(baseUrl, region.name) source[regionName] = { type: 'vector', attribution: PMTILES_ATTRIBUTION, url: `pmtiles://${sourceUrl}`, } sources.push(source) } } return sources } private async generateStylesFile( template: BaseStylesFile, sources: BaseStylesFile['sources'][], sprites: string, glyphs: string ): Promise<BaseStylesFile> { const layersTemplates = template.layers.filter((layer) => layer.source) const withoutSources = template.layers.filter((layer) => !layer.source) template.sources = {} // Clear existing sources template.layers = [...withoutSources] // Start with layers that don't depend on sources for (const source of sources) { for (const layerTemplate of layersTemplates) { const layer: MapLayer = { ...layerTemplate, id: `${layerTemplate.id}-${Object.keys(source)[0]}`, type: layerTemplate.type, source: Object.keys(source)[0], } template.layers.push(layer) } template.sources = Object.assign(template.sources, source) } template.sprite = sprites template.glyphs = glyphs return template } async delete(file: string): Promise<void> { let fileName = file if (!fileName.endsWith('.pmtiles')) { fileName += '.pmtiles' } const basePath = resolve(join(this.baseDirPath, 'pmtiles')) const fullPath = resolve(join(basePath, fileName)) // Prevent path traversal — resolved path must stay within the storage directory if (!fullPath.startsWith(basePath + sep)) { throw new Error('Invalid filename') } const exists = await getFileStatsIfExists(fullPath) if (!exists) { throw new Error('not_found') } await deleteFileIfExists(fullPath) // Clean up InstalledResource entry const parsed = CollectionManifestService.parseMapFilename(fileName) if (parsed) { await InstalledResource.query() .where('resource_id', parsed.resource_id) .where('resource_type', 'map') .delete() logger.info(`[MapService] Deleted InstalledResource entry for: ${parsed.resource_id}`) } } /* * Gets the appropriate public URL for a map asset depending on environment */ private getPublicFileBaseUrl(specifiedHost: string | null, childPath: string, protocol: string = 'http'): string { function getHost() { try { const localUrlRaw = env.get('URL') if (!localUrlRaw) return 'localhost' const localUrl = new URL(localUrlRaw) return localUrl.host } catch (error) { return 'localhost' } } const host = specifiedHost || getHost() const withProtocol = host.startsWith('http') ? host : `${protocol}://${host}` const baseUrlPath = process.env.NODE_ENV === 'production' ? 
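// Assumption based on the branches below: in production, map assets are served from the web root, while in development they are served from under /storage/maps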
childPath : urlJoin(this.mapStoragePath, childPath) const baseUrl = new URL(baseUrlPath, withProtocol).toString() return baseUrl } } ================================================ FILE: admin/app/services/ollama_service.ts ================================================ import { inject } from '@adonisjs/core' import { ChatRequest, Ollama } from 'ollama' import { NomadOllamaModel } from '../../types/ollama.js' import { FALLBACK_RECOMMENDED_OLLAMA_MODELS } from '../../constants/ollama.js' import fs from 'node:fs/promises' import path from 'node:path' import logger from '@adonisjs/core/services/logger' import axios from 'axios' import { DownloadModelJob } from '#jobs/download_model_job' import { SERVICE_NAMES } from '../../constants/service_names.js' import transmit from '@adonisjs/transmit/services/main' import Fuse, { IFuseOptions } from 'fuse.js' import { BROADCAST_CHANNELS } from '../../constants/broadcast.js' import env from '#start/env' import { NOMAD_API_DEFAULT_BASE_URL } from '../../constants/misc.js' const NOMAD_MODELS_API_PATH = '/api/v1/ollama/models' const MODELS_CACHE_FILE = path.join(process.cwd(), 'storage', 'ollama-models-cache.json') const CACHE_MAX_AGE_MS = 24 * 60 * 60 * 1000 // 24 hours @inject() export class OllamaService { private ollama: Ollama | null = null private ollamaInitPromise: Promise<void> | null = null constructor() { } private async _initializeOllamaClient() { if (!this.ollamaInitPromise) { this.ollamaInitPromise = (async () => { const dockerService = new (await import('./docker_service.js')).DockerService() const ollamaUrl = await dockerService.getServiceURL(SERVICE_NAMES.OLLAMA) if (!ollamaUrl) { throw new Error('Ollama service is not installed or running.') } this.ollama = new Ollama({ host: ollamaUrl }) })() } return this.ollamaInitPromise } private async _ensureDependencies() { if (!this.ollama) { await this._initializeOllamaClient() } } /** * Downloads a model from the Ollama service with progress tracking. Where possible, * one should dispatch a background job instead of calling this method directly, to avoid blocking for a long time. * @param model Model name to download * @returns Success status and message */ async downloadModel(model: string, progressCallback?: (percent: number) => void): Promise<{ success: boolean; message: string }> { try { await this._ensureDependencies() if (!this.ollama) { throw new Error('Ollama client is not initialized.') } // See if model is already installed const installedModels = await this.getModels() if (installedModels && installedModels.some((m) => m.name === model)) { logger.info(`[OllamaService] Model "${model}" is already installed.`) return { success: true, message: 'Model is already installed.' } } // Returns AbortableAsyncIterator const downloadStream = await this.ollama.pull({ model, stream: true, }) for await (const chunk of downloadStream) { if (chunk.completed && chunk.total) { const percent = ((chunk.completed / chunk.total) * 100).toFixed(2) const percentNum = parseFloat(percent) this.broadcastDownloadProgress(model, percentNum) if (progressCallback) { progressCallback(percentNum) } } } logger.info(`[OllamaService] Model "${model}" downloaded successfully.`) return { success: true, message: 'Model downloaded successfully.' } } catch (error) { logger.error( `[OllamaService] Failed to download model "${model}": ${error instanceof Error ? error.message : error }` ) return { success: false, message: 'Failed to download model.' 
} } } async dispatchModelDownload(modelName: string): Promise<{ success: boolean; message: string }> { try { logger.info(`[OllamaService] Dispatching model download for ${modelName} via job queue`) await DownloadModelJob.dispatch({ modelName, }) return { success: true, message: 'Model download has been queued successfully. It will start shortly after Ollama and Open WebUI are ready (if not already).', } } catch (error) { logger.error( `[OllamaService] Failed to dispatch model download for ${modelName}: ${error instanceof Error ? error.message : error}` ) return { success: false, message: 'Failed to queue model download. Please try again.', } } } public async getClient() { await this._ensureDependencies() return this.ollama! } public async chat(chatRequest: ChatRequest & { stream?: boolean }) { await this._ensureDependencies() if (!this.ollama) { throw new Error('Ollama client is not initialized.') } return await this.ollama.chat({ ...chatRequest, stream: false, }) } public async chatStream(chatRequest: ChatRequest) { await this._ensureDependencies() if (!this.ollama) { throw new Error('Ollama client is not initialized.') } return await this.ollama.chat({ ...chatRequest, stream: true, }) } public async checkModelHasThinking(modelName: string): Promise { await this._ensureDependencies() if (!this.ollama) { throw new Error('Ollama client is not initialized.') } const modelInfo = await this.ollama.show({ model: modelName, }) return modelInfo.capabilities.includes('thinking') } public async deleteModel(modelName: string) { await this._ensureDependencies() if (!this.ollama) { throw new Error('Ollama client is not initialized.') } return await this.ollama.delete({ model: modelName, }) } public async getModels(includeEmbeddings = false) { await this._ensureDependencies() if (!this.ollama) { throw new Error('Ollama client is not initialized.') } const response = await this.ollama.list() if (includeEmbeddings) { return response.models } // Filter out embedding models return response.models.filter((model) => !model.name.includes('embed')) } async getAvailableModels( { sort, recommendedOnly, query, limit, force }: { sort?: 'pulls' | 'name'; recommendedOnly?: boolean, query: string | null, limit?: number, force?: boolean } = { sort: 'pulls', recommendedOnly: false, query: null, limit: 15, } ): Promise<{ models: NomadOllamaModel[], hasMore: boolean } | null> { try { const models = await this.retrieveAndRefreshModels(sort, force) if (!models) { // If we fail to get models from the API, return the fallback recommended models logger.warn( '[OllamaService] Returning fallback recommended models due to failure in fetching available models' ) return { models: FALLBACK_RECOMMENDED_OLLAMA_MODELS, hasMore: false } } if (!recommendedOnly) { const filteredModels = query ? this.fuseSearchModels(models, query) : models return { models: filteredModels.slice(0, limit || 15), hasMore: filteredModels.length > (limit || 15) } } // If recommendedOnly is true, only return the first three models (if sorted by pulls, these will be the top 3) const sortedByPulls = sort === 'pulls' ? models : this.sortModels(models, 'pulls') const firstThree = sortedByPulls.slice(0, 3) // Only return the first tag of each of these models (should be the most lightweight variant) const recommendedModels = firstThree.map((model) => { return { ...model, tags: model.tags && model.tags.length > 0 ? 
[model.tags[0]] : [], } }) if (query) { const filteredRecommendedModels = this.fuseSearchModels(recommendedModels, query) return { models: filteredRecommendedModels, hasMore: filteredRecommendedModels.length > (limit || 15) } } return { models: recommendedModels, hasMore: recommendedModels.length > (limit || 15) } } catch (error) { logger.error( `[OllamaService] Failed to get available models: ${error instanceof Error ? error.message : error}` ) return null } } private async retrieveAndRefreshModels( sort?: 'pulls' | 'name', force?: boolean ): Promise<NomadOllamaModel[] | null> { try { if (!force) { const cachedModels = await this.readModelsFromCache() if (cachedModels) { logger.info('[OllamaService] Using cached available models data') return this.sortModels(cachedModels, sort) } } else { logger.info('[OllamaService] Force refresh requested, bypassing cache') } logger.info('[OllamaService] Fetching fresh available models from API') const baseUrl = env.get('NOMAD_API_URL') || NOMAD_API_DEFAULT_BASE_URL const fullUrl = new URL(NOMAD_MODELS_API_PATH, baseUrl).toString() const response = await axios.get(fullUrl) if (!response.data || !Array.isArray(response.data.models)) { logger.warn( `[OllamaService] Invalid response format when fetching available models: ${JSON.stringify(response.data)}` ) return null } const rawModels = response.data.models as NomadOllamaModel[] // Filter out tags where cloud is truthy, then remove models with no remaining tags const noCloud = rawModels .map((model) => ({ ...model, tags: model.tags.filter((tag) => !tag.cloud), })) .filter((model) => model.tags.length > 0) await this.writeModelsToCache(noCloud) return this.sortModels(noCloud, sort) } catch (error) { logger.error( `[OllamaService] Failed to retrieve models from Nomad API: ${error instanceof Error ? error.message : error }` ) return null } } private async readModelsFromCache(): Promise<NomadOllamaModel[] | null> { try { const stats = await fs.stat(MODELS_CACHE_FILE) const cacheAge = Date.now() - stats.mtimeMs if (cacheAge > CACHE_MAX_AGE_MS) { logger.info('[OllamaService] Cache is stale, will fetch fresh data') return null } const cacheData = await fs.readFile(MODELS_CACHE_FILE, 'utf-8') const models = JSON.parse(cacheData) as NomadOllamaModel[] if (!Array.isArray(models)) { logger.warn('[OllamaService] Invalid cache format, will fetch fresh data') return null } return models } catch (error) { // Cache doesn't exist or is invalid if ((error as NodeJS.ErrnoException).code !== 'ENOENT') { logger.warn( `[OllamaService] Error reading cache: ${error instanceof Error ? error.message : error}` ) } return null } } private async writeModelsToCache(models: NomadOllamaModel[]): Promise<void> { try { await fs.mkdir(path.dirname(MODELS_CACHE_FILE), { recursive: true }) await fs.writeFile(MODELS_CACHE_FILE, JSON.stringify(models, null, 2), 'utf-8') logger.info('[OllamaService] Successfully cached available models') } catch (error) { logger.warn( `[OllamaService] Failed to write models cache: ${error instanceof Error ? error.message : error}` ) } } private sortModels(models: NomadOllamaModel[], sort?: 'pulls' | 'name'): NomadOllamaModel[] { if (sort === 'pulls') { // Sort by estimated pulls (it should be a string like "1.2K", "500", "4M" etc.) models.sort((a, b) => { const parsePulls = (pulls: string) => { const multiplier = pulls.endsWith('K') ? 1_000 : pulls.endsWith('M') ? 1_000_000 : pulls.endsWith('B') ? 
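// e.g. '1.2K' -> 1200 and '4M' -> 4000000; parseFloat() reads the leading number and ignores the trailing unit letter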
1_000_000_000 : 1 return parseFloat(pulls) * multiplier } return parsePulls(b.estimated_pulls) - parsePulls(a.estimated_pulls) }) } else if (sort === 'name') { models.sort((a, b) => a.name.localeCompare(b.name)) } // Always sort model.tags by the size field in ascending order // Size is a string like '75GB', '8.5GB', '2GB' etc. Smaller models first models.forEach((model) => { if (model.tags && Array.isArray(model.tags)) { model.tags.sort((a, b) => { const parseSize = (size: string) => { const multiplier = size.endsWith('KB') ? 1 / 1_000_000 : size.endsWith('MB') ? 1 / 1_000 : size.endsWith('GB') ? 1 : size.endsWith('TB') ? 1_000 : 0 // Unknown size format return parseFloat(size) * multiplier } return parseSize(a.size) - parseSize(b.size) }) } }) return models } private broadcastDownloadProgress(model: string, percent: number) { transmit.broadcast(BROADCAST_CHANNELS.OLLAMA_MODEL_DOWNLOAD, { model, percent, timestamp: new Date().toISOString(), }) logger.info(`[OllamaService] Download progress for model "${model}": ${percent}%`) } private fuseSearchModels(models: NomadOllamaModel[], query: string): NomadOllamaModel[] { const options: IFuseOptions<NomadOllamaModel> = { ignoreDiacritics: true, keys: ['name', 'description', 'tags.name'], threshold: 0.3, // lower threshold for stricter matching } const fuse = new Fuse(models, options) return fuse.search(query).map(result => result.item) } } ================================================ FILE: admin/app/services/queue_service.ts ================================================ import { Queue } from 'bullmq' import queueConfig from '#config/queue' export class QueueService { private queues: Map<string, Queue> = new Map() getQueue(name: string): Queue { if (!this.queues.has(name)) { const queue = new Queue(name, { connection: queueConfig.connection, }) this.queues.set(name, queue) } return this.queues.get(name)! 
} async close() { for (const queue of this.queues.values()) { await queue.close() } } } ================================================ FILE: admin/app/services/rag_service.ts ================================================ import { QdrantClient } from '@qdrant/js-client-rest' import { DockerService } from './docker_service.js' import { inject } from '@adonisjs/core' import logger from '@adonisjs/core/services/logger' import { TokenChunker } from '@chonkiejs/core' import sharp from 'sharp' import { deleteFileIfExists, determineFileType, getFile, getFileStatsIfExists, listDirectoryContentsRecursive, ZIM_STORAGE_PATH } from '../utils/fs.js' import { PDFParse } from 'pdf-parse' import { createWorker } from 'tesseract.js' import { fromBuffer } from 'pdf2pic' import { OllamaService } from './ollama_service.js' import { SERVICE_NAMES } from '../../constants/service_names.js' import { removeStopwords } from 'stopword' import { randomUUID } from 'node:crypto' import { join, resolve, sep } from 'node:path' import KVStore from '#models/kv_store' import { ZIMExtractionService } from './zim_extraction_service.js' import { ZIM_BATCH_SIZE } from '../../constants/zim_extraction.js' import { ProcessAndEmbedFileResponse, ProcessZIMFileResponse, RAGResult, RerankedRAGResult } from '../../types/rag.js' @inject() export class RagService { private qdrant: QdrantClient | null = null private qdrantInitPromise: Promise | null = null private embeddingModelVerified = false public static UPLOADS_STORAGE_PATH = 'storage/kb_uploads' public static CONTENT_COLLECTION_NAME = 'nomad_knowledge_base' public static EMBEDDING_MODEL = 'nomic-embed-text:v1.5' public static EMBEDDING_DIMENSION = 768 // Nomic Embed Text v1.5 dimension is 768 public static MODEL_CONTEXT_LENGTH = 2048 // nomic-embed-text has 2K token context public static MAX_SAFE_TOKENS = 1800 // Leave buffer for prefix and tokenization variance public static TARGET_TOKENS_PER_CHUNK = 1700 // Target 1700 tokens per chunk for embedding public static PREFIX_TOKEN_BUDGET = 10 // Reserve ~10 tokens for prefixes public static CHAR_TO_TOKEN_RATIO = 3 // Approximate chars per token // Nomic Embed Text v1.5 uses task-specific prefixes for optimal performance public static SEARCH_DOCUMENT_PREFIX = 'search_document: ' public static SEARCH_QUERY_PREFIX = 'search_query: ' public static EMBEDDING_BATCH_SIZE = 8 // Conservative batch size for low-end hardware constructor( private dockerService: DockerService, private ollamaService: OllamaService ) { } private async _initializeQdrantClient() { if (!this.qdrantInitPromise) { this.qdrantInitPromise = (async () => { const qdrantUrl = await this.dockerService.getServiceURL(SERVICE_NAMES.QDRANT) if (!qdrantUrl) { throw new Error('Qdrant service is not installed or running.') } this.qdrant = new QdrantClient({ url: qdrantUrl }) })() } return this.qdrantInitPromise } private async _ensureDependencies() { if (!this.qdrant) { await this._initializeQdrantClient() } } private async _ensureCollection( collectionName: string, dimensions: number = RagService.EMBEDDING_DIMENSION ) { try { await this._ensureDependencies() const collections = await this.qdrant!.getCollections() const collectionExists = collections.collections.some((col) => col.name === collectionName) if (!collectionExists) { await this.qdrant!.createCollection(collectionName, { vectors: { size: dimensions, distance: 'Cosine', }, }) } // Create payload indexes for faster filtering (idempotent — Qdrant ignores duplicates) await this.qdrant!.createPayloadIndex(collectionName, { 
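// Qdrant 'keyword' payload indexes allow exact-match filtering on these fields without scanning every payload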
field_name: 'source', field_schema: 'keyword', }) await this.qdrant!.createPayloadIndex(collectionName, { field_name: 'content_type', field_schema: 'keyword', }) } catch (error) { logger.error('Error ensuring Qdrant collection:', error) throw error } } /** * Sanitizes text to ensure it's safe for JSON encoding and Qdrant storage. * Removes problematic characters that can cause "unexpected end of hex escape" errors: * - Null bytes (\x00) * - Invalid Unicode sequences * - Control characters (except newlines, tabs, and carriage returns) */ private sanitizeText(text: string): string { return text // Null bytes .replace(/\x00/g, '') // Problematic control characters (keep \n, \r, \t) .replace(/[\x01-\x08\x0B-\x0C\x0E-\x1F\x7F]/g, '') // Invalid Unicode surrogates .replace(/[\uD800-\uDFFF]/g, '') // Trim extra whitespace .trim() } /** * Estimates token count for text. This is a conservative approximation: * - English text: ~1 token per 3 characters * - Adds buffer for special characters and tokenization variance * * Note: This is approximate; realistic English * tokenization is ~4 chars/token, but we use 3 here to be safe. * Actual tokenization may differ, but being * conservative prevents context length errors. */ private estimateTokenCount(text: string): number { // This accounts for special characters, numbers, and punctuation return Math.ceil(text.length / RagService.CHAR_TO_TOKEN_RATIO) } /** * Truncates text to fit within token limit, preserving word boundaries. * Ensures the text + prefix won't exceed the model's context window. */ private truncateToTokenLimit(text: string, maxTokens: number): string { const estimatedTokens = this.estimateTokenCount(text) if (estimatedTokens <= maxTokens) { return text } // Calculate how many characters we can keep using our ratio const maxChars = Math.floor(maxTokens * RagService.CHAR_TO_TOKEN_RATIO) // Truncate at word boundary let truncated = text.substring(0, maxChars) const lastSpace = truncated.lastIndexOf(' ') if (lastSpace > maxChars * 0.8) { // If we found a space in the last 20%, use it truncated = truncated.substring(0, lastSpace) } logger.warn( `[RAG] Truncated text from ${text.length} to ${truncated.length} chars (est. ${estimatedTokens} → ${this.estimateTokenCount(truncated)} tokens)` ) return truncated } /** * Preprocesses a query to improve retrieval by expanding it with context. * This helps match documents even when using different terminology. * TODO: We could probably move this to a separate QueryPreprocessor class if it grows more complex, but for now it's manageable here. 
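 * Example (hypothetical query): "What goes in a BOB and an IFAK?" expands to
 * "What goes in a BOB and an IFAK? bug out bag individual first aid kit",
 * so the embedding can match documents that spell the acronyms out.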
*/ private static QUERY_EXPANSION_DICTIONARY: Record<string, string> = { 'bob': 'bug out bag', 'bov': 'bug out vehicle', 'bol': 'bug out location', 'edc': 'every day carry', 'mre': 'meal ready to eat', 'shtf': 'shit hits the fan', 'teotwawki': 'the end of the world as we know it', 'opsec': 'operational security', 'ifak': 'individual first aid kit', 'ghb': 'get home bag', 'ghi': 'get home in', 'wrol': 'without rule of law', 'emp': 'electromagnetic pulse', 'ham': 'ham amateur radio', 'nbr': 'nuclear biological radiological', 'cbrn': 'chemical biological radiological nuclear', 'sar': 'search and rescue', 'comms': 'communications radio', 'fifo': 'first in first out', 'mylar': 'mylar bag food storage', 'paracord': 'paracord 550 cord', 'ferro': 'ferro rod fire starter', 'bivvy': 'bivvy bivy emergency shelter', 'bdu': 'battle dress uniform', 'gmrs': 'general mobile radio service', 'frs': 'family radio service', 'nbc': 'nuclear biological chemical', } private preprocessQuery(query: string): string { let expanded = query.trim() // Expand known domain abbreviations/acronyms const words = expanded.toLowerCase().split(/\s+/) const expansions: string[] = [] for (const word of words) { const cleaned = word.replace(/[^\w]/g, '') if (RagService.QUERY_EXPANSION_DICTIONARY[cleaned]) { expansions.push(RagService.QUERY_EXPANSION_DICTIONARY[cleaned]) } } if (expansions.length > 0) { expanded = `${expanded} ${expansions.join(' ')}` logger.debug(`[RAG] Query expanded with domain terms: "${expanded}"`) } logger.debug(`[RAG] Original query: "${query}"`) logger.debug(`[RAG] Preprocessed query: "${expanded}"`) return expanded } /** * Extract keywords from query for hybrid search */ private extractKeywords(query: string): string[] { const split = query.split(' ') const noStopWords = removeStopwords(split) // Future: This is basic normalization, could be improved with stemming/lemmatization later const keywords = noStopWords .map((word) => word.replace(/[^\w]/g, '').toLowerCase()) .filter((word) => word.length > 2) return [...new Set(keywords)] } public async embedAndStoreText( text: string, metadata: Record<string, any> = {}, onProgress?: (percent: number) => Promise<void> ): Promise<{ chunks: number } | null> { try { await this._ensureCollection( RagService.CONTENT_COLLECTION_NAME, RagService.EMBEDDING_DIMENSION ) if (!this.embeddingModelVerified) { const allModels = await this.ollamaService.getModels(true) const embeddingModel = allModels.find((model) => model.name === RagService.EMBEDDING_MODEL) if (!embeddingModel) { try { const downloadResult = await this.ollamaService.downloadModel(RagService.EMBEDDING_MODEL) if (!downloadResult.success) { throw new Error(downloadResult.message || 'Unknown error during model download') } } catch (modelError) { logger.error( `[RAG] Embedding model ${RagService.EMBEDDING_MODEL} not found locally and failed to download:`, modelError ) this.embeddingModelVerified = false return null } } this.embeddingModelVerified = true } // TokenChunker uses character-based tokenization (1 char = 1 token) // We need to convert our embedding model's token counts to character counts // since nomic-embed-text tokenizer uses ~3 chars per token const targetCharsPerChunk = Math.floor(RagService.TARGET_TOKENS_PER_CHUNK * RagService.CHAR_TO_TOKEN_RATIO) const overlapChars = Math.floor(150 * RagService.CHAR_TO_TOKEN_RATIO) const chunker = await TokenChunker.create({ chunkSize: targetCharsPerChunk, chunkOverlap: overlapChars, }) const chunkResults = await chunker.chunk(text) if (!chunkResults || chunkResults.length === 0) { throw new 
Error('No text chunks generated for embedding.') } // Extract text from chunk results const chunks = chunkResults.map((chunk) => chunk.text) const ollamaClient = await this.ollamaService.getClient() // Prepare all chunk texts with prefix and truncation const prefixedChunks: string[] = [] for (let i = 0; i < chunks.length; i++) { let chunkText = chunks[i] // Final safety check: ensure chunk + prefix fits const prefixText = RagService.SEARCH_DOCUMENT_PREFIX const withPrefix = prefixText + chunkText const estimatedTokens = this.estimateTokenCount(withPrefix) if (estimatedTokens > RagService.MAX_SAFE_TOKENS) { const prefixTokens = this.estimateTokenCount(prefixText) const maxTokensForText = RagService.MAX_SAFE_TOKENS - prefixTokens logger.warn( `[RAG] Chunk ${i} estimated at ${estimatedTokens} tokens (${chunkText.length} chars), truncating to ${maxTokensForText} tokens` ) chunkText = this.truncateToTokenLimit(chunkText, maxTokensForText) } prefixedChunks.push(RagService.SEARCH_DOCUMENT_PREFIX + chunkText) } // Batch embed chunks for performance const embeddings: number[][] = [] const batchSize = RagService.EMBEDDING_BATCH_SIZE const totalBatches = Math.ceil(prefixedChunks.length / batchSize) for (let batchIdx = 0; batchIdx < totalBatches; batchIdx++) { const batchStart = batchIdx * batchSize const batch = prefixedChunks.slice(batchStart, batchStart + batchSize) logger.debug(`[RAG] Embedding batch ${batchIdx + 1}/${totalBatches} (${batch.length} chunks)`) const response = await ollamaClient.embed({ model: RagService.EMBEDDING_MODEL, input: batch, }) embeddings.push(...response.embeddings) if (onProgress) { const progress = ((batchStart + batch.length) / prefixedChunks.length) * 100 await onProgress(progress) } } const timestamp = Date.now() const points = chunks.map((chunkText, index) => { // Sanitize text to prevent JSON encoding errors const sanitizedText = this.sanitizeText(chunkText) // Extract keywords from content const contentKeywords = this.extractKeywords(sanitizedText) // For ZIM content, also extract keywords from structural metadata let structuralKeywords: string[] = [] if (metadata.full_title) { structuralKeywords = this.extractKeywords(metadata.full_title as string) } else if (metadata.article_title) { structuralKeywords = this.extractKeywords(metadata.article_title as string) } // Combine and dedup keywords const allKeywords = [...new Set([...structuralKeywords, ...contentKeywords])] logger.debug(`[RAG] Extracted keywords for chunk ${index}: [${allKeywords.join(', ')}]`) if (structuralKeywords.length > 0) { logger.debug(`[RAG] - Structural: [${structuralKeywords.join(', ')}], Content: [${contentKeywords.join(', ')}]`) } // Sanitize source metadata as well const sanitizedSource = typeof metadata.source === 'string' ? 
this.sanitizeText(metadata.source) : 'unknown' return { id: randomUUID(), // qdrant requires either uuid or unsigned int vector: embeddings[index], payload: { ...metadata, text: sanitizedText, chunk_index: index, total_chunks: chunks.length, keywords: allKeywords.join(' '), // store as space-separated string for text search char_count: sanitizedText.length, created_at: timestamp, source: sanitizedSource }, } }) await this.qdrant!.upsert(RagService.CONTENT_COLLECTION_NAME, { points }) logger.debug(`[RAG] Successfully embedded and stored ${chunks.length} chunks`) logger.debug(`[RAG] First chunk preview: "${chunks[0].substring(0, 100)}..."`) return { chunks: chunks.length } } catch (error) { console.error(error) logger.error('[RAG] Error embedding text:', error) return null } } private async preprocessImage(filebuffer: Buffer): Promise<Buffer> { return await sharp(filebuffer) .grayscale() .normalize() .sharpen() .resize({ width: 2000, fit: 'inside' }) .toBuffer() } private async convertPDFtoImages(filebuffer: Buffer): Promise<Buffer[]> { const converted = await fromBuffer(filebuffer, { quality: 50, density: 200, format: 'png', }).bulk(-1, { responseType: 'buffer', }) return converted.filter((res) => res.buffer).map((res) => res.buffer!) } private async extractPDFText(filebuffer: Buffer): Promise<string> { const parser = new PDFParse({ data: filebuffer }) const data = await parser.getText() await parser.destroy() return data.text } private async extractTXTText(filebuffer: Buffer): Promise<string> { return filebuffer.toString('utf-8') } private async extractImageText(filebuffer: Buffer): Promise<string> { const worker = await createWorker('eng') const result = await worker.recognize(filebuffer) await worker.terminate() return result.data.text } private async processImageFile(fileBuffer: Buffer): Promise<string> { const preprocessedBuffer = await this.preprocessImage(fileBuffer) return await this.extractImageText(preprocessedBuffer) } /** * Will process the PDF and attempt to extract text. * If the extracted text is minimal, it will fall back to OCR on each page. */ private async processPDFFile(fileBuffer: Buffer): Promise<string> { let extractedText = await this.extractPDFText(fileBuffer) // Check if there was no extracted text or it was very minimal if (!extractedText || extractedText.trim().length < 100) { logger.debug('[RAG] PDF text extraction minimal, attempting OCR on pages') // Convert PDF pages to images for OCR if text extraction was poor const imageBuffers = await this.convertPDFtoImages(fileBuffer) extractedText = '' for (const imgBuffer of imageBuffers) { const preprocessedImg = await this.preprocessImage(imgBuffer) const pageText = await this.extractImageText(preprocessedImg) extractedText += pageText + '\n' } } return extractedText } /** * Process a ZIM file: extract content with metadata and embed each chunk. * Returns early with complete result since ZIM processing is self-contained. * Supports batch processing to prevent lock timeouts on large ZIM files.
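* * Illustrative batch flow (the re-dispatch itself happens in the caller): start
* at batchOffset 0; while the returned hasMoreBatches is true, call again with
* batchOffset advanced by ZIM_BATCH_SIZE, until the final (short) batch completes.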
*/ private async processZIMFile( filepath: string, deleteAfterEmbedding: boolean, batchOffset?: number, onProgress?: (percent: number) => Promise<void> ): Promise<{ success: boolean; message: string; chunks: number; hasMoreBatches: boolean; articlesProcessed: number }> { const zimExtractionService = new ZIMExtractionService() // Process in batches to avoid lock timeout const startOffset = batchOffset || 0 logger.info( `[RAG] Extracting ZIM content (batch: offset=${startOffset}, size=${ZIM_BATCH_SIZE})` ) const zimChunks = await zimExtractionService.extractZIMContent(filepath, { startOffset, batchSize: ZIM_BATCH_SIZE, }) logger.info( `[RAG] Extracted ${zimChunks.length} chunks from ZIM file with enhanced metadata` ) // Process each chunk individually with its metadata let totalChunks = 0 for (let i = 0; i < zimChunks.length; i++) { const zimChunk = zimChunks[i] const result = await this.embedAndStoreText(zimChunk.text, { source: filepath, content_type: 'zim_article', // Article-level context article_title: zimChunk.articleTitle, article_path: zimChunk.articlePath, // Section-level context section_title: zimChunk.sectionTitle, full_title: zimChunk.fullTitle, hierarchy: zimChunk.hierarchy, section_level: zimChunk.sectionLevel, // Use the same document ID for all chunks from the same article for grouping in search results document_id: zimChunk.documentId, // Archive metadata archive_title: zimChunk.archiveMetadata.title, archive_creator: zimChunk.archiveMetadata.creator, archive_publisher: zimChunk.archiveMetadata.publisher, archive_date: zimChunk.archiveMetadata.date, archive_language: zimChunk.archiveMetadata.language, archive_description: zimChunk.archiveMetadata.description, // Extraction metadata - not overly relevant for search, but could be useful for debugging and future features... extraction_strategy: zimChunk.strategy, }) if (result) { totalChunks += result.chunks } if (onProgress) { await onProgress(((i + 1) / zimChunks.length) * 100) } } // Count unique articles processed in this batch const articlesInBatch = new Set(zimChunks.map((c) => c.documentId)).size const hasMoreBatches = zimChunks.length === ZIM_BATCH_SIZE logger.info( `[RAG] Successfully embedded ${totalChunks} total chunks from ${articlesInBatch} articles (hasMore: ${hasMoreBatches})` ) // Only delete the file when: // 1. deleteAfterEmbedding is true (caller wants deletion) // 2. No more batches remain (this is the final batch) // This prevents race conditions where early batches complete after later ones const shouldDelete = deleteAfterEmbedding && !hasMoreBatches if (shouldDelete) { logger.info(`[RAG] Final batch complete, deleting ZIM file: ${filepath}`) await deleteFileIfExists(filepath) } else if (!hasMoreBatches) { logger.info(`[RAG] Final batch complete, but file deletion was not requested`) } return { success: true, message: hasMoreBatches ? 'ZIM batch processed successfully. More batches remain.' : 'ZIM file processed and embedded successfully with enhanced metadata.', chunks: totalChunks, hasMoreBatches, articlesProcessed: articlesInBatch, } } private async processTextFile(fileBuffer: Buffer): Promise<string> { return await this.extractTXTText(fileBuffer) } private async embedTextAndCleanup( extractedText: string, filepath: string, deleteAfterEmbedding: boolean = false, onProgress?: (percent: number) => Promise<void> ): Promise<{ success: boolean; message: string; chunks?: number }> { if (!extractedText || extractedText.trim().length === 0) { return { success: false, message: 'Process completed successfully, but no text was found to embed.'
} } const embedResult = await this.embedAndStoreText(extractedText, { source: filepath }, onProgress) if (!embedResult) { return { success: false, message: 'Failed to embed and store the extracted text.' } } if (deleteAfterEmbedding) { logger.info(`[RAG] Embedding complete, deleting uploaded file: ${filepath}`) await deleteFileIfExists(filepath) } return { success: true, message: 'File processed and embedded successfully.', chunks: embedResult.chunks, } } /** * Main pipeline to process and embed an uploaded file into the RAG knowledge base. * This includes text extraction, chunking, embedding, and storing in Qdrant. * * Orchestrates file type detection and delegates to specialized processors. * For ZIM files, supports batch processing via batchOffset parameter. */ public async processAndEmbedFile( filepath: string, deleteAfterEmbedding: boolean = false, batchOffset?: number, onProgress?: (percent: number) => Promise<void> ): Promise<{ success: boolean; message: string; chunks?: number; hasMoreBatches?: boolean; articlesProcessed?: number }> { try { const fileType = determineFileType(filepath) logger.debug(`[RAG] Processing file: ${filepath} (detected type: ${fileType})`) if (fileType === 'unknown') { return { success: false, message: 'Unsupported file type.' } } // Read file buffer (not needed for ZIM as it reads directly) const fileBuffer = fileType !== 'zim' ? await getFile(filepath, 'buffer') : null if (fileType !== 'zim' && !fileBuffer) { return { success: false, message: 'Failed to read the uploaded file.' } } // Process based on file type // ZIM files are handled specially since they have their own embedding workflow if (fileType === 'zim') { return await this.processZIMFile(filepath, deleteAfterEmbedding, batchOffset, onProgress) } // Extract text based on file type // Report ~10% when extraction begins; actual embedding progress follows via callback if (onProgress) await onProgress(10) let extractedText: string switch (fileType) { case 'image': extractedText = await this.processImageFile(fileBuffer!) break case 'pdf': extractedText = await this.processPDFFile(fileBuffer!) break case 'text': default: extractedText = await this.processTextFile(fileBuffer!) break } // Extraction done — scale remaining embedding progress from 15% to 100% if (onProgress) await onProgress(15) const scaledProgress = onProgress ? (p: number) => onProgress(15 + p * 0.85) : undefined // Embed extracted text and cleanup return await this.embedTextAndCleanup(extractedText, filepath, deleteAfterEmbedding, scaledProgress) } catch (error) { logger.error('[RAG] Error processing and embedding file:', error) return { success: false, message: 'Error processing and embedding file.' } } } /** * Search for documents similar to the query text in the Qdrant knowledge base. * Uses a hybrid approach combining semantic similarity and keyword matching. * Implements adaptive thresholds and result reranking for optimal retrieval.
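* * Rough pipeline (illustrative): expand and prefix the query, embed it once via
* Ollama, vector-search Qdrant at 3x the requested limit, then rerank and apply
* a source-diversity penalty before returning the top `limit` results.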
* @param query - The search query text * @param limit - Maximum number of results to return (default: 5) * @param scoreThreshold - Minimum similarity score threshold (default: 0.3) * @returns Array of relevant text chunks with their scores */ public async searchSimilarDocuments( query: string, limit: number = 5, scoreThreshold: number = 0.3 // Lower default threshold - was 0.7, now 0.3 ): Promise<Array<{ text: string; score: number; metadata: Record<string, any> }>> { try { logger.debug(`[RAG] Starting similarity search for query: "${query}"`) await this._ensureCollection( RagService.CONTENT_COLLECTION_NAME, RagService.EMBEDDING_DIMENSION ) // Check if collection has any points const collectionInfo = await this.qdrant!.getCollection(RagService.CONTENT_COLLECTION_NAME) const pointCount = collectionInfo.points_count || 0 logger.debug(`[RAG] Knowledge base contains ${pointCount} document chunks`) if (pointCount === 0) { logger.debug('[RAG] Knowledge base is empty. Could not perform search.') return [] } if (!this.embeddingModelVerified) { const allModels = await this.ollamaService.getModels(true) const embeddingModel = allModels.find((model) => model.name === RagService.EMBEDDING_MODEL) if (!embeddingModel) { logger.warn( `[RAG] ${RagService.EMBEDDING_MODEL} not found. Cannot perform similarity search.` ) this.embeddingModelVerified = false return [] } this.embeddingModelVerified = true } // Preprocess query for better matching const processedQuery = this.preprocessQuery(query) const keywords = this.extractKeywords(processedQuery) logger.debug(`[RAG] Extracted keywords: [${keywords.join(', ')}]`) // Generate embedding for the query with search_query prefix const ollamaClient = await this.ollamaService.getClient() // Ensure query doesn't exceed token limit const prefixTokens = this.estimateTokenCount(RagService.SEARCH_QUERY_PREFIX) const maxQueryTokens = RagService.MAX_SAFE_TOKENS - prefixTokens const truncatedQuery = this.truncateToTokenLimit(processedQuery, maxQueryTokens) const prefixedQuery = RagService.SEARCH_QUERY_PREFIX + truncatedQuery logger.debug(`[RAG] Generating embedding with prefix: "${RagService.SEARCH_QUERY_PREFIX}"`) // Validate final token count const queryTokenCount = this.estimateTokenCount(prefixedQuery) if (queryTokenCount > RagService.MAX_SAFE_TOKENS) { logger.error( `[RAG] Query too long even after truncation: ${queryTokenCount} tokens (max: ${RagService.MAX_SAFE_TOKENS})` ) return [] } const response = await ollamaClient.embed({ model: RagService.EMBEDDING_MODEL, input: [prefixedQuery], }) // Perform semantic search with a higher limit to enable reranking const searchLimit = limit * 3 // Get more results for reranking logger.debug( `[RAG] Searching for top ${searchLimit} semantic matches (threshold: ${scoreThreshold})` ) const searchResults = await this.qdrant!.search(RagService.CONTENT_COLLECTION_NAME, { vector: response.embeddings[0], limit: searchLimit, score_threshold: scoreThreshold, with_payload: true, }) logger.debug(`[RAG] Found ${searchResults.length} results above threshold ${scoreThreshold}`) // Map results with metadata for reranking const resultsWithMetadata: RAGResult[] = searchResults.map((result) => ({ text: (result.payload?.text as string) || '', score: result.score, keywords: (result.payload?.keywords as string) || '', chunk_index: (result.payload?.chunk_index as number) || 0, created_at: (result.payload?.created_at as number) || 0, // Enhanced ZIM metadata (likely to be undefined for non-ZIM content) article_title: result.payload?.article_title as string | undefined, section_title:
result.payload?.section_title as string | undefined, full_title: result.payload?.full_title as string | undefined, hierarchy: result.payload?.hierarchy as string | undefined, document_id: result.payload?.document_id as string | undefined, content_type: result.payload?.content_type as string | undefined, source: result.payload?.source as string | undefined, })) const rerankedResults = this.rerankResults(resultsWithMetadata, keywords, query) logger.debug(`[RAG] Top 3 results after reranking:`) rerankedResults.slice(0, 3).forEach((result, idx) => { logger.debug( `[RAG] ${idx + 1}. Score: ${result.finalScore.toFixed(4)} (semantic: ${result.score.toFixed(4)}) - "${result.text.substring(0, 100)}..."` ) }) // Apply source diversity penalty to avoid all results from the same document const diverseResults = this.applySourceDiversity(rerankedResults) // Return top N results with enhanced metadata return diverseResults.slice(0, limit).map((result) => ({ text: result.text, score: result.finalScore, metadata: { chunk_index: result.chunk_index, created_at: result.created_at, semantic_score: result.score, // Enhanced ZIM metadata (likely to be undefined for non-ZIM content) article_title: result.article_title, section_title: result.section_title, full_title: result.full_title, hierarchy: result.hierarchy, document_id: result.document_id, content_type: result.content_type, }, })) } catch (error) { logger.error('[RAG] Error searching similar documents:', error) return [] } } /** * Rerank search results using hybrid scoring that combines: * 1. Semantic similarity score (primary signal) * 2. Keyword overlap bonus (conservative, quality-gated) * 3. Direct term matches (conservative) * * Tries to boost only already-relevant results, not promote * low-quality results just because they have keyword matches. * * Future: this is a decent feature-based approach, but we could * switch to a Python-based reranker in the future if the benefits * outweigh the overhead.
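* * Worked example (ignoring the direct-match boost): a hit with semantic score
* 0.5 and full keyword overlap gains sqrt(1.0) * 0.1 * 0.5 = 0.05, giving a
* finalScore of 0.55; a hit at 0.3 keeps its raw score because it falls under
* the 0.35 quality gate below.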
*/ private rerankResults( results: Array<RAGResult>, queryKeywords: string[], originalQuery: string ): Array<RAGResult & { finalScore: number }> { return results .map((result) => { let finalScore = result.score // Quality gate: Only apply boosts if semantic score is reasonable // Try to prevent promoting irrelevant results that just happen to have keyword matches const MIN_SEMANTIC_THRESHOLD = 0.35 if (result.score < MIN_SEMANTIC_THRESHOLD) { // For low-scoring results, use semantic score as-is // This prevents false positives from keyword gaming logger.debug( `[RAG] Skipping boost for low semantic score: ${result.score.toFixed(3)} (threshold: ${MIN_SEMANTIC_THRESHOLD})` ) return { ...result, finalScore, } } // Boost score based on keyword overlap (sqrt scaling gives diminishing returns as overlap grows) const docKeywords = result.keywords .toLowerCase() .split(' ') .filter((k) => k.length > 0) const matchingKeywords = queryKeywords.filter( (kw) => docKeywords.includes(kw.toLowerCase()) || result.text.toLowerCase().includes(kw.toLowerCase()) ) const keywordOverlap = matchingKeywords.length / Math.max(queryKeywords.length, 1) // Use square root for diminishing returns: 100% overlap = sqrt(1.0) = 1.0, 25% = 0.5 // Then scale conservatively (max 10% boost instead of 20%) const keywordBoost = Math.sqrt(keywordOverlap) * 0.1 * result.score if (keywordOverlap > 0) { logger.debug( `[RAG] Keyword overlap: ${matchingKeywords.length}/${queryKeywords.length} - Boost: ${keywordBoost.toFixed(3)}` ) } // Boost if original query terms appear in text (case-insensitive) // Scale boost proportionally to base score to avoid over-promoting weak matches const queryTerms = originalQuery .toLowerCase() .split(/\s+/) .filter((t) => t.length > 3) const directMatches = queryTerms.filter((term) => result.text.toLowerCase().includes(term) ).length if (queryTerms.length > 0) { const directMatchRatio = directMatches / queryTerms.length // Conservative boost: max 7.5% of the base score const directMatchBoost = Math.sqrt(directMatchRatio) * 0.075 * result.score if (directMatches > 0) { logger.debug( `[RAG] Direct term matches: ${directMatches}/${queryTerms.length} - Boost: ${directMatchBoost.toFixed(3)}` ) finalScore += directMatchBoost } } finalScore = Math.min(1.0, finalScore + keywordBoost) return { ...result, finalScore, } }) .sort((a, b) => b.finalScore - a.finalScore) } /** * Applies a diversity penalty so results from the same source are down-weighted. * Uses greedy selection: for each result, apply 0.85^n penalty where n is the * number of results already selected from the same source. */ private applySourceDiversity( results: Array<RAGResult & { finalScore: number }> ) { const sourceCounts = new Map<string, number>() const DIVERSITY_PENALTY = 0.85 return results .map((result) => { const sourceKey = result.document_id || result.source || 'unknown' const count = sourceCounts.get(sourceKey) || 0 const penalty = Math.pow(DIVERSITY_PENALTY, count) const diverseScore = result.finalScore * penalty sourceCounts.set(sourceKey, count + 1) if (count > 0) { logger.debug( `[RAG] Source diversity penalty for "${sourceKey}": ${result.finalScore.toFixed(4)} → ${diverseScore.toFixed(4)} (seen ${count}x)` ) } return { ...result, finalScore: diverseScore } }) .sort((a, b) => b.finalScore - a.finalScore) } /** * Retrieve all unique source files that have been stored in the knowledge base.
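* Scrolls the collection in pages of 100 with only the `source` payload field
* loaded, deduplicating via a Set (illustrative cost: one scroll request per
* 100 stored points).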
* @returns Array of unique full source paths */ public async getStoredFiles(): Promise<string[]> { try { await this._ensureCollection( RagService.CONTENT_COLLECTION_NAME, RagService.EMBEDDING_DIMENSION ) const sources = new Set<string>() let offset: string | number | null | Record<string, unknown> = null const batchSize = 100 // Scroll through all points in the collection (only fetch source field) do { const scrollResult = await this.qdrant!.scroll(RagService.CONTENT_COLLECTION_NAME, { limit: batchSize, offset: offset, with_payload: ['source'], with_vector: false, }) // Extract unique source values from payloads scrollResult.points.forEach((point) => { const source = point.payload?.source if (source && typeof source === 'string') { sources.add(source) } }) offset = scrollResult.next_page_offset || null } while (offset !== null) return Array.from(sources) } catch (error) { logger.error('Error retrieving stored files:', error) return [] } } /** * Delete all Qdrant points associated with a given source path and remove * the corresponding file from disk if it lives under the uploads directory. * @param source - Full source path as stored in Qdrant payloads */ public async deleteFileBySource(source: string): Promise<{ success: boolean; message: string }> { try { await this._ensureCollection( RagService.CONTENT_COLLECTION_NAME, RagService.EMBEDDING_DIMENSION ) await this.qdrant!.delete(RagService.CONTENT_COLLECTION_NAME, { filter: { must: [{ key: 'source', match: { value: source } }], }, }) logger.info(`[RAG] Deleted all points for source: ${source}`) /** Delete the physical file only if it lives inside the uploads directory. * resolve() normalises path traversal sequences (e.g. "/../..") before the * check to prevent path traversal vulns * The trailing sep is to ensure a prefix like "kb_uploads_{something_incorrect}" can't slip through. */ const uploadsAbsPath = join(process.cwd(), RagService.UPLOADS_STORAGE_PATH) const resolvedSource = resolve(source) if (resolvedSource.startsWith(uploadsAbsPath + sep)) { await deleteFileIfExists(resolvedSource) logger.info(`[RAG] Deleted uploaded file from disk: ${resolvedSource}`) } else { logger.warn(`[RAG] File was removed from knowledge base but doesn't live in Nomad's uploads directory, so it can't be safely removed. Skipping deletion of physical file...`) } return { success: true, message: 'File removed from knowledge base.' } } catch (error) { logger.error('[RAG] Error deleting file from knowledge base:', error) return { success: false, message: 'Error deleting file from knowledge base.' } } } public async discoverNomadDocs(force?: boolean): Promise<{ success: boolean; message: string }> { try { const README_PATH = join(process.cwd(), 'README.md') const DOCS_DIR = join(process.cwd(), 'docs') const alreadyEmbeddedRaw = await KVStore.getValue('rag.docsEmbedded') if (alreadyEmbeddedRaw && !force) { logger.info('[RAG] Nomad docs have already been discovered and queued. Skipping.') return { success: true, message: 'Nomad docs have already been discovered and queued. Skipping.'
} } const filesToEmbed: Array<{ path: string; source: string }> = [] const readmeExists = await getFileStatsIfExists(README_PATH) if (readmeExists) { filesToEmbed.push({ path: README_PATH, source: 'README.md' }) } const dirContents = await listDirectoryContentsRecursive(DOCS_DIR) for (const entry of dirContents) { if (entry.type === 'file') { filesToEmbed.push({ path: entry.key, source: join('docs', entry.name) }) } } logger.info(`[RAG] Discovered ${filesToEmbed.length} Nomad doc files to embed`) // Import EmbedFileJob dynamically to avoid circular dependencies const { EmbedFileJob } = await import('#jobs/embed_file_job') // Dispatch an EmbedFileJob for each discovered file for (const fileInfo of filesToEmbed) { try { logger.info(`[RAG] Dispatching embed job for: ${fileInfo.source}`) await EmbedFileJob.dispatch({ filePath: fileInfo.path, fileName: fileInfo.source, }) logger.info(`[RAG] Successfully dispatched job for ${fileInfo.source}`) } catch (fileError) { logger.error( `[RAG] Error dispatching job for file ${fileInfo.source}:`, fileError ) } } // Update KV store to mark docs as discovered so we don't redo this unnecessarily await KVStore.setValue('rag.docsEmbedded', true) return { success: true, message: `Nomad docs discovery completed. Dispatched ${filesToEmbed.length} embedding jobs.` } } catch (error) { logger.error('Error discovering Nomad docs:', error) return { success: false, message: 'Error discovering Nomad docs.' } } } /** * Scans the knowledge base storage directories and syncs with Qdrant. * Identifies files that exist in storage but haven't been embedded yet, * and dispatches EmbedFileJob for each missing file. * * @returns Object containing success status, message, and counts of scanned/queued files */ public async scanAndSyncStorage(): Promise<{ success: boolean message: string filesScanned?: number filesQueued?: number }> { try { logger.info('[RAG] Starting knowledge base sync scan') const KB_UPLOADS_PATH = join(process.cwd(), RagService.UPLOADS_STORAGE_PATH) const ZIM_PATH = join(process.cwd(), ZIM_STORAGE_PATH) const filesInStorage: string[] = [] // Force resync of Nomad docs await this.discoverNomadDocs(true).catch((error) => { logger.error('[RAG] Error during Nomad docs discovery in sync process:', error) }) // Scan kb_uploads directory try { const kbContents = await listDirectoryContentsRecursive(KB_UPLOADS_PATH) kbContents.forEach((entry) => { if (entry.type === 'file') { filesInStorage.push(entry.key) } }) logger.debug(`[RAG] Found ${kbContents.length} files in ${RagService.UPLOADS_STORAGE_PATH}`) } catch (error) { if (error.code === 'ENOENT') { logger.debug(`[RAG] ${RagService.UPLOADS_STORAGE_PATH} directory does not exist, skipping`) } else { throw error } } // Scan zim directory try { const zimContents = await listDirectoryContentsRecursive(ZIM_PATH) zimContents.forEach((entry) => { if (entry.type === 'file') { filesInStorage.push(entry.key) } }) logger.debug(`[RAG] Found ${zimContents.length} files in ${ZIM_STORAGE_PATH}`) } catch (error) { if (error.code === 'ENOENT') { logger.debug(`[RAG] ${ZIM_STORAGE_PATH} directory does not exist, skipping`) } else { throw error } } logger.info(`[RAG] Found ${filesInStorage.length} total files in storage directories`) // Get all stored sources from Qdrant await this._ensureCollection( RagService.CONTENT_COLLECTION_NAME, RagService.EMBEDDING_DIMENSION ) const sourcesInQdrant = new Set<string>() let offset: string | number | null | Record<string, unknown> = null const batchSize = 100 // Scroll through all points to get sources do { const
scrollResult = await this.qdrant!.scroll(RagService.CONTENT_COLLECTION_NAME, { limit: batchSize, offset: offset, with_payload: ['source'], // Only fetch source field for efficiency with_vector: false, }) scrollResult.points.forEach((point) => { const source = point.payload?.source if (source && typeof source === 'string') { sourcesInQdrant.add(source) } }) offset = scrollResult.next_page_offset || null } while (offset !== null) logger.info(`[RAG] Found ${sourcesInQdrant.size} unique sources in Qdrant`) // Find files that are in storage but not in Qdrant const filesToEmbed = filesInStorage.filter((filePath) => !sourcesInQdrant.has(filePath)) logger.info(`[RAG] Found ${filesToEmbed.length} files that need embedding`) if (filesToEmbed.length === 0) { return { success: true, message: 'Knowledge base is already in sync', filesScanned: filesInStorage.length, filesQueued: 0, } } // Import EmbedFileJob dynamically to avoid circular dependencies const { EmbedFileJob } = await import('#jobs/embed_file_job') // Dispatch jobs for files that need embedding let queuedCount = 0 for (const filePath of filesToEmbed) { try { const fileName = filePath.split(/[/\\]/).pop() || filePath const stats = await getFileStatsIfExists(filePath) logger.info(`[RAG] Dispatching embed job for: ${fileName}`) await EmbedFileJob.dispatch({ filePath: filePath, fileName: fileName, fileSize: stats?.size, }) queuedCount++ logger.debug(`[RAG] Successfully dispatched job for ${fileName}`) } catch (fileError) { logger.error(`[RAG] Error dispatching job for file ${filePath}:`, fileError) } } return { success: true, message: `Scanned ${filesInStorage.length} files, queued ${queuedCount} for embedding`, filesScanned: filesInStorage.length, filesQueued: queuedCount, } } catch (error) { logger.error('[RAG] Error scanning and syncing knowledge base:', error) return { success: false, message: 'Error scanning and syncing knowledge base', } } } } ================================================ FILE: admin/app/services/system_service.ts ================================================ import Service from '#models/service' import { inject } from '@adonisjs/core' import { DockerService } from '#services/docker_service' import { ServiceSlim } from '../../types/services.js' import logger from '@adonisjs/core/services/logger' import si from 'systeminformation' import { GpuHealthStatus, NomadDiskInfo, NomadDiskInfoRaw, SystemInformationResponse } from '../../types/system.js' import { SERVICE_NAMES } from '../../constants/service_names.js' import { readFileSync } from 'fs' import path, { join } from 'path' import { getAllFilesystems, getFile } from '../utils/fs.js' import axios from 'axios' import env from '#start/env' import KVStore from '#models/kv_store' import { KV_STORE_SCHEMA, KVStoreKey } from '../../types/kv_store.js' import { isNewerVersion } from '../utils/version.js' @inject() export class SystemService { private static appVersion: string | null = null private static diskInfoFile = '/storage/nomad-disk-info.json' constructor(private dockerService: DockerService) { } async checkServiceInstalled(serviceName: string): Promise<boolean> { const services = await this.getServices({ installedOnly: true }); return services.some(service => service.service_name === serviceName); } async getInternetStatus(): Promise<boolean> { const DEFAULT_TEST_URL = 'https://1.1.1.1/cdn-cgi/trace' const MAX_ATTEMPTS = 3 let testUrl = DEFAULT_TEST_URL let customTestUrl = env.get('INTERNET_STATUS_TEST_URL')?.trim() // check that customTestUrl is a valid URL, if provided if
(customTestUrl && customTestUrl !== '') { try { new URL(customTestUrl) testUrl = customTestUrl } catch (error) { logger.warn( `Invalid INTERNET_STATUS_TEST_URL: ${customTestUrl}. Falling back to default URL.` ) } } for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) { try { const res = await axios.get(testUrl, { timeout: 5000 }) return res.status === 200 } catch (error) { logger.warn( `Internet status check attempt ${attempt}/${MAX_ATTEMPTS} failed: ${error instanceof Error ? error.message : error}` ) if (attempt < MAX_ATTEMPTS) { // delay before next attempt await new Promise((resolve) => setTimeout(resolve, 1000)) } } } logger.warn('All internet status check attempts failed.') return false } async getNvidiaSmiInfo(): Promise<Array<{ vendor: string; model: string; vram: number }> | { error: string } | 'OLLAMA_NOT_FOUND' | 'BAD_RESPONSE' | 'UNKNOWN_ERROR'> { try { const containers = await this.dockerService.docker.listContainers({ all: false }) const ollamaContainer = containers.find((c) => c.Names.includes(`/${SERVICE_NAMES.OLLAMA}`) ) if (!ollamaContainer) { logger.info('Ollama container not found for nvidia-smi info retrieval. This is expected if Ollama is not installed.') return 'OLLAMA_NOT_FOUND' } // Execute nvidia-smi inside the Ollama container to get GPU info const container = this.dockerService.docker.getContainer(ollamaContainer.Id) const exec = await container.exec({ Cmd: ['nvidia-smi', '--query-gpu=name,memory.total', '--format=csv,noheader,nounits'], AttachStdout: true, AttachStderr: true, Tty: true, }) // Read the output stream with a timeout to prevent hanging if nvidia-smi fails const stream = await exec.start({ Tty: true }) const output = await new Promise<string>((resolve) => { let data = '' const timeout = setTimeout(() => resolve(data), 5000) stream.on('data', (chunk: Buffer) => { data += chunk.toString() }) stream.on('end', () => { clearTimeout(timeout); resolve(data) }) }) // Remove any non-printable characters and trim the output const cleaned = output.replace(/[\x00-\x08]/g, '').trim() if (cleaned && !cleaned.toLowerCase().includes('error') && !cleaned.toLowerCase().includes('not found')) { // Split by newlines to handle multiple GPUs installed const lines = cleaned.split('\n').filter(line => line.trim()) // Map each line out to a useful structure for us const gpus = lines.map(line => { const parts = line.split(',').map((s) => s.trim()) return { vendor: 'NVIDIA', model: parts[0] || 'NVIDIA GPU', vram: parts[1] ? parseInt(parts[1], 10) : 0, } }) return gpus.length > 0 ?
gpus : 'BAD_RESPONSE' } // If we got output but it looks like an error, consider it a bad response from nvidia-smi return 'BAD_RESPONSE' } catch (error) { logger.error('Error getting nvidia-smi info:', error) if (error instanceof Error && error.message) { return { error: error.message } } return 'UNKNOWN_ERROR' } } async getServices({ installedOnly = true }: { installedOnly?: boolean }): Promise<ServiceSlim[]> { await this._syncContainersWithDatabase() // Sync up before fetching to ensure we have the latest status const query = Service.query() .orderBy('display_order', 'asc') .orderBy('friendly_name', 'asc') .select( 'id', 'service_name', 'installed', 'installation_status', 'ui_location', 'friendly_name', 'description', 'icon', 'powered_by', 'display_order', 'container_image', 'available_update_version' ) .where('is_dependency_service', false) if (installedOnly) { query.where('installed', true) } const services = await query if (!services || services.length === 0) { return [] } const statuses = await this.dockerService.getServicesStatus() const toReturn: ServiceSlim[] = [] for (const service of services) { const status = statuses.find((s) => s.service_name === service.service_name) toReturn.push({ id: service.id, service_name: service.service_name, friendly_name: service.friendly_name, description: service.description, icon: service.icon, installed: service.installed, installation_status: service.installation_status, status: status ? status.status : 'unknown', ui_location: service.ui_location || '', powered_by: service.powered_by, display_order: service.display_order, container_image: service.container_image, available_update_version: service.available_update_version, }) } return toReturn } static getAppVersion(): string { try { if (this.appVersion) { return this.appVersion } // Return 'dev' for development environment (version.json won't exist) if (process.env.NODE_ENV === 'development') { this.appVersion = 'dev' return 'dev' } const packageJson = readFileSync(join(process.cwd(), 'version.json'), 'utf-8') const packageData = JSON.parse(packageJson) const version = packageData.version || '0.0.0' this.appVersion = version return version } catch (error) { logger.error('Error getting app version:', error) return '0.0.0' } } async getSystemInfo(): Promise<SystemInformationResponse | undefined> { try { const [cpu, mem, os, currentLoad, fsSize, uptime, graphics] = await Promise.all([ si.cpu(), si.mem(), si.osInfo(), si.currentLoad(), si.fsSize(), si.time(), si.graphics(), ]) let diskInfo: NomadDiskInfoRaw | undefined let disk: NomadDiskInfo[] = [] try { const diskInfoRawString = await getFile( path.join(process.cwd(), SystemService.diskInfoFile), 'string' ) diskInfo = ( diskInfoRawString ?
JSON.parse(diskInfoRawString.toString()) : { diskLayout: { blockdevices: [] }, fsSize: [] } ) as NomadDiskInfoRaw disk = this.calculateDiskUsage(diskInfo) } catch (error) { logger.error('Error reading disk info file:', error) } // GPU health tracking — detect when host has NVIDIA GPU but Ollama can't access it let gpuHealth: GpuHealthStatus = { status: 'no_gpu', hasNvidiaRuntime: false, ollamaGpuAccessible: false, } // Query Docker API for host-level info (hostname, OS, GPU runtime) // si.osInfo() returns the container's info inside Docker, not the host's try { const dockerInfo = await this.dockerService.docker.info() if (dockerInfo.Name) { os.hostname = dockerInfo.Name } if (dockerInfo.OperatingSystem) { os.distro = dockerInfo.OperatingSystem } if (dockerInfo.KernelVersion) { os.kernel = dockerInfo.KernelVersion } // If si.graphics() returned no controllers (common inside Docker), // fall back to nvidia runtime + nvidia-smi detection if (!graphics.controllers || graphics.controllers.length === 0) { const runtimes = dockerInfo.Runtimes || {} if ('nvidia' in runtimes) { gpuHealth.hasNvidiaRuntime = true const nvidiaInfo = await this.getNvidiaSmiInfo() if (Array.isArray(nvidiaInfo)) { graphics.controllers = nvidiaInfo.map((gpu) => ({ model: gpu.model, vendor: gpu.vendor, bus: "", vram: gpu.vram, vramDynamic: false, // assume false here, we don't actually use this field for our purposes. })) gpuHealth.status = 'ok' gpuHealth.ollamaGpuAccessible = true } else if (nvidiaInfo === 'OLLAMA_NOT_FOUND') { gpuHealth.status = 'ollama_not_installed' } else { gpuHealth.status = 'passthrough_failed' logger.warn(`NVIDIA runtime detected but GPU passthrough failed: ${typeof nvidiaInfo === 'string' ? nvidiaInfo : JSON.stringify(nvidiaInfo)}`) } } } else { // si.graphics() returned controllers (host install, not Docker) — GPU is working gpuHealth.status = 'ok' gpuHealth.ollamaGpuAccessible = true } } catch { // Docker info query failed, skip host-level enrichment } return { cpu, mem, os, disk, currentLoad, fsSize, uptime, graphics, gpuHealth, } } catch (error) { logger.error('Error getting system info:', error) return undefined } } async checkLatestVersion(force?: boolean): Promise<{ success: boolean updateAvailable: boolean currentVersion: string latestVersion: string message?: string }> { try { const currentVersion = SystemService.getAppVersion() const cachedUpdateAvailable = await KVStore.getValue('system.updateAvailable') const cachedLatestVersion = await KVStore.getValue('system.latestVersion') // Use cached values if not forcing a fresh check. // the CheckUpdateJob will update these values every 12 hours if (!force) { return { success: true, updateAvailable: cachedUpdateAvailable ?? false, currentVersion, latestVersion: cachedLatestVersion || '', } } const earlyAccess = (await KVStore.getValue('system.earlyAccess')) ?? 
false let latestVersion: string if (earlyAccess) { const response = await axios.get( 'https://api.github.com/repos/Crosstalk-Solutions/project-nomad/releases', { headers: { Accept: 'application/vnd.github+json' }, timeout: 5000 } ) if (!response?.data?.length) throw new Error('No releases found') latestVersion = response.data[0].tag_name.replace(/^v/, '').trim() } else { const response = await axios.get( 'https://api.github.com/repos/Crosstalk-Solutions/project-nomad/releases/latest', { headers: { Accept: 'application/vnd.github+json' }, timeout: 5000 } ) if (!response?.data?.tag_name) throw new Error('Invalid response from GitHub API') latestVersion = response.data.tag_name.replace(/^v/, '').trim() } logger.info(`Current version: ${currentVersion}, Latest version: ${latestVersion}`) const updateAvailable = process.env.NODE_ENV === 'development' ? false : isNewerVersion(latestVersion, currentVersion.trim(), earlyAccess) // Cache the results in KVStore for frontend checks await KVStore.setValue('system.updateAvailable', updateAvailable) await KVStore.setValue('system.latestVersion', latestVersion) return { success: true, updateAvailable, currentVersion, latestVersion, } } catch (error) { logger.error('Error checking latest version:', error) return { success: false, updateAvailable: false, currentVersion: '', latestVersion: '', message: `Failed to check latest version: ${error instanceof Error ? error.message : error}`, } } } async subscribeToReleaseNotes(email: string): Promise<{ success: boolean; message: string }> { try { const response = await axios.post( 'https://api.projectnomad.us/api/v1/lists/release-notes/subscribe', { email }, { timeout: 5000 } ) if (response.status === 200) { return { success: true, message: 'Successfully subscribed to release notes', } } return { success: false, message: `Failed to subscribe: ${response.statusText}`, } } catch (error) { logger.error('Error subscribing to release notes:', error) return { success: false, message: `Failed to subscribe: ${error instanceof Error ? error.message : error}`, } } } async getDebugInfo(): Promise<string> { const appVersion = SystemService.getAppVersion() const environment = process.env.NODE_ENV || 'unknown' const [systemInfo, services, internetStatus, versionCheck] = await Promise.all([ this.getSystemInfo(), this.getServices({ installedOnly: false }), this.getInternetStatus().catch(() => null), this.checkLatestVersion().catch(() => null), ]) const lines: string[] = [ 'Project NOMAD Debug Info', '========================', `App Version: ${appVersion}`, `Environment: ${environment}`, ] if (systemInfo) { const { cpu, mem, os, disk, fsSize, uptime, graphics } = systemInfo lines.push('') lines.push('System:') if (os.distro) lines.push(` OS: ${os.distro}`) if (os.hostname) lines.push(` Hostname: ${os.hostname}`) if (os.kernel) lines.push(` Kernel: ${os.kernel}`) if (os.arch) lines.push(` Architecture: ${os.arch}`) if (uptime?.uptime) lines.push(` Uptime: ${this._formatUptime(uptime.uptime)}`) lines.push('') lines.push('Hardware:') if (cpu.brand) { lines.push(` CPU: ${cpu.brand} (${cpu.cores} cores)`) } if (mem.total) { const total = this._formatBytes(mem.total) const used = this._formatBytes(mem.total - (mem.available || 0)) const available = this._formatBytes(mem.available || 0) lines.push(` RAM: ${total} total, ${used} used, ${available} available`) } if (graphics.controllers && graphics.controllers.length > 0) { for (const gpu of graphics.controllers) { const vram = gpu.vram ?
` (${gpu.vram} MB VRAM)` : '' lines.push(` GPU: ${gpu.model}${vram}`) } } else { lines.push(' GPU: None detected') } // Disk info — try disk array first, fall back to fsSize const diskEntries = disk.filter((d) => d.totalSize > 0) if (diskEntries.length > 0) { for (const d of diskEntries) { const size = this._formatBytes(d.totalSize) const type = d.tran?.toUpperCase() || (d.rota ? 'HDD' : 'SSD') lines.push(` Disk: ${size}, ${Math.round(d.percentUsed)}% used, ${type}`) } } else if (fsSize.length > 0) { const realFs = fsSize.filter((f) => f.fs.startsWith('/dev/')) const seen = new Set<number>() for (const f of realFs) { if (seen.has(f.size)) continue seen.add(f.size) lines.push(` Disk: ${this._formatBytes(f.size)}, ${Math.round(f.use)}% used`) } } } const installed = services.filter((s) => s.installed) lines.push('') if (installed.length > 0) { lines.push('Installed Services:') for (const svc of installed) { lines.push(` ${svc.friendly_name} (${svc.service_name}): ${svc.status}`) } } else { lines.push('Installed Services: None') } if (internetStatus !== null) { lines.push('') lines.push(`Internet Status: ${internetStatus ? 'Online' : 'Offline'}`) } if (versionCheck?.success) { const updateMsg = versionCheck.updateAvailable ? `Yes (${versionCheck.latestVersion} available)` : `No (${versionCheck.currentVersion} is latest)` lines.push(`Update Available: ${updateMsg}`) } return lines.join('\n') } private _formatUptime(seconds: number): string { const days = Math.floor(seconds / 86400) const hours = Math.floor((seconds % 86400) / 3600) const minutes = Math.floor((seconds % 3600) / 60) if (days > 0) return `${days}d ${hours}h ${minutes}m` if (hours > 0) return `${hours}h ${minutes}m` return `${minutes}m` } private _formatBytes(bytes: number, decimals = 1): string { if (bytes === 0) return '0 Bytes' const k = 1024 const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB'] const i = Math.floor(Math.log(bytes) / Math.log(k)) return parseFloat((bytes / Math.pow(k, i)).toFixed(decimals)) + ' ' + sizes[i] } async updateSetting(key: KVStoreKey, value: any): Promise<void> { if ((value === '' || value === undefined || value === null) && KV_STORE_SCHEMA[key] === 'string') { await KVStore.clearValue(key) } else { await KVStore.setValue(key, value) } } /** * Checks the current state of Docker containers against the database records and updates the database accordingly. * It will mark services as not installed if their corresponding containers do not exist, regardless of their running state. * Handles cases where a container might have been manually removed, ensuring the database reflects the actual existence of containers. * Containers that exist but are stopped, paused, or restarting will still be considered installed. */ private async _syncContainersWithDatabase() { try { const allServices = await Service.all() const serviceStatusList = await this.dockerService.getServicesStatus() for (const service of allServices) { const containerExists = serviceStatusList.find( (s) => s.service_name === service.service_name ) if (service.installed) { // If marked as installed but container doesn't exist, mark as not installed if (!containerExists) { logger.warn( `Service ${service.service_name} is marked as installed but container does not exist.
Marking as not installed.` ) service.installed = false service.installation_status = 'idle' await service.save() } } else { // If marked as not installed but container exists (any state), mark as installed if (containerExists) { logger.warn( `Service ${service.service_name} is marked as not installed but container exists. Marking as installed.` ) service.installed = true service.installation_status = 'idle' await service.save() } } } } catch (error) { logger.error('Error syncing containers with database:', error) } } private calculateDiskUsage(diskInfo: NomadDiskInfoRaw): NomadDiskInfo[] { const { diskLayout, fsSize } = diskInfo if (!diskLayout?.blockdevices || !fsSize) { return [] } // Deduplicate: same device path mounted in multiple places (Docker bind-mounts) // Keep the entry with the largest size — that's the real partition const deduped = new Map() for (const entry of fsSize) { const existing = deduped.get(entry.fs) if (!existing || entry.size > existing.size) { deduped.set(entry.fs, entry) } } const dedupedFsSize = Array.from(deduped.values()) return diskLayout.blockdevices .filter((disk) => disk.type === 'disk') // Only physical disks .map((disk) => { const filesystems = getAllFilesystems(disk, dedupedFsSize) // Across all partitions const totalUsed = filesystems.reduce((sum, p) => sum + (p.used || 0), 0) const totalSize = filesystems.reduce((sum, p) => sum + (p.size || 0), 0) const percentUsed = totalSize > 0 ? (totalUsed / totalSize) * 100 : 0 return { name: disk.name, model: disk.model || 'Unknown', vendor: disk.vendor || '', rota: disk.rota || false, tran: disk.tran || '', size: disk.size, totalUsed, totalSize, percentUsed: Math.round(percentUsed * 100) / 100, filesystems: filesystems.map((p) => ({ fs: p.fs, mount: p.mount, used: p.used, size: p.size, percentUsed: p.use, })), } }) } } ================================================ FILE: admin/app/services/system_update_service.ts ================================================ import logger from '@adonisjs/core/services/logger' import { readFileSync, existsSync } from 'fs' import { writeFile } from 'fs/promises' import { join } from 'path' import KVStore from '#models/kv_store' interface UpdateStatus { stage: 'idle' | 'starting' | 'pulling' | 'pulled' | 'recreating' | 'complete' | 'error' progress: number message: string timestamp: string } export class SystemUpdateService { private static SHARED_DIR = '/app/update-shared' private static REQUEST_FILE = join(SystemUpdateService.SHARED_DIR, 'update-request') private static STATUS_FILE = join(SystemUpdateService.SHARED_DIR, 'update-status') private static LOG_FILE = join(SystemUpdateService.SHARED_DIR, 'update-log') /** * Requests a system update by creating a request file that the sidecar will detect */ async requestUpdate(): Promise<{ success: boolean; message: string }> { try { const currentStatus = this.getUpdateStatus() if (currentStatus && !['idle', 'complete', 'error'].includes(currentStatus.stage)) { return { success: false, message: `Update already in progress (stage: ${currentStatus.stage})`, } } // Determine the Docker image tag to install. const latestVersion = await KVStore.getValue('system.latestVersion') const requestData = { requested_at: new Date().toISOString(), requester: 'admin-api', target_tag: latestVersion ? 
`v${latestVersion}` : 'latest', } await writeFile(SystemUpdateService.REQUEST_FILE, JSON.stringify(requestData, null, 2)) logger.info(`[SystemUpdateService]: System update requested (target tag: ${requestData.target_tag}) - sidecar will process shortly`) return { success: true, message: 'System update initiated. The admin container will restart during the process.', } } catch (error) { logger.error('[SystemUpdateService]: Failed to request system update:', error) return { success: false, message: `Failed to request update: ${error.message}`, } } } getUpdateStatus(): UpdateStatus | null { try { if (!existsSync(SystemUpdateService.STATUS_FILE)) { return { stage: 'idle', progress: 0, message: 'No update in progress', timestamp: new Date().toISOString(), } } const statusContent = readFileSync(SystemUpdateService.STATUS_FILE, 'utf-8') return JSON.parse(statusContent) as UpdateStatus } catch (error) { logger.error('[SystemUpdateService]: Failed to read update status:', error) return null } } getUpdateLogs(): string { try { if (!existsSync(SystemUpdateService.LOG_FILE)) { return 'No update logs available' } return readFileSync(SystemUpdateService.LOG_FILE, 'utf-8') } catch (error) { logger.error('[SystemUpdateService]: Failed to read update logs:', error) return `Error reading logs: ${error.message}` } } /** * Check if the update sidecar is reachable (i.e. shared volume is mounted) */ isSidecarAvailable(): boolean { try { return existsSync(SystemUpdateService.SHARED_DIR) } catch (error) { return false } } } ================================================ FILE: admin/app/services/zim_extraction_service.ts ================================================ import { Archive, Entry } from '@openzim/libzim' import * as cheerio from 'cheerio' import { HTML_SELECTORS_TO_REMOVE, NON_CONTENT_HEADING_PATTERNS } from '../../constants/zim_extraction.js' import logger from '@adonisjs/core/services/logger' import { ExtractZIMChunkingStrategy, ExtractZIMContentOptions, ZIMContentChunk, ZIMArchiveMetadata } from '../../types/zim.js' import { randomUUID } from 'node:crypto' import { access } from 'node:fs/promises' export class ZIMExtractionService { private extractArchiveMetadata(archive: Archive): ZIMArchiveMetadata { try { return { title: archive.getMetadata('Title') || archive.getMetadata('Name') || 'Unknown', creator: archive.getMetadata('Creator') || 'Unknown', publisher: archive.getMetadata('Publisher') || 'Unknown', date: archive.getMetadata('Date') || 'Unknown', language: archive.getMetadata('Language') || 'Unknown', description: archive.getMetadata('Description') || '', } } catch (error) { logger.warn('[ZIMExtractionService]: Could not extract all metadata, using defaults', error) return { title: 'Unknown', creator: 'Unknown', publisher: 'Unknown', date: 'Unknown', language: 'Unknown', description: '', } } } /** * Breaks out a ZIM file's entries into their structured content form * to facilitate better indexing and retrieval. * Returns enhanced chunks with full article context and metadata. 
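* * Illustrative output shape: a 'structured' article titled 'Water' with an h2
* section 'Boiling' produces a chunk whose fullTitle is 'Water - Boiling' and
* whose hierarchy is 'Water > Boiling' (article and section names hypothetical).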
* * @param filePath - Path to the ZIM file * @param opts - Options including maxArticles, strategy, onProgress, startOffset, and batchSize */ async extractZIMContent(filePath: string, opts: ExtractZIMContentOptions = {}): Promise<ZIMContentChunk[]> { try { logger.info(`[ZIMExtractionService]: Processing ZIM file at path: ${filePath}`) // defensive - check if file still exists before opening // could have been deleted by another process or batch try { await access(filePath) } catch (error) { logger.error(`[ZIMExtractionService]: ZIM file not accessible: ${filePath}`) throw new Error(`ZIM file not found or not accessible: ${filePath}`) } const archive = new Archive(filePath) // Extract archive-level metadata once const archiveMetadata = this.extractArchiveMetadata(archive) logger.info(`[ZIMExtractionService]: Archive metadata - Title: ${archiveMetadata.title}, Language: ${archiveMetadata.language}`) let articlesProcessed = 0 let articlesSkipped = 0 const processedPaths = new Set<string>() const toReturn: ZIMContentChunk[] = [] // Support batch processing to avoid lock timeouts on large ZIM files const startOffset = opts.startOffset || 0 const batchSize = opts.batchSize || (opts.maxArticles || Infinity) for (const entry of archive.iterByPath()) { // Skip articles until we reach the start offset if (articlesSkipped < startOffset) { if (this.isArticleEntry(entry) && !processedPaths.has(entry.path)) { articlesSkipped++ } continue } if (articlesProcessed >= batchSize) { break } if (!this.isArticleEntry(entry)) { logger.debug(`[ZIMExtractionService]: Skipping non-article entry at path: ${entry.path}`) continue } if (processedPaths.has(entry.path)) { logger.debug(`[ZIMExtractionService]: Skipping duplicate entry at path: ${entry.path}`) continue } processedPaths.add(entry.path) const item = entry.item const blob = item.data const html = this.getCleanedHTMLString(blob.data) const strategy = opts.strategy || this.chooseChunkingStrategy(html); logger.debug(`[ZIMExtractionService]: Chosen chunking strategy for path ${entry.path}: ${strategy}`) // Generate a unique document ID. All chunks from same article will share it const documentId = randomUUID() const articleTitle = entry.title || entry.path let chunks: ZIMContentChunk[] if (strategy === 'structured') { const structured = this.extractStructuredContent(html) chunks = structured.sections.map(s => ({ text: s.text, articleTitle, articlePath: entry.path, sectionTitle: s.heading, fullTitle: `${articleTitle} - ${s.heading}`, hierarchy: `${articleTitle} > ${s.heading}`, sectionLevel: s.level, documentId, archiveMetadata, strategy, })) } else { // Simple strategy - entire article as one chunk const text = this.extractTextFromHTML(html) || '' chunks = [{ text, articleTitle, articlePath: entry.path, sectionTitle: articleTitle, // Same as article for simple strategy fullTitle: articleTitle, hierarchy: articleTitle, documentId, archiveMetadata, strategy, }] } logger.debug(`Extracted ${chunks.length} chunks from article at path: ${entry.path} using strategy: ${strategy}`) const nonEmptyChunks = chunks.filter(c => c.text.trim().length > 0) logger.debug(`After filtering empty chunks, ${nonEmptyChunks.length} chunks remain for article at path: ${entry.path}`) toReturn.push(...nonEmptyChunks) articlesProcessed++ if (opts.onProgress) { opts.onProgress(articlesProcessed, archive.articleCount) } } logger.info(`[ZIMExtractionService]: Completed processing ZIM file.
Total articles processed: ${articlesProcessed}`) logger.debug("Final structured content sample:", toReturn.slice(0, 3).map(c => ({ articleTitle: c.articleTitle, sectionTitle: c.sectionTitle, hierarchy: c.hierarchy, textPreview: c.text.substring(0, 100) }))) logger.debug("Total structured sections extracted:", toReturn.length) return toReturn } catch (error) { logger.error('Error processing ZIM file:', error) throw error } } private chooseChunkingStrategy(html: string, options = { forceStrategy: null as ExtractZIMChunkingStrategy | null, }): ExtractZIMChunkingStrategy { const { forceStrategy = null, } = options; if (forceStrategy) return forceStrategy; // Use a simple analysis to determine if the HTML has any meaningful structure // that we can leverage for better chunking. If not, we'll just chunk it as one big piece of text. return this.hasStructuredHeadings(html) ? 'structured' : 'simple'; } private getCleanedHTMLString(buff: Buffer): string { const rawString = buff.toString('utf-8'); const $ = cheerio.load(rawString); HTML_SELECTORS_TO_REMOVE.forEach((selector) => { $(selector).remove() }); return $.html(); } private extractTextFromHTML(html: string): string | null { try { const $ = cheerio.load(html) // Search body first, then root if body is absent const text = $('body').length ? $('body').text() : $.root().text() return text.replace(/\s+/g, ' ').replace(/\n\s*\n/g, '\n').trim() } catch (error) { logger.error('Error extracting text from HTML:', error) return null } } private extractStructuredContent(html: string) { const $ = cheerio.load(html); const title = $('h1').first().text().trim() || $('title').text().trim(); // Extract sections with their headings and heading levels const sections: Array<{ heading: string; text: string; level: number }> = []; let currentSection = { heading: 'Introduction', content: [] as string[], level: 2 }; $('body').children().each((_, element) => { const $el = $(element); const tagName = element.tagName?.toLowerCase(); if (['h2', 'h3', 'h4'].includes(tagName)) { // Save current section if it has content if (currentSection.content.length > 0) { sections.push({ heading: currentSection.heading, text: currentSection.content.join(' ').replace(/\s+/g, ' ').trim(), level: currentSection.level, }); } // Start new section const level = parseInt(tagName.substring(1)); // Extract number from h2, h3, h4 currentSection = { heading: $el.text().replace(/\[edit\]/gi, '').trim(), content: [], level, }; } else if (['p', 'ul', 'ol', 'dl', 'table'].includes(tagName)) { const text = $el.text().trim(); if (text.length > 0) { currentSection.content.push(text); } } }); // Push the last section if it has content if (currentSection.content.length > 0) { sections.push({ heading: currentSection.heading, text: currentSection.content.join(' ').replace(/\s+/g, ' ').trim(), level: currentSection.level, }); } return { title, sections, fullText: sections.map(s => `${s.heading}\n${s.text}`).join('\n\n'), }; } private hasStructuredHeadings(html: string): boolean { const $ = cheerio.load(html); const headings = $('h2, h3').toArray(); // Consider it structured if it has at least 2 headings to break content into meaningful sections if (headings.length < 2) return false; // Check that headings have substantial content between them let sectionsWithContent = 0; for (const heading of headings) { const $heading = $(heading); const headingText = $heading.text().trim(); // Skip empty or very short headings, likely not meaningful if (headingText.length < 3) continue; // Skip common non-content headings
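// (e.g. boilerplate such as "References" or "External links"; the exact patterns live in NON_CONTENT_HEADING_PATTERNS)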
================================================ FILE: admin/app/services/zim_service.ts ================================================ import { ListRemoteZimFilesResponse, RawRemoteZimFileEntry, RemoteZimFileEntry, } from '../../types/zim.js' import axios from 'axios' import { XMLParser } from 'fast-xml-parser' import { isRawListRemoteZimFilesResponse, isRawRemoteZimFileEntry } from '../../util/zim.js' import logger from '@adonisjs/core/services/logger' import { DockerService } from './docker_service.js' import { inject } from '@adonisjs/core' import { deleteFileIfExists, ensureDirectoryExists, getFileStatsIfExists, listDirectoryContents, ZIM_STORAGE_PATH, } from '../utils/fs.js' import { join, resolve, sep } from 'path' import { WikipediaOption, WikipediaState } from '../../types/downloads.js' import vine from '@vinejs/vine' import { wikipediaOptionsFileSchema } from '#validators/curated_collections' import WikipediaSelection from '#models/wikipedia_selection' import InstalledResource from '#models/installed_resource' import { RunDownloadJob } from '#jobs/run_download_job' import { SERVICE_NAMES } from '../../constants/service_names.js' import { CollectionManifestService } from './collection_manifest_service.js' import type { CategoryWithStatus } from '../../types/collections.js' const ZIM_MIME_TYPES = ['application/x-zim', 'application/x-openzim', 'application/octet-stream'] const WIKIPEDIA_OPTIONS_URL = 'https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/collections/wikipedia.json' @inject() export class ZimService { constructor(private dockerService: DockerService) { } async list() { const dirPath = join(process.cwd(), ZIM_STORAGE_PATH) await ensureDirectoryExists(dirPath) const all = await listDirectoryContents(dirPath) const files = all.filter((item) => item.name.endsWith('.zim')) return { files, } } async listRemote({ start, count, query, }: { start: number count: number query?: string }): Promise<ListRemoteZimFilesResponse> { const LIBRARY_BASE_URL = 'https://browse.library.kiwix.org/catalog/v2/entries' const res = await axios.get(LIBRARY_BASE_URL, { params: { start: start, count: count, lang: 'eng', ...(query ? { q: query } : {}), }, responseType: 'text', }) const data = res.data const parser = new XMLParser({ ignoreAttributes: false, attributeNamePrefix: '', textNodeName: '#text', }) const result = parser.parse(data) if (!isRawListRemoteZimFilesResponse(result)) { throw new Error('Invalid response format from remote library') } const entries = result.feed.entry ? Array.isArray(result.feed.entry) ?
result.feed.entry : [result.feed.entry] : [] const filtered = entries.filter((entry: any) => { return isRawRemoteZimFileEntry(entry) }) const mapped: (RemoteZimFileEntry | null)[] = filtered.map((entry: RawRemoteZimFileEntry) => { const downloadLink = entry.link.find((link: any) => { return ( typeof link === 'object' && 'rel' in link && 'length' in link && 'href' in link && 'type' in link && link.type === 'application/x-zim' ) }) if (!downloadLink) { return null } // downloadLink['href'] will end with .meta4, we need to remove that to get the actual download URL const download_url = downloadLink['href'].substring(0, downloadLink['href'].length - 6) const file_name = download_url.split('/').pop() || `${entry.title}.zim` const sizeBytes = parseInt(downloadLink['length'], 10) return { id: entry.id, title: entry.title, updated: entry.updated, summary: entry.summary, size_bytes: sizeBytes || 0, download_url: download_url, author: entry.author.name, file_name: file_name, } }) // Filter out any null entries (those without a valid download link) // or files that already exist in the local storage const existing = await this.list() const existingKeys = new Set(existing.files.map((file) => file.name)) const withoutExisting = mapped.filter( (entry): entry is RemoteZimFileEntry => entry !== null && !existingKeys.has(entry.file_name) ) return { items: withoutExisting, has_more: result.feed.totalResults > start, total_count: result.feed.totalResults, } } async downloadRemote(url: string): Promise<{ filename: string; jobId?: string }> { const parsed = new URL(url) if (!parsed.pathname.endsWith('.zim')) { throw new Error(`Invalid ZIM file URL: ${url}. URL must end with .zim`) } const existing = await RunDownloadJob.getByUrl(url) if (existing) { throw new Error('A download for this URL is already in progress') } // Extract the filename from the URL const filename = url.split('/').pop() if (!filename) { throw new Error('Could not determine filename from URL') } const filepath = join(process.cwd(), ZIM_STORAGE_PATH, filename) // Parse resource metadata for the download job const parsedFilename = CollectionManifestService.parseZimFilename(filename) const resourceMetadata = parsedFilename ? 
{ resource_id: parsedFilename.resource_id, version: parsedFilename.version, collection_ref: null } : undefined // Dispatch a background download job const result = await RunDownloadJob.dispatch({ url, filepath, timeout: 30000, allowedMimeTypes: ZIM_MIME_TYPES, forceNew: true, filetype: 'zim', resourceMetadata, }) if (!result || !result.job) { throw new Error('Failed to dispatch download job') } logger.info(`[ZimService] Dispatched background download job for ZIM file: ${filename}`) return { filename, jobId: result.job.id, } } async listCuratedCategories(): Promise<CategoryWithStatus[]> { const manifestService = new CollectionManifestService() return manifestService.getCategoriesWithStatus() } async downloadCategoryTier(categorySlug: string, tierSlug: string): Promise<string[] | null> { const manifestService = new CollectionManifestService() const spec = await manifestService.getSpecWithFallback('zim_categories') if (!spec) { throw new Error('Could not load ZIM categories spec') } const category = spec.categories.find((c) => c.slug === categorySlug) if (!category) { throw new Error(`Category not found: ${categorySlug}`) } const tier = category.tiers.find((t) => t.slug === tierSlug) if (!tier) { throw new Error(`Tier not found: ${tierSlug}`) } const allResources = CollectionManifestService.resolveTierResources(tier, category.tiers) // Filter out already installed const installed = await InstalledResource.query().where('resource_type', 'zim') const installedIds = new Set(installed.map((r) => r.resource_id)) const toDownload = allResources.filter((r) => !installedIds.has(r.id)) if (toDownload.length === 0) return null const downloadFilenames: string[] = [] for (const resource of toDownload) { const existingJob = await RunDownloadJob.getByUrl(resource.url) if (existingJob) { logger.warn(`[ZimService] Download already in progress for ${resource.url}, skipping.`) continue } const filename = resource.url.split('/').pop() if (!filename) continue downloadFilenames.push(filename) const filepath = join(process.cwd(), ZIM_STORAGE_PATH, filename) await RunDownloadJob.dispatch({ url: resource.url, filepath, timeout: 30000, allowedMimeTypes: ZIM_MIME_TYPES, forceNew: true, filetype: 'zim', resourceMetadata: { resource_id: resource.id, version: resource.version, collection_ref: categorySlug, }, }) } return downloadFilenames.length > 0 ? downloadFilenames : null } async downloadRemoteSuccessCallback(urls: string[], restart = true) { // Check if any URL is a Wikipedia download and handle it for (const url of urls) { if (url.includes('wikipedia_en_')) { await this.onWikipediaDownloadComplete(url, true) } } if (restart) { // Check if there are any remaining ZIM download jobs before restarting const { QueueService } = await import('./queue_service.js') const queueService = new QueueService() const queue = queueService.getQueue('downloads') // Get all active and waiting jobs const [activeJobs, waitingJobs] = await Promise.all([ queue.getActive(), queue.getWaiting(), ]) // Filter out completed jobs (progress === 100) to avoid race condition // where this job itself is still in the active queue const activeIncompleteJobs = activeJobs.filter((job) => { const progress = typeof job.progress === 'number' ?
job.progress : 0 return progress < 100 }) // Check if any remaining incomplete jobs are ZIM downloads const allJobs = [...activeIncompleteJobs, ...waitingJobs] const hasRemainingZimJobs = allJobs.some((job) => job.data.filetype === 'zim') if (hasRemainingZimJobs) { logger.info('[ZimService] Skipping container restart - more ZIM downloads pending') } else { // Restart KIWIX container to pick up new ZIM file logger.info('[ZimService] No more ZIM downloads pending - restarting KIWIX container') await this.dockerService .affectContainer(SERVICE_NAMES.KIWIX, 'restart') .catch((error) => { logger.error(`[ZimService] Failed to restart KIWIX container:`, error) // Don't stop the download completion, just log the error. }) } } // Create InstalledResource entries for downloaded files for (const url of urls) { // Skip Wikipedia files (managed separately) if (url.includes('wikipedia_en_')) continue const filename = url.split('/').pop() if (!filename) continue const parsed = CollectionManifestService.parseZimFilename(filename) if (!parsed) continue const filepath = join(process.cwd(), ZIM_STORAGE_PATH, filename) const stats = await getFileStatsIfExists(filepath) try { const { DateTime } = await import('luxon') await InstalledResource.updateOrCreate( { resource_id: parsed.resource_id, resource_type: 'zim' }, { version: parsed.version, url: url, file_path: filepath, file_size_bytes: stats ? Number(stats.size) : null, installed_at: DateTime.now(), } ) logger.info(`[ZimService] Created InstalledResource entry for: ${parsed.resource_id}`) } catch (error) { logger.error(`[ZimService] Failed to create InstalledResource for ${filename}:`, error) } } } async delete(file: string): Promise<void> { let fileName = file if (!fileName.endsWith('.zim')) { fileName += '.zim' } const basePath = resolve(join(process.cwd(), ZIM_STORAGE_PATH)) const fullPath = resolve(join(basePath, fileName)) // Prevent path traversal — resolved path must stay within the storage directory if (!fullPath.startsWith(basePath + sep)) { throw new Error('Invalid filename') } const exists = await getFileStatsIfExists(fullPath) if (!exists) { throw new Error('not_found') } await deleteFileIfExists(fullPath) // Clean up InstalledResource entry const parsed = CollectionManifestService.parseZimFilename(fileName) if (parsed) { await InstalledResource.query() .where('resource_id', parsed.resource_id) .where('resource_type', 'zim') .delete() logger.info(`[ZimService] Deleted InstalledResource entry for: ${parsed.resource_id}`) } } // Wikipedia selector methods async getWikipediaOptions(): Promise<WikipediaOption[]> { try { const response = await axios.get(WIKIPEDIA_OPTIONS_URL) const data = response.data const validated = await vine.validate({ schema: wikipediaOptionsFileSchema, data, }) return validated.options } catch (error) { logger.error(`[ZimService] Failed to fetch Wikipedia options:`, error) throw new Error('Failed to fetch Wikipedia options') } } async getWikipediaSelection(): Promise<WikipediaSelection | null> { // Get the single row from wikipedia_selections (there should only ever be one) return WikipediaSelection.query().first() } async getWikipediaState(): Promise<WikipediaState> { const options = await this.getWikipediaOptions() const selection = await this.getWikipediaSelection() return { options, currentSelection: selection ?
{ optionId: selection.option_id, status: selection.status, filename: selection.filename, url: selection.url, } : null, } } async selectWikipedia(optionId: string): Promise<{ success: boolean; jobId?: string; message?: string }> { const options = await this.getWikipediaOptions() const selectedOption = options.find((opt) => opt.id === optionId) if (!selectedOption) { throw new Error(`Invalid Wikipedia option: ${optionId}`) } const currentSelection = await this.getWikipediaSelection() // If same as currently installed, no action needed if (currentSelection?.option_id === optionId && currentSelection.status === 'installed') { return { success: true, message: 'Already installed' } } // Handle "none" option - delete current Wikipedia file and update DB if (optionId === 'none') { if (currentSelection?.filename) { try { await this.delete(currentSelection.filename) logger.info(`[ZimService] Deleted Wikipedia file: ${currentSelection.filename}`) } catch (error) { // File might already be deleted, that's OK logger.warn(`[ZimService] Could not delete Wikipedia file (may already be gone): ${currentSelection.filename}`) } } // Update or create the selection record (always use first record) if (currentSelection) { currentSelection.option_id = 'none' currentSelection.url = null currentSelection.filename = null currentSelection.status = 'none' await currentSelection.save() } else { await WikipediaSelection.create({ option_id: 'none', url: null, filename: null, status: 'none', }) } // Restart Kiwix to reflect the change await this.dockerService .affectContainer(SERVICE_NAMES.KIWIX, 'restart') .catch((error) => { logger.error(`[ZimService] Failed to restart Kiwix after Wikipedia removal:`, error) }) return { success: true, message: 'Wikipedia removed' } } // Start download for the new Wikipedia option if (!selectedOption.url) { throw new Error('Selected Wikipedia option has no download URL') } // Check if already downloading const existingJob = await RunDownloadJob.getByUrl(selectedOption.url) if (existingJob) { return { success: false, message: 'Download already in progress' } } // Extract filename from URL const filename = selectedOption.url.split('/').pop() if (!filename) { throw new Error('Could not determine filename from URL') } const filepath = join(process.cwd(), ZIM_STORAGE_PATH, filename) // Update or create selection record to show downloading status let selection: WikipediaSelection if (currentSelection) { currentSelection.option_id = optionId currentSelection.url = selectedOption.url currentSelection.filename = filename currentSelection.status = 'downloading' await currentSelection.save() selection = currentSelection } else { selection = await WikipediaSelection.create({ option_id: optionId, url: selectedOption.url, filename: filename, status: 'downloading', }) } // Dispatch download job const result = await RunDownloadJob.dispatch({ url: selectedOption.url, filepath, timeout: 30000, allowedMimeTypes: ZIM_MIME_TYPES, forceNew: true, filetype: 'zim', }) if (!result || !result.job) { // Revert status on failure to dispatch selection.option_id = currentSelection?.option_id || 'none' selection.url = currentSelection?.url || null selection.filename = currentSelection?.filename || null selection.status = currentSelection?.status || 'none' await selection.save() throw new Error('Failed to dispatch download job') } logger.info(`[ZimService] Started Wikipedia download for ${optionId}: ${filename}`) return { success: true, jobId: result.job.id, message: 'Download started', } } async 
onWikipediaDownloadComplete(url: string, success: boolean): Promise<void> { const selection = await this.getWikipediaSelection() if (!selection || selection.url !== url) { logger.warn(`[ZimService] Wikipedia download complete callback for unknown URL: ${url}`) return } if (success) { // Update status to installed selection.status = 'installed' await selection.save() logger.info(`[ZimService] Wikipedia download completed successfully: ${selection.filename}`) // Delete the old Wikipedia file if it exists and is different // We need to find what was previously installed const existingFiles = await this.list() const wikipediaFiles = existingFiles.files.filter((f) => f.name.startsWith('wikipedia_en_') && f.name !== selection.filename ) for (const oldFile of wikipediaFiles) { try { await this.delete(oldFile.name) logger.info(`[ZimService] Deleted old Wikipedia file: ${oldFile.name}`) } catch (error) { logger.warn(`[ZimService] Could not delete old Wikipedia file: ${oldFile.name}`, error) } } } else { // Download failed - keep the selection record but mark as failed selection.status = 'failed' await selection.save() logger.error(`[ZimService] Wikipedia download failed for: ${selection.filename}`) } } }
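In the application these methods are reached through the controllers and the download worker, but the service can also be exercised directly. A minimal manual wiring sketch (illustrative only; it bypasses the IoC container, and the category/tier slugs are hypothetical):

import { DockerService } from '#services/docker_service'
import { ZimService } from '#services/zim_service'

const zim = new ZimService(new DockerService())
// Queues every not-yet-installed resource in the tier; resolves to the queued
// filenames, or null when everything is already installed or in flight.
const queued = await zim.downloadCategoryTier('reference', 'essential') // hypothetical slugs
console.log(queued ?? 'nothing to queue')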
================================================ FILE: admin/app/utils/downloads.ts ================================================ import { DoResumableDownloadParams, DoResumableDownloadWithRetryParams, } from '../../types/downloads.js' import axios from 'axios' import { Transform } from 'stream' import { deleteFileIfExists, ensureDirectoryExists, getFileStatsIfExists } from './fs.js' import { createWriteStream } from 'fs' import path from 'path' /** * Perform a resumable download with progress tracking * @param param0 - Download parameters. Leave allowedMimeTypes empty to skip mime type checking. * Otherwise, mime types should be in the format "application/pdf", "image/png", etc. * @returns Path to the downloaded file */ export async function doResumableDownload({ url, filepath, timeout = 30000, signal, onProgress, onComplete, forceNew = false, allowedMimeTypes, }: DoResumableDownloadParams): Promise<string> { const dirname = path.dirname(filepath) await ensureDirectoryExists(dirname) // Check if partial file exists for resume let startByte = 0 let appendMode = false const existingStats = await getFileStatsIfExists(filepath) if (existingStats && !forceNew) { startByte = existingStats.size appendMode = true } // Get file info with HEAD request first const headResponse = await axios.head(url, { signal, timeout, }) const contentType = headResponse.headers['content-type'] || '' const totalBytes = parseInt(headResponse.headers['content-length'] || '0') const supportsRangeRequests = headResponse.headers['accept-ranges'] === 'bytes' // If allowedMimeTypes is provided, check content type if (allowedMimeTypes && allowedMimeTypes.length > 0) { const isMimeTypeAllowed = allowedMimeTypes.some((mimeType) => contentType.includes(mimeType)) if (!isMimeTypeAllowed) { throw new Error(`MIME type ${contentType} is not allowed`) } } // If file is already complete and not forcing overwrite just return filepath if (startByte === totalBytes && totalBytes > 0 && !forceNew) { return filepath } // If server doesn't support range requests and we have a partial file, delete it if (!supportsRangeRequests && startByte > 0) { await deleteFileIfExists(filepath) startByte = 0 appendMode = false } const headers: Record<string, string> = {} if (supportsRangeRequests && startByte > 0) { headers.Range = `bytes=${startByte}-` } const response = await axios.get(url, { responseType: 'stream', headers, signal, timeout, }) if (response.status !== 200 && response.status !== 206) { throw new Error(`Failed to download: HTTP ${response.status}`) } return new Promise((resolve, reject) => { let downloadedBytes = startByte let lastProgressTime = Date.now() let lastDownloadedBytes = startByte // Stall detection: if no data arrives for 5 minutes, abort the download const STALL_TIMEOUT_MS = 5 * 60 * 1000 let stallTimer: ReturnType<typeof setTimeout> | null = null const clearStallTimer = () => { if (stallTimer) { clearTimeout(stallTimer) stallTimer = null } } const resetStallTimer = () => { clearStallTimer() stallTimer = setTimeout(() => { cleanup(new Error('Download stalled - no data received for 5 minutes')) }, STALL_TIMEOUT_MS) } // Progress tracking stream to monitor data flow const progressStream = new Transform({ transform(chunk: Buffer, _: any, callback: Function) { downloadedBytes += chunk.length resetStallTimer() // Update progress tracking const now = Date.now() if (onProgress && now - lastProgressTime >= 500) { lastProgressTime = now lastDownloadedBytes = downloadedBytes onProgress({ downloadedBytes, totalBytes, lastProgressTime, lastDownloadedBytes, url, }) } this.push(chunk) callback() }, }) const writeStream = createWriteStream(filepath, { flags: appendMode ? 'a' : 'w', }) // Handle errors and cleanup const cleanup = (error?: Error) => { clearStallTimer() progressStream.destroy() response.data.destroy() writeStream.destroy() if (error) { reject(error) } } response.data.on('error', cleanup) progressStream.on('error', cleanup) writeStream.on('error', cleanup) signal?.addEventListener('abort', () => { cleanup(new Error('Download aborted')) }) writeStream.on('finish', async () => { clearStallTimer() if (onProgress) { onProgress({ downloadedBytes, totalBytes, lastProgressTime: Date.now(), lastDownloadedBytes: downloadedBytes, url, }) } if (onComplete) { await onComplete(url, filepath) } resolve(filepath) }) // Start stall timer and pipe: response -> progressStream -> writeStream resetStallTimer() response.data.pipe(progressStream).pipe(writeStream) }) }
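// Illustrative caller (a sketch, not part of the original file): reports percent
// progress through onProgress. The URL and destination path are hypothetical.
async function demoResumableDownload(): Promise<void> {
  const saved = await doResumableDownload({
    url: 'https://example.com/big-file.zim', // hypothetical
    filepath: '/tmp/big-file.zim', // hypothetical
    allowedMimeTypes: ['application/octet-stream'],
    onProgress: ({ downloadedBytes, totalBytes }) => {
      if (totalBytes > 0) console.log(`${((downloadedBytes / totalBytes) * 100).toFixed(1)}%`)
    },
  })
  console.log(`saved to ${saved}`)
}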
export async function doResumableDownloadWithRetry({ url, filepath, signal, timeout = 30000, onProgress, max_retries = 3, retry_delay = 2000, onAttemptError, allowedMimeTypes, }: DoResumableDownloadWithRetryParams): Promise<string> { const dirname = path.dirname(filepath) await ensureDirectoryExists(dirname) let attempt = 0 let lastError: Error | null = null while (attempt < max_retries) { try { const result = await doResumableDownload({ url, filepath, signal, timeout, allowedMimeTypes, onProgress, }) return result // return on success } catch (error) { attempt++ lastError = error as Error const isAborted = error.name === 'AbortError' || error.code === 'ABORT_ERR' const isNetworkError = error.code === 'ECONNRESET' || error.code === 'ENOTFOUND' || error.code === 'ETIMEDOUT' onAttemptError?.(error, attempt) if (isAborted) { throw new Error(`Download aborted for URL: ${url}`) } if (attempt < max_retries && isNetworkError) { await delay(retry_delay) continue } // If max retries reached or non-retriable error, throw if (attempt >= max_retries || !isNetworkError) { throw error } } } // should not reach here, but TypeScript needs a return throw lastError || new Error('Unknown error during download') } async function delay(ms: number): Promise<void> { return new Promise((resolve) => setTimeout(resolve, ms)) }
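A hedged usage sketch of the retry wrapper (illustrative, not from the repository; URL and path are hypothetical): network-level failures (ECONNRESET, ENOTFOUND, ETIMEDOUT) are retried up to max_retries with retry_delay between attempts, while aborts and other errors surface immediately.

import { doResumableDownloadWithRetry } from './downloads.js' // relative path depends on the caller

const file = await doResumableDownloadWithRetry({
  url: 'https://example.com/dataset.zim', // hypothetical
  filepath: '/tmp/dataset.zim', // hypothetical
  max_retries: 5,
  retry_delay: 1000,
  onAttemptError: (error, attempt) => console.warn(`attempt ${attempt} failed: ${error.message}`),
})
console.log(`downloaded to ${file}`)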
================================================ FILE: admin/app/utils/fs.ts ================================================ import { mkdir, readdir, readFile, stat, unlink } from 'fs/promises' import path, { join } from 'path' import { FileEntry } from '../../types/files.js' import { createReadStream } from 'fs' import { LSBlockDevice, NomadDiskInfoRaw } from '../../types/system.js' export const ZIM_STORAGE_PATH = '/storage/zim' export async function listDirectoryContents(path: string): Promise<FileEntry[]> { const entries = await readdir(path, { withFileTypes: true }) const results: FileEntry[] = [] for (const entry of entries) { if (entry.isFile()) { results.push({ type: 'file', key: join(path, entry.name), name: entry.name, }) } else if (entry.isDirectory()) { results.push({ type: 'directory', prefix: join(path, entry.name), name: entry.name, }) } } return results } export async function listDirectoryContentsRecursive(path: string): Promise<FileEntry[]> { let results: FileEntry[] = [] const entries = await readdir(path, { withFileTypes: true }) for (const entry of entries) { const fullPath = join(path, entry.name) if (entry.isDirectory()) { const subdirectoryContents = await listDirectoryContentsRecursive(fullPath) results = results.concat(subdirectoryContents) } else { results.push({ type: 'file', key: fullPath, name: entry.name, }) } } return results } export async function ensureDirectoryExists(path: string): Promise<void> { try { await stat(path) } catch (error) { if (error.code === 'ENOENT') { await mkdir(path, { recursive: true }) } } } export async function getFile(path: string, returnType: 'buffer'): Promise<Buffer | null> export async function getFile( path: string, returnType: 'stream' ): Promise<NodeJS.ReadableStream | null> export async function getFile(path: string, returnType: 'string'): Promise<string | null> export async function getFile( path: string, returnType: 'buffer' | 'string' | 'stream' = 'buffer' ): Promise<Buffer | string | NodeJS.ReadableStream | null> { try { if (returnType === 'string') { return await readFile(path, 'utf-8') } else if (returnType === 'stream') { return createReadStream(path) } return await readFile(path) } catch (error) { if (error.code === 'ENOENT') { return null } throw error } } export async function getFileStatsIfExists( path: string ): Promise<{ size: number; modifiedTime: Date } | null> { try { const stats = await stat(path) return { size: stats.size, modifiedTime: stats.mtime, } } catch (error) { if (error.code === 'ENOENT') { return null } throw error } } export async function deleteFileIfExists(path: string): Promise<void> { try { await unlink(path) } catch (error) { if (error.code !== 'ENOENT') { throw error } } } export function getAllFilesystems( device: LSBlockDevice, fsSize: NomadDiskInfoRaw['fsSize'] ): NomadDiskInfoRaw['fsSize'] { const filesystems: NomadDiskInfoRaw['fsSize'] = [] const seen = new Set() function traverse(dev: LSBlockDevice) { // Try to find matching filesystem const fs = fsSize.find((f) => matchesDevice(f.fs, dev.name)) if (fs && !seen.has(fs.fs)) { filesystems.push(fs) seen.add(fs.fs) } // Traverse children recursively if (dev.children) { dev.children.forEach((child) => traverse(child)) } } traverse(device) return filesystems } export function matchesDevice(fsPath: string, deviceName: string): boolean { // Remove /dev/ and /dev/mapper/ prefixes const normalized = fsPath.replace('/dev/mapper/', '').replace('/dev/', '') // Direct match (covers /dev/sda1 ↔ sda1, /dev/nvme0n1p1 ↔ nvme0n1p1) if (normalized === deviceName) { return true } // LVM/device-mapper: e.g., /dev/mapper/ubuntu--vg-ubuntu--lv contains "ubuntu--lv" if (fsPath.startsWith('/dev/mapper/') && fsPath.includes(deviceName)) { return true } return false } export function determineFileType(filename: string): 'image' | 'pdf' | 'text' | 'zim' | 'unknown' { const ext = path.extname(filename).toLowerCase() if (['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp'].includes(ext)) { return 'image' } else if (ext === '.pdf') { return 'pdf' } else if (['.txt', '.md', '.docx', '.rtf'].includes(ext)) { return 'text' } else if (ext === '.zim') { return 'zim' } else { return 'unknown' } }
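// Illustrative use of the getFile overloads above (a sketch, not part of the
// original file): the returnType literal narrows the resolved type at the call site.
async function demoGetFile(filePath: string): Promise<void> {
  const text = await getFile(filePath, 'string') // string | null
  const bytes = await getFile(filePath, 'buffer') // Buffer | null
  if (text !== null) console.log(text.slice(0, 80))
  if (bytes !== null) console.log(`size: ${bytes.length} bytes`)
}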
/** * Sanitize a filename by removing potentially dangerous characters. * @param filename The original filename * @returns The sanitized filename */ export function sanitizeFilename(filename: string): string { return filename.replace(/[^a-zA-Z0-9._-]/g, '_') } ================================================ FILE: admin/app/utils/misc.ts ================================================ export function formatSpeed(bytesPerSecond: number): string { if (bytesPerSecond < 1024) return `${bytesPerSecond.toFixed(0)} B/s` if (bytesPerSecond < 1024 * 1024) return `${(bytesPerSecond / 1024).toFixed(1)} KB/s` return `${(bytesPerSecond / (1024 * 1024)).toFixed(1)} MB/s` } export function toTitleCase(str: string): string { return str .toLowerCase() .split(' ') .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) .join(' ') } export function parseBoolean(value: any): boolean { if (typeof value === 'boolean') return value if (typeof value === 'string') { const lower = value.toLowerCase() return lower === 'true' || lower === '1' } if (typeof value === 'number') { return value === 1 } return false } ================================================ FILE: admin/app/utils/version.ts ================================================ /** * Compare two semantic version strings to determine if the first is newer than the second. * @param version1 - The version to check (e.g., "1.25.0") * @param version2 - The current version (e.g., "1.24.0") * @returns true if version1 is newer than version2 */ export function isNewerVersion(version1: string, version2: string, includePreReleases = false): boolean { const normalize = (v: string) => v.replace(/^v/, '') const [base1, pre1] = normalize(version1).split('-') const [base2, pre2] = normalize(version2).split('-') // If pre-releases are not included and version1 is a pre-release, don't consider it newer if (!includePreReleases && pre1) { return false } const v1Parts = base1.split('.').map((p) => parseInt(p, 10) || 0) const v2Parts = base2.split('.').map((p) => parseInt(p, 10) || 0) const maxLen = Math.max(v1Parts.length, v2Parts.length) for (let i = 0; i < maxLen; i++) { const a = v1Parts[i] || 0 const b = v2Parts[i] || 0 if (a > b) return true if (a < b) return false } // Base versions equal — GA > RC, RC.n+1 > RC.n if (!pre1 && pre2) return true // v1 is GA, v2 is RC → v1 is newer if (pre1 && !pre2) return false // v1 is RC, v2 is GA → v2 is newer if (!pre1 && !pre2) return false // both GA, equal // Both prerelease: compare numeric suffix (e.g. "rc.2" vs "rc.1") const pre1Num = parseInt(pre1.split('.')[1], 10) || 0 const pre2Num = parseInt(pre2.split('.')[1], 10) || 0 return pre1Num > pre2Num } /** * Parse the major version number from a tag string. * Strips the 'v' prefix if present. * @param tag - Version tag (e.g., "v3.8.1", "10.19.4") * @returns The major version number */ export function parseMajorVersion(tag: string): number { const normalized = tag.replace(/^v/, '') const major = parseInt(normalized.split('.')[0], 10) return isNaN(major) ? 0 : major }
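A few worked cases for the comparison logic above, read directly from the implementation (shown as an illustrative snippet, not a file from the repository):

import { isNewerVersion } from './version.js' // relative path is illustrative

isNewerVersion('1.25.0', '1.24.0') // true  — higher base version
isNewerVersion('2.0.0-rc.1', '1.9.0') // false — pre-releases ignored by default
isNewerVersion('2.0.0-rc.1', '1.9.0', true) // true  — pre-releases opted in
isNewerVersion('2.0.0', '2.0.0-rc.2') // true  — GA is newer than its own RC
isNewerVersion('2.0.0-rc.2', '2.0.0-rc.1', true) // true  — higher RC number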
================================================ FILE: admin/app/validators/benchmark.ts ================================================ import vine from '@vinejs/vine' export const runBenchmarkValidator = vine.compile( vine.object({ benchmark_type: vine.enum(['full', 'system', 'ai']).optional(), }) ) export const submitBenchmarkValidator = vine.compile( vine.object({ benchmark_id: vine.string().optional(), }) ) ================================================ FILE: admin/app/validators/chat.ts ================================================ import vine from '@vinejs/vine' export const createSessionSchema = vine.compile( vine.object({ title: vine.string().trim().minLength(1).maxLength(200), model: vine.string().trim().optional(), }) ) export const updateSessionSchema = vine.compile( vine.object({ title: vine.string().trim().minLength(1).maxLength(200).optional(), model: vine.string().trim().optional(), }) ) export const addMessageSchema = vine.compile( vine.object({ role: vine.enum(['system', 'user', 'assistant'] as const), content: vine.string().trim().minLength(1), }) ) ================================================ FILE: admin/app/validators/common.ts ================================================ import vine from '@vinejs/vine' /** * Checks whether a URL points to a loopback or link-local address. * Used to prevent SSRF — the server should not fetch from localhost * or link-local/metadata endpoints (e.g. cloud instance metadata at 169.254.x.x). * * RFC1918 private ranges (10.x, 172.16-31.x, 192.168.x) are intentionally * ALLOWED because NOMAD is a LAN appliance and users may host content * mirrors on their local network. * * Throws an error if the URL is a loopback or link-local address. */ export function assertNotPrivateUrl(urlString: string): void { const parsed = new URL(urlString) const hostname = parsed.hostname.toLowerCase() const blockedPatterns = [ /^localhost$/, /^127\.\d+\.\d+\.\d+$/, /^0\.0\.0\.0$/, /^169\.254\.\d+\.\d+$/, // Link-local / cloud metadata /^\[::1\]$/, /^\[?fe80:/i, // IPv6 link-local ] if (blockedPatterns.some((re) => re.test(hostname))) { throw new Error(`Download URL must not point to a loopback or link-local address: ${hostname}`) } }
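// Illustrative check (a sketch, not part of the original file): loopback and
// link-local targets throw, while RFC1918 LAN hosts pass through untouched.
function demoSsrfGuard(): void {
  for (const candidate of ['http://169.254.169.254/latest/meta-data', 'http://192.168.1.50:8080/file.zim']) {
    try {
      assertNotPrivateUrl(candidate)
      console.log(`allowed: ${candidate}`)
    } catch {
      console.log(`blocked: ${candidate}`)
    }
  }
}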
export const remoteDownloadValidator = vine.compile( vine.object({ url: vine .string() .url({ require_tld: false }) // Allow LAN URLs (e.g. http://my-nas:8080/file.zim) .trim(), }) ) export const remoteDownloadWithMetadataValidator = vine.compile( vine.object({ url: vine .string() .url({ require_tld: false }) // Allow LAN URLs .trim(), metadata: vine .object({ title: vine.string().trim().minLength(1), summary: vine.string().trim().optional(), author: vine.string().trim().optional(), size_bytes: vine.number().optional(), }) .optional(), }) ) export const remoteDownloadValidatorOptional = vine.compile( vine.object({ url: vine .string() .url({ require_tld: false }) // Allow LAN URLs .trim() .optional(), }) ) export const filenameParamValidator = vine.compile( vine.object({ params: vine.object({ filename: vine.string().trim().minLength(1).maxLength(4096), }), }) ) export const downloadCollectionValidator = vine.compile( vine.object({ slug: vine.string(), }) ) export const downloadCategoryTierValidator = vine.compile( vine.object({ categorySlug: vine.string().trim().minLength(1), tierSlug: vine.string().trim().minLength(1), }) ) export const selectWikipediaValidator = vine.compile( vine.object({ optionId: vine.string().trim().minLength(1), }) ) const resourceUpdateInfoBase = vine.object({ resource_id: vine.string().trim().minLength(1), resource_type: vine.enum(['zim', 'map'] as const), installed_version: vine.string().trim(), latest_version: vine.string().trim().minLength(1), download_url: vine.string().url({ require_tld: false }).trim(), }) export const applyContentUpdateValidator = vine.compile(resourceUpdateInfoBase) export const applyAllContentUpdatesValidator = vine.compile( vine.object({ updates: vine .array(resourceUpdateInfoBase) .minLength(1), }) ) ================================================ FILE: admin/app/validators/curated_collections.ts ================================================ import vine from '@vinejs/vine' // ---- Versioned resource validators (with id + version) ---- export const specResourceValidator = vine.object({ id: vine.string(), version: vine.string(), title: vine.string(), description: vine.string(), url: vine.string().url(), size_mb: vine.number().min(0).optional(), }) // ---- ZIM Categories spec (versioned) ---- export const zimCategoriesSpecSchema = vine.object({ spec_version: vine.string(), categories: vine.array( vine.object({ name: vine.string(), slug: vine.string(), icon: vine.string(), description: vine.string(), language: vine.string().minLength(2).maxLength(5), tiers: vine.array( vine.object({ name: vine.string(), slug: vine.string(), description: vine.string(), recommended: vine.boolean().optional(), includesTier: vine.string().optional(), resources: vine.array(specResourceValidator), }) ), }) ), }) // ---- Maps spec (versioned) ---- export const mapsSpecSchema = vine.object({ spec_version: vine.string(), collections: vine.array( vine.object({ slug: vine.string(), name: vine.string(), description: vine.string(), icon: vine.string(), language: vine.string().minLength(2).maxLength(5), resources: vine.array(specResourceValidator).minLength(1), }) ).minLength(1), }) // ---- Wikipedia spec (versioned) ---- export const wikipediaSpecSchema = vine.object({ spec_version: vine.string(), options: vine.array( vine.object({ id: vine.string(), name: vine.string(), description: vine.string(), size_mb: vine.number().min(0), url: vine.string().url().nullable(), version: vine.string().nullable(), }) ).minLength(1), }) // ---- Wikipedia validators (used by ZimService) ---- export const wikipediaOptionSchema = vine.object({ id: vine.string(), name: vine.string(), description: vine.string(), size_mb:
vine.number().min(0), url: vine.string().url().nullable(), }) export const wikipediaOptionsFileSchema = vine.object({ options: vine.array(wikipediaOptionSchema).minLength(1), }) ================================================ FILE: admin/app/validators/download.ts ================================================ import vine from '@vinejs/vine' export const downloadJobsByFiletypeSchema = vine.compile( vine.object({ params: vine.object({ filetype: vine.string(), }), }) ) export const modelNameSchema = vine.compile( vine.object({ model: vine.string(), }) ) ================================================ FILE: admin/app/validators/ollama.ts ================================================ import vine from '@vinejs/vine' export const chatSchema = vine.compile( vine.object({ model: vine.string().trim().minLength(1), messages: vine.array( vine.object({ role: vine.enum(['system', 'user', 'assistant'] as const), content: vine.string(), }) ), stream: vine.boolean().optional(), sessionId: vine.number().positive().optional(), }) ) export const getAvailableModelsSchema = vine.compile( vine.object({ sort: vine.enum(['pulls', 'name'] as const).optional(), recommendedOnly: vine.boolean().optional(), query: vine.string().trim().optional(), limit: vine.number().positive().optional(), force: vine.boolean().optional(), }) ) ================================================ FILE: admin/app/validators/rag.ts ================================================ import vine from '@vinejs/vine' export const getJobStatusSchema = vine.compile( vine.object({ filePath: vine.string(), }) ) export const deleteFileSchema = vine.compile( vine.object({ source: vine.string(), }) ) ================================================ FILE: admin/app/validators/settings.ts ================================================ import vine from "@vinejs/vine"; import { SETTINGS_KEYS } from "../../constants/kv_store.js"; export const updateSettingSchema = vine.compile(vine.object({ key: vine.enum(SETTINGS_KEYS), value: vine.any().optional(), })) ================================================ FILE: admin/app/validators/system.ts ================================================ import vine from '@vinejs/vine' export const installServiceValidator = vine.compile( vine.object({ service_name: vine.string().trim(), }) ) export const affectServiceValidator = vine.compile( vine.object({ service_name: vine.string().trim(), action: vine.enum(['start', 'stop', 'restart']), }) ) export const subscribeToReleaseNotesValidator = vine.compile( vine.object({ email: vine.string().email().trim(), }) ) export const checkLatestVersionValidator = vine.compile( vine.object({ force: vine.boolean().optional(), // Optional flag to force bypassing cache and checking for updates immediately }) ) export const updateServiceValidator = vine.compile( vine.object({ service_name: vine.string().trim(), target_version: vine.string().trim(), }) ) ================================================ FILE: admin/app/validators/zim.ts ================================================ import vine from '@vinejs/vine' export const listRemoteZimValidator = vine.compile( vine.object({ start: vine.number().min(0).optional(), count: vine.number().min(1).max(100).optional(), query: vine.string().optional(), }) ) ================================================ FILE: admin/bin/console.ts ================================================ /* |-------------------------------------------------------------------------- | Ace entry point 
|-------------------------------------------------------------------------- | | The "console.ts" file is the entrypoint for booting the AdonisJS | command-line framework and executing commands. | | Commands do not boot the application, unless the currently running command | has "options.startApp" flag set to true. | */ import 'reflect-metadata' import { Ignitor, prettyPrintError } from '@adonisjs/core' /** * URL to the application root. AdonisJS need it to resolve * paths to file and directories for scaffolding commands */ const APP_ROOT = new URL('../', import.meta.url) /** * The importer is used to import files in context of the * application. */ const IMPORTER = (filePath: string) => { if (filePath.startsWith('./') || filePath.startsWith('../')) { return import(new URL(filePath, APP_ROOT).href) } return import(filePath) } new Ignitor(APP_ROOT, { importer: IMPORTER }) .tap((app) => { app.booting(async () => { await import('#start/env') }) app.listen('SIGTERM', () => app.terminate()) app.listenIf(app.managedByPm2, 'SIGINT', () => app.terminate()) }) .ace() .handle(process.argv.splice(2)) .catch((error) => { process.exitCode = 1 prettyPrintError(error) }) ================================================ FILE: admin/bin/server.ts ================================================ /* |-------------------------------------------------------------------------- | HTTP server entrypoint |-------------------------------------------------------------------------- | | The "server.ts" file is the entrypoint for starting the AdonisJS HTTP | server. Either you can run this file directly or use the "serve" | command to run this file and monitor file changes | */ import 'reflect-metadata' import { Ignitor, prettyPrintError } from '@adonisjs/core' /** * URL to the application root. AdonisJS need it to resolve * paths to file and directories for scaffolding commands */ const APP_ROOT = new URL('../', import.meta.url) /** * The importer is used to import files in context of the * application. */ const IMPORTER = (filePath: string) => { if (filePath.startsWith('./') || filePath.startsWith('../')) { return import(new URL(filePath, APP_ROOT).href) } return import(filePath) } new Ignitor(APP_ROOT, { importer: IMPORTER }) .tap((app) => { app.booting(async () => { await import('#start/env') }) app.listen('SIGTERM', () => app.terminate()) app.listenIf(app.managedByPm2, 'SIGINT', () => app.terminate()) app.ready(async () => { try { const collectionManifestService = new (await import('#services/collection_manifest_service')).CollectionManifestService() await collectionManifestService.reconcileFromFilesystem() } catch (error) { // Catch and log any errors during reconciliation to prevent the server from crashing console.error('Error during collection manifest reconciliation:', error) } }) }) .httpServer() .start() .catch((error) => { process.exitCode = 1 prettyPrintError(error) }) ================================================ FILE: admin/bin/test.ts ================================================ /* |-------------------------------------------------------------------------- | Test runner entrypoint |-------------------------------------------------------------------------- | | The "test.ts" file is the entrypoint for running tests using Japa. | | Either you can run this file directly or use the "test" | command to run this file and monitor file changes. 
| */ process.env.NODE_ENV = 'test' import 'reflect-metadata' import { Ignitor, prettyPrintError } from '@adonisjs/core' import { configure, processCLIArgs, run } from '@japa/runner' /** * URL to the application root. AdonisJS need it to resolve * paths to file and directories for scaffolding commands */ const APP_ROOT = new URL('../', import.meta.url) /** * The importer is used to import files in context of the * application. */ const IMPORTER = (filePath: string) => { if (filePath.startsWith('./') || filePath.startsWith('../')) { return import(new URL(filePath, APP_ROOT).href) } return import(filePath) } new Ignitor(APP_ROOT, { importer: IMPORTER }) .tap((app) => { app.booting(async () => { await import('#start/env') }) app.listen('SIGTERM', () => app.terminate()) app.listenIf(app.managedByPm2, 'SIGINT', () => app.terminate()) }) .testRunner() .configure(async (app) => { const { runnerHooks, ...config } = await import('../tests/bootstrap.js') processCLIArgs(process.argv.splice(2)) configure({ ...app.rcFile.tests, ...config, ...{ setup: runnerHooks.setup, teardown: runnerHooks.teardown.concat([() => app.terminate()]), }, }) }) .run(() => run()) .catch((error) => { process.exitCode = 1 prettyPrintError(error) }) ================================================ FILE: admin/commands/benchmark/results.ts ================================================ import { BaseCommand, flags } from '@adonisjs/core/ace' import type { CommandOptions } from '@adonisjs/core/types/ace' export default class BenchmarkResults extends BaseCommand { static commandName = 'benchmark:results' static description = 'Display benchmark results' @flags.boolean({ description: 'Show only the latest result', alias: 'l' }) declare latest: boolean @flags.string({ description: 'Output format (table, json)', default: 'table' }) declare format: string @flags.string({ description: 'Show specific benchmark by ID', alias: 'i' }) declare id: string static options: CommandOptions = { startApp: true, } async run() { const { DockerService } = await import('#services/docker_service') const { BenchmarkService } = await import('#services/benchmark_service') const dockerService = new DockerService() const benchmarkService = new BenchmarkService(dockerService) try { let results if (this.id) { const result = await benchmarkService.getResultById(this.id) results = result ? [result] : [] } else if (this.latest) { const result = await benchmarkService.getLatestResult() results = result ? 
[result] : [] } else { results = await benchmarkService.getAllResults() } if (results.length === 0) { this.logger.info('No benchmark results found.') this.logger.info('Run "node ace benchmark:run" to create a benchmark.') return } if (this.format === 'json') { console.log(JSON.stringify(results, null, 2)) return } // Table format for (const result of results) { this.logger.info('') this.logger.info(`=== Benchmark ${result.benchmark_id} ===`) this.logger.info(`Type: ${result.benchmark_type}`) this.logger.info(`Date: ${result.created_at}`) this.logger.info('') this.logger.info('Hardware:') this.logger.info(` CPU: ${result.cpu_model}`) this.logger.info(` Cores: ${result.cpu_cores} physical, ${result.cpu_threads} threads`) this.logger.info(` RAM: ${Math.round(result.ram_bytes / (1024 * 1024 * 1024))} GB`) this.logger.info(` Disk: ${result.disk_type}`) if (result.gpu_model) { this.logger.info(` GPU: ${result.gpu_model}`) } this.logger.info('') this.logger.info('Scores:') this.logger.info(` CPU: ${result.cpu_score.toFixed(2)}`) this.logger.info(` Memory: ${result.memory_score.toFixed(2)}`) this.logger.info(` Disk Read: ${result.disk_read_score.toFixed(2)}`) this.logger.info(` Disk Write: ${result.disk_write_score.toFixed(2)}`) if (result.ai_tokens_per_second) { this.logger.info(` AI Tokens/sec: ${result.ai_tokens_per_second.toFixed(2)}`) this.logger.info(` AI TTFT: ${result.ai_time_to_first_token?.toFixed(2)} ms`) } this.logger.info('') this.logger.info(`NOMAD Score: ${result.nomad_score.toFixed(2)} / 100`) if (result.submitted_to_repository) { this.logger.info(`Submitted: Yes (${result.repository_id})`) } else { this.logger.info('Submitted: No') } this.logger.info('') } this.logger.info(`Total results: ${results.length}`) } catch (error) { this.logger.error(`Failed to retrieve results: ${error.message}`) this.exitCode = 1 } } } ================================================ FILE: admin/commands/benchmark/run.ts ================================================ import { BaseCommand, flags } from '@adonisjs/core/ace' import type { CommandOptions } from '@adonisjs/core/types/ace' export default class BenchmarkRun extends BaseCommand { static commandName = 'benchmark:run' static description = 'Run system and/or AI benchmarks to measure server performance' @flags.boolean({ description: 'Run system benchmarks only (CPU, memory, disk)', alias: 's' }) declare systemOnly: boolean @flags.boolean({ description: 'Run AI benchmark only', alias: 'a' }) declare aiOnly: boolean @flags.boolean({ description: 'Submit results to repository after completion', alias: 'S' }) declare submit: boolean static options: CommandOptions = { startApp: true, } async run() { const { DockerService } = await import('#services/docker_service') const { BenchmarkService } = await import('#services/benchmark_service') const dockerService = new DockerService() const benchmarkService = new BenchmarkService(dockerService) // Determine benchmark type let benchmarkType: 'full' | 'system' | 'ai' = 'full' if (this.systemOnly) { benchmarkType = 'system' } else if (this.aiOnly) { benchmarkType = 'ai' } this.logger.info(`Starting ${benchmarkType} benchmark...`) this.logger.info('') try { // Run the benchmark let result switch (benchmarkType) { case 'system': this.logger.info('Running system benchmarks (CPU, memory, disk)...') result = await benchmarkService.runSystemBenchmarks() break case 'ai': this.logger.info('Running AI benchmark...') result = await benchmarkService.runAIBenchmark() break default: this.logger.info('Running full benchmark 
suite...') result = await benchmarkService.runFullBenchmark() } // Display results this.logger.info('') this.logger.success('Benchmark completed!') this.logger.info('') this.logger.info('=== Hardware Info ===') this.logger.info(`CPU: ${result.cpu_model}`) this.logger.info(`Cores: ${result.cpu_cores} physical, ${result.cpu_threads} threads`) this.logger.info(`RAM: ${Math.round(result.ram_bytes / (1024 * 1024 * 1024))} GB`) this.logger.info(`Disk Type: ${result.disk_type}`) if (result.gpu_model) { this.logger.info(`GPU: ${result.gpu_model}`) } this.logger.info('') this.logger.info('=== Benchmark Scores ===') this.logger.info(`CPU Score: ${result.cpu_score.toFixed(2)}`) this.logger.info(`Memory Score: ${result.memory_score.toFixed(2)}`) this.logger.info(`Disk Read Score: ${result.disk_read_score.toFixed(2)}`) this.logger.info(`Disk Write Score: ${result.disk_write_score.toFixed(2)}`) if (result.ai_tokens_per_second) { this.logger.info(`AI Tokens/sec: ${result.ai_tokens_per_second.toFixed(2)}`) this.logger.info(`AI Time to First Token: ${result.ai_time_to_first_token?.toFixed(2)} ms`) this.logger.info(`AI Model: ${result.ai_model_used}`) } this.logger.info('') this.logger.info(`NOMAD Score: ${result.nomad_score.toFixed(2)} / 100`) this.logger.info('') this.logger.info(`Benchmark ID: ${result.benchmark_id}`) // Submit if requested if (this.submit) { this.logger.info('') this.logger.info('Submitting results to repository...') try { const submitResult = await benchmarkService.submitToRepository(result.benchmark_id) this.logger.success(`Results submitted! Repository ID: ${submitResult.repository_id}`) this.logger.info(`Your percentile: ${submitResult.percentile}%`) } catch (error) { this.logger.error(`Failed to submit: ${error.message}`) } } } catch (error) { this.logger.error(`Benchmark failed: ${error.message}`) this.exitCode = 1 } } } ================================================ FILE: admin/commands/benchmark/submit.ts ================================================ import { BaseCommand, flags } from '@adonisjs/core/ace' import type { CommandOptions } from '@adonisjs/core/types/ace' export default class BenchmarkSubmit extends BaseCommand { static commandName = 'benchmark:submit' static description = 'Submit benchmark results to the community repository' @flags.string({ description: 'Benchmark ID to submit (defaults to latest)', alias: 'i' }) declare benchmarkId: string @flags.boolean({ description: 'Skip confirmation prompt', alias: 'y' }) declare yes: boolean static options: CommandOptions = { startApp: true, } async run() { const { DockerService } = await import('#services/docker_service') const { BenchmarkService } = await import('#services/benchmark_service') const dockerService = new DockerService() const benchmarkService = new BenchmarkService(dockerService) try { // Get the result to submit const result = this.benchmarkId ? 
await benchmarkService.getResultById(this.benchmarkId) : await benchmarkService.getLatestResult() if (!result) { this.logger.error('No benchmark result found.') this.logger.info('Run "node ace benchmark:run" first to create a benchmark.') this.exitCode = 1 return } if (result.submitted_to_repository) { this.logger.warning(`Benchmark ${result.benchmark_id} has already been submitted.`) this.logger.info(`Repository ID: ${result.repository_id}`) return } // Show what will be submitted this.logger.info('') this.logger.info('=== Data to be submitted ===') this.logger.info('') this.logger.info('Hardware Information:') this.logger.info(` CPU Model: ${result.cpu_model}`) this.logger.info(` CPU Cores: ${result.cpu_cores}`) this.logger.info(` CPU Threads: ${result.cpu_threads}`) this.logger.info(` RAM: ${Math.round(result.ram_bytes / (1024 * 1024 * 1024))} GB`) this.logger.info(` Disk Type: ${result.disk_type}`) if (result.gpu_model) { this.logger.info(` GPU: ${result.gpu_model}`) } this.logger.info('') this.logger.info('Benchmark Scores:') this.logger.info(` CPU Score: ${result.cpu_score.toFixed(2)}`) this.logger.info(` Memory Score: ${result.memory_score.toFixed(2)}`) this.logger.info(` Disk Read: ${result.disk_read_score.toFixed(2)}`) this.logger.info(` Disk Write: ${result.disk_write_score.toFixed(2)}`) if (result.ai_tokens_per_second) { this.logger.info(` AI Tokens/sec: ${result.ai_tokens_per_second.toFixed(2)}`) this.logger.info(` AI TTFT: ${result.ai_time_to_first_token?.toFixed(2)} ms`) } this.logger.info(` NOMAD Score: ${result.nomad_score.toFixed(2)}`) this.logger.info('') this.logger.info('Privacy Notice:') this.logger.info(' - Only the information shown above will be submitted') this.logger.info(' - No IP addresses, hostnames, or personal data is collected') this.logger.info(' - Submissions are completely anonymous') this.logger.info('') // Confirm submission if (!this.yes) { const confirm = await this.prompt.confirm( 'Do you want to submit this benchmark to the community repository?' 
) if (!confirm) { this.logger.info('Submission cancelled.') return } } // Submit this.logger.info('Submitting benchmark...') const submitResult = await benchmarkService.submitToRepository(result.benchmark_id) this.logger.success('Benchmark submitted successfully!') this.logger.info('') this.logger.info(`Repository ID: ${submitResult.repository_id}`) this.logger.info(`Your percentile: ${submitResult.percentile}%`) this.logger.info('') this.logger.info('Thank you for contributing to the NOMAD community!') } catch (error) { this.logger.error(`Submission failed: ${error.message}`) this.exitCode = 1 } } } ================================================ FILE: admin/commands/queue/work.ts ================================================ import { BaseCommand, flags } from '@adonisjs/core/ace' import type { CommandOptions } from '@adonisjs/core/types/ace' import { Worker } from 'bullmq' import queueConfig from '#config/queue' import { RunDownloadJob } from '#jobs/run_download_job' import { DownloadModelJob } from '#jobs/download_model_job' import { RunBenchmarkJob } from '#jobs/run_benchmark_job' import { EmbedFileJob } from '#jobs/embed_file_job' import { CheckUpdateJob } from '#jobs/check_update_job' import { CheckServiceUpdatesJob } from '#jobs/check_service_updates_job' export default class QueueWork extends BaseCommand { static commandName = 'queue:work' static description = 'Start processing jobs from the queue' @flags.string({ description: 'Queue name to process' }) declare queue: string @flags.boolean({ description: 'Process all queues automatically' }) declare all: boolean static options: CommandOptions = { startApp: true, staysAlive: true, } async run() { // Validate that either --queue or --all is provided if (!this.queue && !this.all) { this.logger.error('You must specify either --queue=<name> or --all') process.exit(1) } if (this.queue && this.all) { this.logger.error('Cannot specify both --queue and --all flags') process.exit(1) } const [jobHandlers, allQueues] = await this.loadJobHandlers() // Determine which queues to process const queuesToProcess = this.all ?
Array.from(allQueues.values()) : [this.queue] this.logger.info(`Starting workers for queues: ${queuesToProcess.join(', ')}`) const workers: Worker[] = [] // Create a worker for each queue for (const queueName of queuesToProcess) { const worker = new Worker( queueName, async (job) => { this.logger.info(`[${queueName}] Processing job: ${job.id} of type: ${job.name}`) const jobHandler = jobHandlers.get(job.name) if (!jobHandler) { throw new Error(`No handler found for job: ${job.name}`) } return await jobHandler.handle(job) }, { connection: queueConfig.connection, concurrency: this.getConcurrencyForQueue(queueName), autorun: true, } ) worker.on('failed', async (job, err) => { this.logger.error(`[${queueName}] Job failed: ${job?.id}, Error: ${err.message}`) // If this was a Wikipedia download, mark it as failed in the DB if (job?.data?.filetype === 'zim' && job?.data?.url?.includes('wikipedia_en_')) { try { const { DockerService } = await import('#services/docker_service') const { ZimService } = await import('#services/zim_service') const dockerService = new DockerService() const zimService = new ZimService(dockerService) await zimService.onWikipediaDownloadComplete(job.data.url, false) } catch (e: any) { this.logger.error( `[${queueName}] Failed to update Wikipedia status: ${e.message}` ) } } }) worker.on('completed', (job) => { this.logger.info(`[${queueName}] Job completed: ${job.id}`) }) workers.push(worker) this.logger.info(`Worker started for queue: ${queueName}`) } // Schedule nightly update checks (idempotent, will persist over restarts) await CheckUpdateJob.scheduleNightly() await CheckServiceUpdatesJob.scheduleNightly() // Graceful shutdown for all workers process.on('SIGTERM', async () => { this.logger.info('SIGTERM received. Shutting down workers...') await Promise.all(workers.map((worker) => worker.close())) this.logger.info('All workers shut down gracefully.') process.exit(0) }) } private async loadJobHandlers(): Promise<[Map<string, any>, Map<string, string>]> { const handlers = new Map() const queues = new Map() handlers.set(RunDownloadJob.key, new RunDownloadJob()) handlers.set(DownloadModelJob.key, new DownloadModelJob()) handlers.set(RunBenchmarkJob.key, new RunBenchmarkJob()) handlers.set(EmbedFileJob.key, new EmbedFileJob()) handlers.set(CheckUpdateJob.key, new CheckUpdateJob()) handlers.set(CheckServiceUpdatesJob.key, new CheckServiceUpdatesJob()) queues.set(RunDownloadJob.key, RunDownloadJob.queue) queues.set(DownloadModelJob.key, DownloadModelJob.queue) queues.set(RunBenchmarkJob.key, RunBenchmarkJob.queue) queues.set(EmbedFileJob.key, EmbedFileJob.queue) queues.set(CheckUpdateJob.key, CheckUpdateJob.queue) queues.set(CheckServiceUpdatesJob.key, CheckServiceUpdatesJob.queue) return [handlers, queues] } /** * Get concurrency setting for a specific queue * Can be customized per queue based on workload characteristics */ private getConcurrencyForQueue(queueName: string): number { const concurrencyMap: Record<string, number> = { [RunDownloadJob.queue]: 3, [DownloadModelJob.queue]: 2, // Lower concurrency for resource-intensive model downloads [RunBenchmarkJob.queue]: 1, // Run benchmarks one at a time for accurate results [EmbedFileJob.queue]: 2, // Lower concurrency for embedding jobs, can be resource intensive [CheckUpdateJob.queue]: 1, // No need to run more than one update check at a time default: 3, } return concurrencyMap[queueName] || concurrencyMap.default } } ================================================ FILE: admin/config/app.ts ================================================ import env from '#start/env'
import app from '@adonisjs/core/services/app' import { Secret } from '@adonisjs/core/helpers' import { defineConfig } from '@adonisjs/core/http' /** * The app key is used for encrypting cookies, generating signed URLs, * and by the "encryption" module. * * The encryption module will fail to decrypt data if the key is lost or * changed. Therefore it is recommended to keep the app key secure. */ export const appKey = new Secret(env.get('APP_KEY')) /** * The configuration settings used by the HTTP server */ export const http = defineConfig({ generateRequestId: true, allowMethodSpoofing: false, /** * Enabling async local storage will let you access HTTP context * from anywhere inside your application. */ useAsyncLocalStorage: false, /** * Manage cookies configuration. The settings for the session id cookie are * defined inside the "config/session.ts" file. */ cookie: { domain: '', path: '/', maxAge: '2h', httpOnly: true, secure: app.inProduction, sameSite: 'lax', }, }) ================================================ FILE: admin/config/bodyparser.ts ================================================ import { defineConfig } from '@adonisjs/core/bodyparser' const bodyParserConfig = defineConfig({ /** * The bodyparser middleware will parse the request body * for the following HTTP methods. */ allowedMethods: ['POST', 'PUT', 'PATCH', 'DELETE'], /** * Config for the "application/x-www-form-urlencoded" * content-type parser */ form: { convertEmptyStringsToNull: true, types: ['application/x-www-form-urlencoded'], }, /** * Config for the JSON parser */ json: { convertEmptyStringsToNull: true, types: [ 'application/json', 'application/json-patch+json', 'application/vnd.api+json', 'application/csp-report', ], }, /** * Config for the "multipart/form-data" content-type parser. * File uploads are handled by the multipart parser. */ multipart: { /** * Enabling auto process allows bodyparser middleware to * move all uploaded files inside the tmp folder of your * operating system */ autoProcess: true, convertEmptyStringsToNull: true, processManually: [], /** * Maximum limit of data to parse including all files * and fields */ limit: '20mb', types: ['multipart/form-data'], }, }) export default bodyParserConfig ================================================ FILE: admin/config/cors.ts ================================================ import { defineConfig } from '@adonisjs/cors' /** * Configuration options to tweak the CORS policy. The following * options are documented on the official documentation website. * * https://docs.adonisjs.com/guides/security/cors */ const corsConfig = defineConfig({ enabled: true, origin: ['*'], methods: ['GET', 'HEAD', 'POST', 'PUT', 'DELETE'], headers: true, exposeHeaders: [], credentials: true, maxAge: 90, }) export default corsConfig ================================================ FILE: admin/config/database.ts ================================================ import env from '#start/env' import { defineConfig } from '@adonisjs/lucid' const dbConfig = defineConfig({ connection: 'mysql', connections: { mysql: { client: 'mysql2', debug: env.get('NODE_ENV') === 'development', connection: { host: env.get('DB_HOST'), port: env.get('DB_PORT') ?? 3306, // Default MySQL port user: env.get('DB_USER'), password: env.get('DB_PASSWORD'), database: env.get('DB_DATABASE'), ssl: env.get('DB_SSL') ?? 
true, // Default to true }, migrations: { naturalSort: true, paths: ['database/migrations'], }, }, }) export default dbConfig ================================================ FILE: admin/config/hash.ts ================================================ import { defineConfig, drivers } from '@adonisjs/core/hash' import type { InferHashers } from '@adonisjs/core/types' const hashConfig = defineConfig({ default: 'scrypt', list: { scrypt: drivers.scrypt({ cost: 16384, blockSize: 8, parallelization: 1, maxMemory: 33554432, }), }, }) export default hashConfig /** * Inferring types for the list of hashers you have configured * in your application. */ declare module '@adonisjs/core/types' { export interface HashersList extends InferHashers<typeof hashConfig> {} } ================================================ FILE: admin/config/inertia.ts ================================================ import KVStore from '#models/kv_store' import { SystemService } from '#services/system_service' import { defineConfig } from '@adonisjs/inertia' import type { InferSharedProps } from '@adonisjs/inertia/types' const inertiaConfig = defineConfig({ /** * Path to the Edge view that will be used as the root view for Inertia responses */ rootView: 'inertia_layout', /** * Data that should be shared with all rendered pages */ sharedData: { appVersion: () => SystemService.getAppVersion(), environment: process.env.NODE_ENV || 'production', aiAssistantName: async () => { const customName = await KVStore.getValue('ai.assistantCustomName') return (customName && customName.trim()) ? customName : 'AI Assistant' }, }, /** * Options for the server-side rendering */ ssr: { enabled: false, entrypoint: 'inertia/app/ssr.tsx' } }) export default inertiaConfig declare module '@adonisjs/inertia/types' { export interface SharedProps extends InferSharedProps<typeof inertiaConfig> {} } ================================================ FILE: admin/config/logger.ts ================================================ import env from '#start/env' import app from '@adonisjs/core/services/app' import { defineConfig, targets } from '@adonisjs/core/logger' import type { InferLoggers } from '@adonisjs/core/types' const loggerConfig = defineConfig({ default: 'app', /** * The loggers object can be used to define multiple loggers. * By default, we configure only one logger (named "app"). */ loggers: { app: { enabled: true, name: env.get('APP_NAME'), level: env.get('NODE_ENV') === 'production' ? env.get('LOG_LEVEL') : 'debug', // default to 'debug' in non-production envs transport: { targets: targets() .pushIf(!app.inProduction, targets.pretty()) .pushIf(app.inProduction, targets.file({ destination: "/app/storage/logs/admin.log", mkdir: true })) .toArray(), }, }, }, }) export default loggerConfig /** * Inferring types for the list of loggers you have configured * in your application. */ declare module '@adonisjs/core/types' { export interface LoggersList extends InferLoggers<typeof loggerConfig> {} }
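The `sharedData` block in `config/inertia.ts` above exposes `appVersion`, `environment`, and `aiAssistantName` to every rendered page. A minimal sketch of reading those props from a React component through the `SharedProps` augmentation declared at the bottom of that file (the component itself is illustrative, not from the repo):

```tsx
import { usePage } from '@inertiajs/react'
import type { SharedProps } from '@adonisjs/inertia/types'

// Illustrative component: renders the configured assistant name and app version.
export function AssistantBadge() {
  const { aiAssistantName, appVersion } = usePage<SharedProps>().props
  return (
    <span>
      {aiAssistantName} (v{appVersion})
    </span>
  )
}
```

================================================ FILE: admin/config/queue.ts ================================================ import env from '#start/env' const queueConfig = { connection: { host: env.get('REDIS_HOST'), port: env.get('REDIS_PORT') ??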
6379, }, } export default queueConfig ================================================ FILE: admin/config/session.ts ================================================ // import env from '#start/env' // import app from '@adonisjs/core/services/app' // import { defineConfig, stores } from '@adonisjs/session' // const sessionConfig = defineConfig({ // enabled: false, // cookieName: 'adonis-session', // /** // * When set to true, the session id cookie will be deleted // * once the user closes the browser. // */ // clearWithBrowser: false, // /** // * Define how long to keep the session data alive without // * any activity. // */ // age: '2h', // /** // * Configuration for session cookie and the // * cookie store // */ // cookie: { // path: '/', // httpOnly: true, // secure: app.inProduction, // sameSite: 'lax', // }, // /** // * The store to use. Make sure to validate the environment // * variable in order to infer the store name without any // * errors. // */ // store: env.get('SESSION_DRIVER'), // /** // * List of configured stores. Refer documentation to see // * list of available stores and their config. // */ // stores: { // cookie: stores.cookie(), // }, // }) // export default sessionConfig ================================================ FILE: admin/config/shield.ts ================================================ import { defineConfig } from '@adonisjs/shield' const shieldConfig = defineConfig({ /** * Configure CSP policies for your app. Refer documentation * to learn more */ csp: { enabled: false, directives: {}, reportOnly: false, }, /** * Configure CSRF protection options. Refer documentation * to learn more */ csrf: { enabled: false, // TODO: Enable CSRF protection exceptRoutes: [], enableXsrfCookie: true, methods: ['POST', 'PUT', 'PATCH', 'DELETE'], }, /** * Control how your website should be embedded inside * iFrames */ xFrame: { enabled: true, action: 'DENY', }, /** * Force browser to always use HTTPS */ hsts: { enabled: false, // TODO: Enable HSTS in production maxAge: '180 days', }, /** * Disable browsers from sniffing the content type of a * response and always rely on the "content-type" header. */ contentTypeSniffing: { enabled: true, }, }) export default shieldConfig ================================================ FILE: admin/config/static.ts ================================================ import { defineConfig } from '@adonisjs/static' /** * Configuration options to tweak the static files middleware. * The complete set of options are documented on the * official documentation website. * * https://docs.adonisjs.com/guides/static-assets */ const staticServerConfig = defineConfig({ enabled: true, etag: true, lastModified: true, dotFiles: 'ignore', acceptRanges: true, }) export default staticServerConfig ================================================ FILE: admin/config/transmit.ts ================================================ import env from '#start/env' import { defineConfig } from '@adonisjs/transmit' import { redis } from '@adonisjs/transmit/transports' export default defineConfig({ pingInterval: '30s', transport: { driver: redis({ host: env.get('REDIS_HOST'), port: env.get('REDIS_PORT'), keyPrefix: 'transmit:', }) } }) ================================================ FILE: admin/config/vite.ts ================================================ import { defineConfig } from '@adonisjs/vite' const viteBackendConfig = defineConfig({ /** * The output of vite will be written inside this * directory. The path should be relative from * the application root. 
*/ buildDirectory: 'public/assets', /** * The path to the manifest file generated by the * "vite build" command. */ manifestFile: 'public/assets/.vite/manifest.json', /** * Feel free to change the value of the "assetsUrl" to * point to a CDN in production. */ assetsUrl: '/assets', scriptAttributes: { defer: true, }, }) export default viteBackendConfig ================================================ FILE: admin/constants/broadcast.ts ================================================ export const BROADCAST_CHANNELS = { BENCHMARK_PROGRESS: 'benchmark-progress', OLLAMA_MODEL_DOWNLOAD: 'ollama-model-download', SERVICE_INSTALLATION: 'service-installation', SERVICE_UPDATES: 'service-updates', } ================================================ FILE: admin/constants/kv_store.ts ================================================ import { KVStoreKey } from "../types/kv_store.js"; export const SETTINGS_KEYS: KVStoreKey[] = ['chat.suggestionsEnabled', 'chat.lastModel', 'ui.hasVisitedEasySetup', 'ui.theme', 'system.earlyAccess', 'ai.assistantCustomName']; ================================================ FILE: admin/constants/misc.ts ================================================ export const NOMAD_API_DEFAULT_BASE_URL = 'https://api.projectnomad.us' ================================================ FILE: admin/constants/ollama.ts ================================================ import { NomadOllamaModel } from '../types/ollama.js' /** * Fallback list of recommended Ollama models, used in case fetching them from the service fails. */ export const FALLBACK_RECOMMENDED_OLLAMA_MODELS: NomadOllamaModel[] = [ { name: 'llama3.1', description: 'Llama 3.1 is a new state-of-the-art model from Meta available in 8B, 70B and 405B parameter sizes.', estimated_pulls: '109.3M', id: '9fe9c575-e77e-4a51-a743-07359458ee71', first_seen: '2026-01-28T23:37:31.000+00:00', model_last_updated: '1 year ago', tags: [ { name: 'llama3.1:8b-text-q4_1', size: '5.1 GB', context: '128k', input: 'Text', cloud: false, thinking: false }, ], }, { name: 'deepseek-r1', description: 'DeepSeek-R1 is a family of open reasoning models with performance approaching that of leading models, such as O3 and Gemini 2.5 Pro.', estimated_pulls: '77.2M', id: '0b566560-68a6-4964-b0d4-beb3ab1ad694', first_seen: '2026-01-28T23:37:31.000+00:00', model_last_updated: '7 months ago', tags: [ { name: 'deepseek-r1:1.5b', size: '1.1 GB', context: '128k', input: 'Text', cloud: false, thinking: true }, ], }, { name: 'llama3.2', description: "Meta's Llama 3.2 goes small with 1B and 3B models.", estimated_pulls: '54.7M', id: 'c9a1bc23-b290-4501-a913-f7c9bb39c3ad', first_seen: '2026-01-28T23:37:31.000+00:00', model_last_updated: '1 year ago', tags: [ { name: 'llama3.2:1b-text-q2_K', size: '581 MB', context: '128k', input: 'Text', cloud: false, thinking: false }, ], }, ] export const DEFAULT_QUERY_REWRITE_MODEL = 'qwen2.5:3b' // default to qwen2.5 for query rewriting with good balance of text task performance and resource usage /** * Adaptive RAG context limits based on model size. * Smaller models get overwhelmed with too much context, so we cap it. */ export const RAG_CONTEXT_LIMITS: { maxParams: number; maxResults: number; maxTokens: number }[] = [ { maxParams: 3, maxResults: 2, maxTokens: 1000 }, // 1-3B models { maxParams: 8, maxResults: 4, maxTokens: 2500 }, // 4-8B models { maxParams: Infinity, maxResults: 5, maxTokens: 0 }, // 13B+ (no cap) ]
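`RAG_CONTEXT_LIMITS` is an ordered lookup table: the first tier whose `maxParams` is at or above the model's parameter count applies, and the `Infinity` tier guarantees a match for any size. A minimal sketch of resolving a tier, assuming the model's size in billions of parameters is already known (the helper name and import path are illustrative, not from the repo):

```ts
import { RAG_CONTEXT_LIMITS } from '../constants/ollama.js'

// Entries are sorted by ascending maxParams, so the first match is the
// tightest tier that can accommodate the model.
function limitsForModel(paramsBillions: number) {
  return RAG_CONTEXT_LIMITS.find((tier) => paramsBillions <= tier.maxParams)!
}

limitsForModel(3) // => { maxParams: 3, maxResults: 2, maxTokens: 1000 }
limitsForModel(70) // => { maxParams: Infinity, maxResults: 5, maxTokens: 0 } (0 = no cap)
```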
export const SYSTEM_PROMPTS = { default: ` Format all responses using markdown for better readability. Vanilla markdown or GitHub-flavored markdown is preferred. - Use **bold** and *italic* for emphasis. - Use code blocks with language identifiers for code snippets. - Use headers (##, ###) to organize longer responses. - Use bullet points or numbered lists for clarity. - Use tables when presenting structured data. `, rag_context: (context: string) => ` You have access to relevant information from the knowledge base. This context has been retrieved based on semantic similarity to the user's question. [Knowledge Base Context] ${context} IMPORTANT INSTRUCTIONS: 1. If the user's question is directly related to the context above, use this information to provide accurate, detailed answers. 2. Always cite or reference the context when using it (e.g., "According to the information available..." or "Based on the knowledge base..."). 3. If the context is only partially relevant, combine it with your general knowledge but be clear about what comes from the knowledge base. 4. If the context is not relevant to the user's question, you can respond using your general knowledge without forcing the context into your answer. Do not mention the context if it's not relevant. 5. Never fabricate information that isn't in the context or your training data. 6. If you're unsure or you don't have enough information to answer the user's question, acknowledge the limitations. Format your response using markdown for readability. `, chat_suggestions: ` You are a helpful assistant that generates conversation starter suggestions for a survivalist/prepper using an AI assistant. Provide exactly 3 conversation starter topics as direct questions that someone would ask. These should be clear, complete questions that can start meaningful conversations. Examples of good suggestions: - "How do I purify water in an emergency?" - "What are the best foods for long-term storage?" - "Help me create a 72-hour emergency kit" Do NOT use: - Follow-up questions seeking clarification - Vague or incomplete suggestions - Questions that assume prior context - Statements that are not suggestions themselves, such as praise for asking the question - Direct questions or commands to the user Return ONLY the 3 suggestions as a comma-separated list with no additional text, formatting, numbering, or quotation marks. The suggestions should be in title case. Ensure that your suggestions are comma-separated with no conjunctions like "and" or "or". Do not use line breaks, new lines, or extra spacing to separate the suggestions. Format: suggestion1, suggestion2, suggestion3 `, title_generation: `You are a title generator. Given the start of a conversation, generate a concise, descriptive title under 50 characters. Return ONLY the title text with no quotes, punctuation wrapping, or extra formatting.`, query_rewrite: ` You are a query rewriting assistant. Your task is to reformulate the user's latest question to include relevant context from the conversation history. Given the conversation history, rewrite the user's latest question to be a standalone, context-aware search query that will retrieve the most relevant information. Rules: 1. Keep the rewritten query concise (under 150 words) 2. Include key entities, topics, and context from previous messages 3. Make it a clear, searchable query 4. Do NOT answer the question - only rewrite the user's query to be more effective for retrieval 5. Output ONLY the rewritten query, nothing else Examples: Conversation: User: "How do I install Gentoo?"
Assistant: [detailed installation guide] User: "Is an internet connection required to install?" Rewritten Query: "Is an internet connection required to install Gentoo Linux?" --- Conversation: User: "What's the best way to preserve meat?" Assistant: [preservation methods] User: "How long does it last?" Rewritten Query: "How long does preserved meat last using curing or smoking methods?" `, } ================================================ FILE: admin/constants/service_names.ts ================================================ export const SERVICE_NAMES = { KIWIX: 'nomad_kiwix_server', OLLAMA: 'nomad_ollama', QDRANT: 'nomad_qdrant', CYBERCHEF: 'nomad_cyberchef', FLATNOTES: 'nomad_flatnotes', KOLIBRI: 'nomad_kolibri', } ================================================ FILE: admin/constants/zim_extraction.ts ================================================ export const HTML_SELECTORS_TO_REMOVE = [ 'script', 'style', 'nav', 'header', 'footer', 'noscript', 'iframe', 'svg', '.navbox', '.sidebar', '.infobox', '.mw-editsection', '.reference', '.reflist', '.toc', '.noprint', '.mw-jump-link', '.mw-headline-anchor', '[role="navigation"]', '.navbar', '.hatnote', '.ambox', '.sistersitebox', '.portal', '#coordinates', '.geo-nondefault', '.authority-control', ] // Common heading names that usually don't have meaningful content under them export const NON_CONTENT_HEADING_PATTERNS = [ /^see also$/i, /^references$/i, /^external links$/i, /^further reading$/i, /^notes$/i, /^bibliography$/i, /^navigation$/i, ] /** * Batch size for processing ZIM articles to prevent lock timeout errors. * Processing 50 articles at a time balances throughput with job duration. * Typical processing time: 2-5 minutes per batch depending on article complexity. */ export const ZIM_BATCH_SIZE = 50 ================================================ FILE: admin/database/migrations/1751086751801_create_services_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id') table.string('service_name').unique().notNullable() table.string('container_image').notNullable() table.string('container_command').nullable() table.json('container_config').nullable() table.boolean('installed').defaultTo(false) table.string('depends_on').nullable().references('service_name').inTable(this.tableName).onDelete('SET NULL') table.boolean('is_dependency_service').defaultTo(false) table.string('ui_location') table.json('metadata').nullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1763499145832_update_services_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { this.schema.alterTable(this.tableName, (table) => { table.string('friendly_name').nullable() table.string('description').nullable() }) } async down() { this.schema.alterTable(this.tableName, (table) => { table.dropColumn('friendly_name') table.dropColumn('description') }) } } ================================================ FILE: admin/database/migrations/1764912210741_create_curated_collections_table.ts ================================================ import { BaseSchema } from 
'@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'curated_collections' async up() { this.schema.createTable(this.tableName, (table) => { table.string('slug').primary() table.enum('type', ['zim', 'map']).notNullable() table.string('name').notNullable() table.text('description').notNullable() table.string('icon').notNullable() table.string('language').notNullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1764912270123_create_curated_collection_resources_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'curated_collection_resources' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id') table.string('curated_collection_slug').notNullable().references('slug').inTable('curated_collections').onDelete('CASCADE') table.string('title').notNullable() table.string('url').notNullable() table.text('description').notNullable() table.integer('size_mb').notNullable() table.boolean('downloaded').notNullable().defaultTo(false) table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1768170944482_update_services_add_installation_statuses_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { this.schema.alterTable(this.tableName, (table) => { table.string('installation_status').defaultTo('idle').notNullable() }) } async down() { this.schema.alterTable(this.tableName, (table) => { table.dropColumn('installation_status') }) } } ================================================ FILE: admin/database/migrations/1768453747522_update_services_add_icon.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { this.schema.alterTable(this.tableName, (table) => { table.string('icon').nullable() }) } async down() { this.schema.alterTable(this.tableName, (table) => { table.dropColumn('icon') }) } } ================================================ FILE: admin/database/migrations/1769097600001_create_benchmark_results_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'benchmark_results' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id') table.string('benchmark_id').unique().notNullable() table.enum('benchmark_type', ['full', 'system', 'ai']).notNullable() // Hardware information table.string('cpu_model').notNullable() table.integer('cpu_cores').notNullable() table.integer('cpu_threads').notNullable() table.bigInteger('ram_bytes').notNullable() table.enum('disk_type', ['ssd', 'hdd', 'nvme', 'unknown']).notNullable() table.string('gpu_model').nullable() // System benchmark scores table.float('cpu_score').notNullable() table.float('memory_score').notNullable() table.float('disk_read_score').notNullable() table.float('disk_write_score').notNullable() // AI benchmark scores (nullable for system-only 
benchmarks) table.float('ai_tokens_per_second').nullable() table.string('ai_model_used').nullable() table.float('ai_time_to_first_token').nullable() // Composite NOMAD score (0-100) table.float('nomad_score').notNullable() // Repository submission tracking table.boolean('submitted_to_repository').defaultTo(false) table.timestamp('submitted_at').nullable() table.string('repository_id').nullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1769097600002_create_benchmark_settings_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'benchmark_settings' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id') table.string('key').unique().notNullable() table.text('value').nullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1769300000001_add_powered_by_and_display_order_to_services.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { this.schema.alterTable(this.tableName, (table) => { table.string('powered_by').nullable() table.integer('display_order').nullable().defaultTo(100) }) } async down() { this.schema.alterTable(this.tableName, (table) => { table.dropColumn('powered_by') table.dropColumn('display_order') }) } } ================================================ FILE: admin/database/migrations/1769300000002_update_services_friendly_names.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { // Update existing services with new friendly names and powered_by values await this.db.rawQuery(` UPDATE services SET friendly_name = 'Information Library', powered_by = 'Kiwix', display_order = 1, description = 'Offline access to Wikipedia, medical references, how-to guides, and encyclopedias' WHERE service_name = 'nomad_kiwix_serve' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'Education Platform', powered_by = 'Kolibri', display_order = 2, description = 'Interactive learning platform with video courses and exercises' WHERE service_name = 'nomad_kolibri' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'AI Assistant', powered_by = 'Ollama', ui_location = '/chat', display_order = 3, description = 'Local AI chat that runs entirely on your hardware - no internet required' WHERE service_name = 'nomad_ollama' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'Notes', powered_by = 'FlatNotes', display_order = 10, description = 'Simple note-taking app with local storage' WHERE service_name = 'nomad_flatnotes' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'Data Tools', powered_by = 'CyberChef', display_order = 11, description = 'Swiss Army knife for data encoding, encryption, and analysis' WHERE service_name = 'nomad_cyberchef' `) } async down() { // Revert to original names await this.db.rawQuery(` UPDATE services SET friendly_name = 'Kiwix', powered_by = NULL, display_order = NULL, description = 'Offline 
Wikipedia, eBooks, and more' WHERE service_name = 'nomad_kiwix_serve' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'Kolibri', powered_by = NULL, display_order = NULL, description = 'An offline-first education platform for schools and learners' WHERE service_name = 'nomad_kolibri' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'Ollama', powered_by = NULL, display_order = NULL, description = 'Local AI chat that runs entirely on your hardware - no internet required' WHERE service_name = 'nomad_ollama' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'FlatNotes', powered_by = NULL, display_order = NULL, description = 'A simple note-taking app that stores all files locally' WHERE service_name = 'nomad_flatnotes' `) await this.db.rawQuery(` UPDATE services SET friendly_name = 'CyberChef', powered_by = NULL, display_order = NULL, description = 'The Cyber Swiss Army Knife - a web app for encryption, encoding, and data analysis' WHERE service_name = 'nomad_cyberchef' `) } } ================================================ FILE: admin/database/migrations/1769324448000_add_builder_tag_to_benchmark_results.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'benchmark_results' async up() { this.schema.alterTable(this.tableName, (table) => { table.string('builder_tag', 64).nullable() }) } async down() { this.schema.alterTable(this.tableName, (table) => { table.dropColumn('builder_tag') }) } } ================================================ FILE: admin/database/migrations/1769400000001_create_installed_tiers_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'installed_tiers' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id').primary() table.string('category_slug').notNullable().unique() table.string('tier_slug').notNullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1769400000002_create_kv_store_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'kv_store' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id') table.string('key').unique().notNullable() table.text('value').nullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1769500000001_create_wikipedia_selection_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'wikipedia_selections' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id').primary() table.string('option_id').notNullable() table.string('url').nullable() table.string('filename').nullable() table.enum('status', ['none', 'downloading', 'installed', 'failed']).defaultTo('none') table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } 
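The `kv_store` table created above backs the `KVStore.getValue(...)` lookups used in `config/inertia.ts`. A minimal sketch of what an accessor over this schema could look like (the real model lives in `app/models/kv_store.ts` and may differ; `setValue` is an assumed convenience helper):

```ts
import { DateTime } from 'luxon'
import { BaseModel, column } from '@adonisjs/lucid/orm'

// Sketch of a key-value settings model over the kv_store table.
export default class KVStore extends BaseModel {
  static table = 'kv_store'

  @column({ isPrimary: true })
  declare id: number

  @column()
  declare key: string

  @column()
  declare value: string | null

  @column.dateTime({ autoCreate: true })
  declare createdAt: DateTime

  @column.dateTime({ autoCreate: true, autoUpdate: true })
  declare updatedAt: DateTime

  /** Read a value by key, or null when the key is unset. */
  static async getValue(key: string): Promise<string | null> {
    const row = await KVStore.findBy('key', key)
    return row?.value ?? null
  }

  /** Assumed helper: upsert a value under a key. */
  static async setValue(key: string, value: string): Promise<void> {
    await KVStore.updateOrCreate({ key }, { value })
  }
}
```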
================================================ FILE: admin/database/migrations/1769646771604_create_create_chat_sessions_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'chat_sessions' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id') table.string('title').notNullable() table.string('model').nullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1769646798266_create_create_chat_messages_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'chat_messages' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id') table.integer('session_id').unsigned().references('id').inTable('chat_sessions').onDelete('CASCADE') table.enum('role', ['system', 'user', 'assistant']).notNullable() table.text('content').notNullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1769700000001_create_zim_file_metadata_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'zim_file_metadata' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id').primary() table.string('filename').notNullable().unique() table.string('title').notNullable() table.text('summary').nullable() table.string('author').nullable() table.bigInteger('size_bytes').nullable() table.timestamp('created_at') table.timestamp('updated_at') }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1770269324176_add_unique_constraint_to_curated_collection_resources_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'curated_collection_resources' async up() { this.schema.alterTable(this.tableName, (table) => { table.unique(['curated_collection_slug', 'url'], { indexName: 'curated_collection_resources_unique', }) }) } async down() { this.schema.alterTable(this.tableName, (table) => { table.dropUnique(['curated_collection_slug', 'url'], 'curated_collection_resources_unique') }) } } ================================================ FILE: admin/database/migrations/1770273423670_drop_installed_tiers_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'installed_tiers' async up() { this.schema.dropTableIfExists(this.tableName) } async down() { // Recreate the table if we need to rollback this.schema.createTable(this.tableName, (table) => { table.increments('id') table.string('category_slug').notNullable().unique() table.string('tier_slug').notNullable() table.timestamp('created_at', { useTz: true }) table.timestamp('updated_at', { useTz: true }) }) } } ================================================ FILE: 
admin/database/migrations/1770849108030_create_create_collection_manifests_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'collection_manifests' async up() { this.schema.createTable(this.tableName, (table) => { table.string('type').primary() // 'zim_categories' | 'maps' | 'wikipedia' table.string('spec_version').notNullable() table.json('spec_data').notNullable() table.timestamp('fetched_at').notNullable() }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1770849119787_create_create_installed_resources_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'installed_resources' async up() { this.schema.createTable(this.tableName, (table) => { table.increments('id').primary() table.string('resource_id').notNullable() table.enum('resource_type', ['zim', 'map']).notNullable() table.string('collection_ref').nullable() table.string('version').notNullable() table.string('url').notNullable() table.string('file_path').notNullable() table.bigInteger('file_size_bytes').nullable() table.timestamp('installed_at').notNullable() table.unique(['resource_id', 'resource_type']) }) } async down() { this.schema.dropTable(this.tableName) } } ================================================ FILE: admin/database/migrations/1770850092871_create_drop_legacy_curated_tables_table.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { async up() { this.schema.dropTableIfExists('curated_collection_resources') this.schema.dropTableIfExists('curated_collections') this.schema.dropTableIfExists('zim_file_metadata') } async down() { // These tables are legacy and intentionally not recreated } } ================================================ FILE: admin/database/migrations/1771000000001_add_update_fields_to_services.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { this.schema.alterTable(this.tableName, (table) => { table.string('source_repo', 255).nullable() table.string('available_update_version', 50).nullable() table.timestamp('update_checked_at').nullable() }) } async down() { this.schema.alterTable(this.tableName, (table) => { table.dropColumn('source_repo') table.dropColumn('available_update_version') table.dropColumn('update_checked_at') }) } } ================================================ FILE: admin/database/migrations/1771000000002_pin_latest_service_images.ts ================================================ import { BaseSchema } from '@adonisjs/lucid/schema' export default class extends BaseSchema { protected tableName = 'services' async up() { this.defer(async (db) => { // Pin :latest images to specific versions await db .from(this.tableName) .where('container_image', 'ghcr.io/gchq/cyberchef:latest') .update({ container_image: 'ghcr.io/gchq/cyberchef:10.19.4' }) await db .from(this.tableName) .where('container_image', 'dullage/flatnotes:latest') .update({ container_image: 'dullage/flatnotes:v5.5.4' }) await db .from(this.tableName) .where('container_image', 'treehouses/kolibri:latest') .update({ container_image: 'treehouses/kolibri:0.12.8' }) // 
Populate source_repo for services whose images lack the OCI source label const sourceRepos: Record<string, string> = { nomad_kiwix_server: 'https://github.com/kiwix/kiwix-tools', nomad_ollama: 'https://github.com/ollama/ollama', nomad_qdrant: 'https://github.com/qdrant/qdrant', nomad_cyberchef: 'https://github.com/gchq/CyberChef', nomad_flatnotes: 'https://github.com/dullage/flatnotes', nomad_kolibri: 'https://github.com/learningequality/kolibri', } for (const [serviceName, repoUrl] of Object.entries(sourceRepos)) { await db .from(this.tableName) .where('service_name', serviceName) .update({ source_repo: repoUrl }) } }) } async down() { this.defer(async (db) => { await db .from(this.tableName) .where('container_image', 'ghcr.io/gchq/cyberchef:10.19.4') .update({ container_image: 'ghcr.io/gchq/cyberchef:latest' }) await db .from(this.tableName) .where('container_image', 'dullage/flatnotes:v5.5.4') .update({ container_image: 'dullage/flatnotes:latest' }) await db .from(this.tableName) .where('container_image', 'treehouses/kolibri:0.12.8') .update({ container_image: 'treehouses/kolibri:latest' }) }) } } ================================================ FILE: admin/database/seeders/service_seeder.ts ================================================ import Service from '#models/service' import { BaseSeeder } from '@adonisjs/lucid/seeders' import { ModelAttributes } from '@adonisjs/lucid/types/model' import env from '#start/env' import { SERVICE_NAMES } from '../../constants/service_names.js' export default class ServiceSeeder extends BaseSeeder { // Use environment variable with fallback to production default private static NOMAD_STORAGE_ABS_PATH = env.get( 'NOMAD_STORAGE_PATH', '/opt/project-nomad/storage' ) private static DEFAULT_SERVICES: Omit< ModelAttributes<InstanceType<typeof Service>>, 'created_at' | 'updated_at' | 'metadata' | 'id' | 'available_update_version' | 'update_checked_at' >[] = [ { service_name: SERVICE_NAMES.KIWIX, friendly_name: 'Information Library', powered_by: 'Kiwix', display_order: 1, description: 'Offline access to Wikipedia, medical references, how-to guides, and encyclopedias', icon: 'IconBooks', container_image: 'ghcr.io/kiwix/kiwix-serve:3.8.1', source_repo: 'https://github.com/kiwix/kiwix-tools', container_command: '*.zim --address=all', container_config: JSON.stringify({ HostConfig: { RestartPolicy: { Name: 'unless-stopped' }, Binds: [`${ServiceSeeder.NOMAD_STORAGE_ABS_PATH}/zim:/data`], PortBindings: { '8080/tcp': [{ HostPort: '8090' }] }, }, ExposedPorts: { '8080/tcp': {} }, }), ui_location: '8090', installed: false, installation_status: 'idle', is_dependency_service: false, depends_on: null, }, { service_name: SERVICE_NAMES.QDRANT, friendly_name: 'Qdrant Vector Database', powered_by: null, display_order: 100, // Dependency service, not shown directly description: 'Vector database for storing and searching embeddings', icon: 'IconRobot', container_image: 'qdrant/qdrant:v1.16', source_repo: 'https://github.com/qdrant/qdrant', container_command: null, container_config: JSON.stringify({ HostConfig: { RestartPolicy: { Name: 'unless-stopped' }, Binds: [`${ServiceSeeder.NOMAD_STORAGE_ABS_PATH}/qdrant:/qdrant/storage`], PortBindings: { '6333/tcp': [{ HostPort: '6333' }], '6334/tcp': [{ HostPort: '6334' }] }, }, ExposedPorts: { '6333/tcp': {}, '6334/tcp': {} }, }), ui_location: '6333', installed: false, installation_status: 'idle', is_dependency_service: true, depends_on: null, }, { service_name: SERVICE_NAMES.OLLAMA, friendly_name: 'AI Assistant', powered_by: 'Ollama', display_order: 3, description: 'Local AI chat
that runs entirely on your hardware - no internet required', icon: 'IconWand', container_image: 'ollama/ollama:0.15.2', source_repo: 'https://github.com/ollama/ollama', container_command: 'serve', container_config: JSON.stringify({ HostConfig: { RestartPolicy: { Name: 'unless-stopped' }, Binds: [`${ServiceSeeder.NOMAD_STORAGE_ABS_PATH}/ollama:/root/.ollama`], PortBindings: { '11434/tcp': [{ HostPort: '11434' }] }, }, ExposedPorts: { '11434/tcp': {} }, }), ui_location: '/chat', installed: false, installation_status: 'idle', is_dependency_service: false, depends_on: SERVICE_NAMES.QDRANT, }, { service_name: SERVICE_NAMES.CYBERCHEF, friendly_name: 'Data Tools', powered_by: 'CyberChef', display_order: 11, description: 'Swiss Army knife for data encoding, encryption, and analysis', icon: 'IconChefHat', container_image: 'ghcr.io/gchq/cyberchef:10.19.4', source_repo: 'https://github.com/gchq/CyberChef', container_command: null, container_config: JSON.stringify({ HostConfig: { RestartPolicy: { Name: 'unless-stopped' }, PortBindings: { '80/tcp': [{ HostPort: '8100' }] }, }, ExposedPorts: { '80/tcp': {} }, }), ui_location: '8100', installed: false, installation_status: 'idle', is_dependency_service: false, depends_on: null, }, { service_name: SERVICE_NAMES.FLATNOTES, friendly_name: 'Notes', powered_by: 'FlatNotes', display_order: 10, description: 'Simple note-taking app with local storage', icon: 'IconNotes', container_image: 'dullage/flatnotes:v5.5.4', source_repo: 'https://github.com/dullage/flatnotes', container_command: null, container_config: JSON.stringify({ HostConfig: { RestartPolicy: { Name: 'unless-stopped' }, PortBindings: { '8080/tcp': [{ HostPort: '8200' }] }, Binds: [`${ServiceSeeder.NOMAD_STORAGE_ABS_PATH}/flatnotes:/data`], }, ExposedPorts: { '8080/tcp': {} }, Env: ['FLATNOTES_AUTH_TYPE=none'], }), ui_location: '8200', installed: false, installation_status: 'idle', is_dependency_service: false, depends_on: null, }, { service_name: SERVICE_NAMES.KOLIBRI, friendly_name: 'Education Platform', powered_by: 'Kolibri', display_order: 2, description: 'Interactive learning platform with video courses and exercises', icon: 'IconSchool', container_image: 'treehouses/kolibri:0.12.8', source_repo: 'https://github.com/learningequality/kolibri', container_command: null, container_config: JSON.stringify({ HostConfig: { RestartPolicy: { Name: 'unless-stopped' }, PortBindings: { '8080/tcp': [{ HostPort: '8300' }] }, Binds: [`${ServiceSeeder.NOMAD_STORAGE_ABS_PATH}/kolibri:/root/.kolibri`], }, ExposedPorts: { '8080/tcp': {} }, }), ui_location: '8300', installed: false, installation_status: 'idle', is_dependency_service: false, depends_on: null, }, ] async run() { const existingServices = await Service.query().select('service_name') const existingServiceNames = new Set(existingServices.map((service) => service.service_name)) const newServices = ServiceSeeder.DEFAULT_SERVICES.filter( (service) => !existingServiceNames.has(service.service_name) ) await Service.createMany([...newServices]) } } ================================================ FILE: admin/docs/about.md ================================================ # About Project N.O.M.A.D. Project N.O.M.A.D. (Node for Offline Media, Archives, and Data; "Nomad" for short) is a project started in 2025 by Chris Sherwood of [Crosstalk Solutions, LLC](https://crosstalksolutions.com). The goal of the project is not to create just another utility for storing offline resources, but rather to allow users to run their own ultimate "survival computer". 
While many similar offline survival computers are designed to be run on bare-minimum, lightweight hardware, Project N.O.M.A.D. is quite the opposite. To install and run the available AI tools, we highly encourage the use of a beefy, GPU-backed device to make the most of your install. See the [Hardware Guide](https://www.projectnomad.us/hardware) for detailed build recommendations at three price points. Since its initial release, NOMAD has grown to include built-in AI chat with a Knowledge Base for document-aware responses, a System Benchmark with a community leaderboard, curated content collections with tiered options, and an Easy Setup Wizard to get new users up and running quickly. Project N.O.M.A.D. is open source, released under the [Apache License 2.0](https://github.com/Crosstalk-Solutions/project-nomad/blob/main/LICENSE). ## Links - **Website:** [www.projectnomad.us](https://www.projectnomad.us) - **Hardware Guide:** [www.projectnomad.us/hardware](https://www.projectnomad.us/hardware) - **Discord:** [Join the Community](https://discord.com/invite/crosstalksolutions) - **GitHub:** [Crosstalk-Solutions/project-nomad](https://github.com/Crosstalk-Solutions/project-nomad) - **Benchmark Leaderboard:** [benchmark.projectnomad.us](https://benchmark.projectnomad.us) ================================================ FILE: admin/docs/faq.md ================================================ # Frequently Asked Questions ## General Questions ### What is N.O.M.A.D.? N.O.M.A.D. (Node for Offline Media, Archives, and Data) is a personal server that gives you access to knowledge, education, and AI assistance without requiring an internet connection. It runs on your own hardware, keeping your data private and accessible anytime. ### Do I need internet to use N.O.M.A.D.? No — that's the whole point. Once your content is downloaded, everything works offline. You only need internet to: - Download new content - Update the software - Sync the latest versions of Wikipedia, maps, etc. ### What hardware do I need? N.O.M.A.D. is designed for capable hardware, especially if you want to use the AI features. Recommended: - Modern multi-core CPU (AMD Ryzen 7 with Radeon graphics is the community sweet spot) - 16GB+ RAM (32GB+ for best AI performance) - SSD storage (size depends on content — 500GB minimum, 1TB+ recommended) - NVIDIA or AMD GPU recommended for faster AI responses **For detailed build recommendations at three price points ($150–$1,000+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** ### How much storage do I need? It depends on what you download: - Full Wikipedia: ~95GB - Khan Academy courses: ~50GB - Medical references: ~500MB - US state maps: ~2-3GB each - AI models: 10-40GB depending on model Start with essentials and add more as needed. --- ## Content Questions ### How do I add more Wikipedia content? 1. Go to **Settings** (hamburger menu → Settings) 2. Click **Content Explorer** 3. Browse available Wikipedia packages 4. Click Download on items you want You can also use the **Content Explorer** to browse all available ZIM content beyond Wikipedia. ### How do I add more educational courses? 1. Open **Kolibri** 2. Sign in as an admin 3. Go to **Device → Channels** 4. Browse and import available channels ### How current is the content? Content is as current as when it was last downloaded. Wikipedia snapshots are typically updated monthly. Check the file names or descriptions for dates. ### Can I add my own files? Yes — with the Knowledge Base. 
Upload PDFs, text files, and other documents to the [Knowledge Base](/knowledge-base), and the AI can reference them when answering your questions. This uses semantic search to find relevant information from your uploaded files. For Kiwix content, N.O.M.A.D. uses standard ZIM files. For educational content, Kolibri uses its own channel format. ### What are curated collection tiers? When selecting content in the Easy Setup wizard or Content Explorer, collections are organized into three tiers: - **Essential** — Core content for the category (smallest download) - **Standard** — Essential plus additional useful content - **Comprehensive** — Everything available for the category (largest download) This helps you balance content coverage against storage usage. --- ## AI Questions ### How do I use the AI chat? 1. Go to [AI Chat](/chat) from the Command Center 2. Type your question or request 3. The AI responds in conversational style The AI must be installed first — enable it during Easy Setup or install it from the [Apps](/settings/apps) page. ### How do I upload documents to the Knowledge Base? 1. Go to **[Knowledge Base →](/knowledge-base)** 2. Upload your documents (PDFs, text files, etc.) 3. Documents are processed and indexed automatically 4. Ask questions in AI Chat — the AI will reference your uploaded documents when relevant You can also remove documents from the Knowledge Base when they're no longer needed. NOMAD documentation is automatically added to the Knowledge Base when the AI Assistant is installed. ### What is the System Benchmark? The System Benchmark tests your hardware performance and generates a NOMAD Score — a weighted composite of CPU, memory, disk, and AI performance. You can create a Builder Tag (a NOMAD-themed identity like "Tactical-Llama-1234") and share your results with the [community leaderboard](https://benchmark.projectnomad.us). Go to **[System Benchmark →](/settings/benchmark)** to run one. ### What is the Early Access Channel? The Early Access Channel lets you opt in to receive release candidate builds with the latest features and improvements before they hit stable releases. You can enable or disable it from **Settings → Check for Updates**. Early access builds may contain bugs — if you prefer stability, stay on the stable channel. --- ## Troubleshooting ### A feature isn't loading or shows a blank page **Try these steps:** 1. Wait 30 seconds — some features take time to start 2. Refresh the page (Ctrl+R or Cmd+R) 3. Go back to the Command Center and try again 4. Check Settings → System to see if the service is running 5. Try restarting the service (Stop, then Start in Apps manager) ### Maps show a gray/blank area The Maps feature requires downloaded map data. If you see a blank area: 1. Go to **Settings → Maps Manager** 2. Download map regions for your area 3. Wait for downloads to complete 4. Return to Maps and refresh ### AI responses are slow Local AI requires significant computing power. To improve speed: - **Add a GPU** — An NVIDIA GPU with the NVIDIA Container Toolkit can improve AI speed by 10-20x or more - Close other applications on the server - Ensure adequate cooling (overheating causes throttling) - Consider using a smaller/faster AI model if available ### How do I enable GPU acceleration for AI? N.O.M.A.D. automatically detects NVIDIA GPUs when the NVIDIA Container Toolkit is installed on the host system. To set up GPU acceleration: 1. **Install an NVIDIA GPU** in your server (if not already present) 2. 
**Install the NVIDIA Container Toolkit** on the host — follow the [official installation guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) 3. **Reinstall the AI Assistant** — Go to [Apps](/settings/apps), find AI Assistant, and click **Force Reinstall** N.O.M.A.D. will detect the GPU during installation and configure the AI to use it automatically. You'll see "NVIDIA container runtime detected" in the installation progress. **Tip:** Run a [System Benchmark](/settings/benchmark) before and after to see the difference. GPU-accelerated systems typically see 100+ tokens per second vs 10-15 on CPU only. ### I added/changed my GPU but AI is still slow When you add or swap a GPU, N.O.M.A.D. needs to reconfigure the AI container to use it: 1. Make sure the **NVIDIA Container Toolkit** is installed on the host 2. Go to **[Apps](/settings/apps)** 3. Find the **AI Assistant** and click **Force Reinstall** Force Reinstall recreates the AI container with GPU support enabled. Without this step, the AI continues to run on CPU only. ### I see a "GPU passthrough not working" warning N.O.M.A.D. checks whether your GPU is actually accessible inside the AI container. If a GPU is detected on the host but isn't working inside the container, you'll see a warning banner on the System Information and AI Settings pages. Click the **"Fix: Reinstall AI Assistant"** button to recreate the container with proper GPU access. This preserves your downloaded AI models. ### AI Chat not available The AI Chat page requires the AI Assistant to be installed first: 1. Go to **[Apps](/settings/apps)** 2. Install the **AI Assistant** 3. Wait for the installation to complete 4. The AI Chat will then be accessible from the home screen or [Chat](/chat) ### Knowledge Base upload stuck If a document upload appears stuck in the Knowledge Base: 1. Check that the AI Assistant is running in **Settings → Apps** 2. Large documents take time to process — wait a few minutes 3. Try uploading a smaller document to verify the system is working 4. Check **Settings → System** for any error messages ### Benchmark won't submit to leaderboard To share results with the community leaderboard: - You must run a **Full Benchmark** (not System Only or AI Only) - The benchmark must include AI results (AI Assistant must be installed and working) - Your score must be higher than any previous submission from the same hardware If submission fails, check the error message for details. ### "Service unavailable" or connection errors The service might still be starting up. Wait 1-2 minutes and try again. If the problem persists: 1. Go to **Settings → Apps** 2. Find the problematic service 3. Click **Restart** 4. Wait 30 seconds, then try again ### Downloads are stuck or failing 1. Check your internet connection 2. Go to **Settings** and check available storage 3. If storage is full, delete unused content 4. Cancel the stuck download and try again ### The server won't start If you can't access the Command Center at all: 1. Verify the server hardware is powered on 2. Check network connectivity 3. Try accessing directly via the server's IP address 4. Check server logs if you have console access ### I forgot my Kolibri password Kolibri passwords are managed separately: 1. If you're an admin, you can reset user passwords in Kolibri's user management 2. If you forgot the admin password, you may need to reset it via command line (contact your administrator) --- ## Updates and Maintenance ### How do I update N.O.M.A.D.? 1. 
Go to **Settings → Check for Updates** 2. If an update is available, click to install 3. The system will download updates and restart automatically 4. This typically takes 2-5 minutes ### Should I update regularly? Yes, while you have internet access. Updates include: - Bug fixes - New features - Security improvements - Performance enhancements ### How do I update content (Wikipedia, etc.)? Content updates are separate from software updates: 1. Go to **Settings → Content Manager** or **Content Explorer** 2. Check for newer versions of your installed content 3. Download updated versions as needed Tip: New Wikipedia snapshots are released approximately monthly. ### What happens if an update fails? The system is designed to recover gracefully. If an update fails: 1. The previous version should continue working 2. Try the update again later 3. Check Settings → System for error messages ### Command-Line Maintenance For advanced troubleshooting or when you can't access the web interface, N.O.M.A.D. includes helper scripts in `/opt/project-nomad`: **Start all services:** ```bash sudo bash /opt/project-nomad/start_nomad.sh ``` **Stop all services:** ```bash sudo bash /opt/project-nomad/stop_nomad.sh ``` **Update Command Center:** ```bash sudo bash /opt/project-nomad/update_nomad.sh ``` *Note: This updates the Command Center only, not individual apps. Update apps through the web interface.* **Uninstall N.O.M.A.D.:** ```bash curl -fsSL https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/uninstall_nomad.sh -o uninstall_nomad.sh sudo bash uninstall_nomad.sh ``` *Warning: This cannot be undone. All data will be deleted.* --- ## Privacy and Security ### Is my data private? Yes. N.O.M.A.D. runs entirely on your hardware. Your searches, AI conversations, and usage data never leave your server. ### Can others access my server? By default, N.O.M.A.D. is accessible on your local network. Anyone on the same network can access it. For public networks, consider additional security measures. ### Does the AI send data anywhere? No. The AI runs completely locally. Your conversations are not sent to any external service. The AI chat is built into the Command Center — there's no separate service to configure. --- ## Getting More Help ### The AI can help Try asking a question in [AI Chat](/chat). The local AI can answer questions about many topics, including technical troubleshooting. If you've uploaded NOMAD documentation to the Knowledge Base, it can also help with NOMAD-specific questions. ### Check the documentation You're in the docs now. Use the menu to find specific topics. ### Join the community Get help from other NOMAD users on **[Discord](https://discord.com/invite/crosstalksolutions)**. ### Release Notes See what's changed in each version: **[Release Notes](/docs/release-notes)** ================================================ FILE: admin/docs/getting-started.md ================================================ # Getting Started with N.O.M.A.D. This guide will help you get the most out of your N.O.M.A.D. server. --- ## Easy Setup Wizard If this is your first time using N.O.M.A.D., the Easy Setup wizard will help you get everything configured. **[Launch Easy Setup →](/easy-setup)** ![Easy Setup Wizard — Step 1: Choose your capabilities](/docs/easy-setup-step1.png) The wizard walks you through four simple steps: 1. **Capabilities** — Choose what to enable: Information Library, AI Assistant, Education Platform, Maps, Data Tools, and Notes 2. 
**Uninstall N.O.M.A.D.:**

```bash
curl -fsSL https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/uninstall_nomad.sh -o uninstall_nomad.sh
sudo bash uninstall_nomad.sh
```

*Warning: This cannot be undone. All data will be deleted.*

---

## Privacy and Security

### Is my data private?

Yes. N.O.M.A.D. runs entirely on your hardware. Your searches, AI conversations, and usage data never leave your server.

### Can others access my server?

By default, N.O.M.A.D. is accessible on your local network. Anyone on the same network can access it. For public networks, consider additional security measures.

### Does the AI send data anywhere?

No. The AI runs completely locally. Your conversations are not sent to any external service. The AI chat is built into the Command Center — there's no separate service to configure.

---

## Getting More Help

### The AI can help

Try asking a question in [AI Chat](/chat). The local AI can answer questions about many topics, including technical troubleshooting. If you've uploaded NOMAD documentation to the Knowledge Base, it can also help with NOMAD-specific questions.

### Check the documentation

You're in the docs now. Use the menu to find specific topics.

### Join the community

Get help from other NOMAD users on **[Discord](https://discord.com/invite/crosstalksolutions)**.

### Release Notes

See what's changed in each version: **[Release Notes](/docs/release-notes)**

================================================
FILE: admin/docs/getting-started.md
================================================

# Getting Started with N.O.M.A.D.

This guide will help you get the most out of your N.O.M.A.D. server.

---

## Easy Setup Wizard

If this is your first time using N.O.M.A.D., the Easy Setup wizard will help you get everything configured.

**[Launch Easy Setup →](/easy-setup)**

![Easy Setup Wizard — Step 1: Choose your capabilities](/docs/easy-setup-step1.png)

The wizard walks you through four simple steps:

1. **Capabilities** — Choose what to enable: Information Library, AI Assistant, Education Platform, Maps, Data Tools, and Notes
2. **Maps** — Select geographic regions for offline maps
3. **Content** — Choose curated content collections with Essential, Standard, or Comprehensive tiers

   ![Content tiers — Essential, Standard, and Comprehensive](/docs/easy-setup-tiers.png)

4. **Review** — Confirm your selections and start downloading

Depending on what you selected, downloads may take a while. You can monitor progress in the Settings area, continue using features that are already installed, or leave your server running overnight for large downloads.

---

## Understanding Your Tools

### Information Library — Offline Knowledge (Kiwix)

The Information Library stores compressed versions of websites and references that work without internet.

**What's included:**

- Full Wikipedia (millions of articles)
- Medical references and first aid guides
- How-to guides and survival information
- Classic books from Project Gutenberg

**How to use it:**

1. Click **Information Library** from the Command Center home screen or [Apps](/settings/apps) page
2. Choose a collection (like Wikipedia)
3. Search or browse just like the regular website

---

### Education Platform — Offline Courses (Kolibri)

The Education Platform provides complete educational courses that work offline.

**What's included:**

- Khan Academy video courses
- Math, science, reading, and more
- Progress tracking for learners
- Works for all ages

**How to use it:**

1. Click **Education Platform** from the Command Center home screen or [Apps](/settings/apps) page
2. Sign in or create a learner account
3. Browse courses and start learning

**Tip:** Kolibri supports multiple users. Create accounts for each family member to track individual progress.

---

### AI Assistant — Built-in Chat

![AI Chat interface](/docs/ai-chat.png)

N.O.M.A.D. includes a built-in AI chat interface powered by Ollama. It runs entirely on your server — no internet needed, no data sent anywhere.

**What can it do:**

- Answer questions on any topic
- Explain complex concepts simply
- Help with writing and editing
- Reference your uploaded documents via the Knowledge Base
- Brainstorm ideas and assist with problem-solving

**How to use it:**

1. Click **AI Chat** from the Command Center or go to [Chat](/chat)
2. Type your question or request
3. The AI responds in conversational style

**Tip:** Be specific in your questions. Instead of "tell me about plants," try "what vegetables grow well in shade?"

**Note:** The AI Assistant must be installed first. Enable it during Easy Setup or install it from the [Apps](/settings/apps) page.

**GPU Acceleration:** If your server has an NVIDIA GPU with the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) installed, N.O.M.A.D. will automatically use it for AI — dramatically faster responses (10-20x improvement). If you add a GPU later, go to [Apps](/settings/apps) and **Force Reinstall** the AI Assistant to enable it.
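**Verify GPU passthrough:** You can confirm from a terminal that the GPU is visible both on the host and inside the AI container. In this sketch the container name `ollama` is an assumption; run `docker ps` to find the actual name on your system.

```bash
# 1) On the host: confirm the NVIDIA driver sees the GPU
nvidia-smi

# 2) Inside the AI container: confirm passthrough works
#    ("ollama" is an assumed container name; check `docker ps`)
docker exec ollama nvidia-smi
```

If the second command fails while the first succeeds, that is the same condition the "GPU passthrough not working" warning detects, and a **Force Reinstall** of the AI Assistant usually resolves it.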
---

### Knowledge Base — Document-Aware AI

![Knowledge Base upload interface](/docs/knowledge-base.png)

The Knowledge Base lets you upload documents so the AI can reference them when answering your questions. It uses semantic search (RAG via Qdrant) to find relevant information from your uploaded files.

**Supported file types:**

- PDFs, text files, and other document formats
- NOMAD documentation is automatically loaded when the AI Assistant is installed

**How to use it:**

1. Go to **[Knowledge Base →](/knowledge-base)**
2. Upload your documents (PDFs, text files, etc.)
3. Documents are processed and indexed automatically
4. Ask questions in AI Chat — the AI will reference your uploaded documents when relevant
5. Remove documents you no longer need — they'll be deleted from the index and local storage

**Use cases:**

- Upload emergency plans for quick reference during a crisis
- Load technical manuals and SOPs for offline work sites
- Add curriculum guides for homeschooling
- Store research papers for academic work

---

### Maps — Offline Navigation

![Offline maps viewer](/docs/maps.png)

View maps without internet. Download the regions you need before going offline.

**How to use it:**

1. Click **Maps** from the Command Center
2. Navigate by dragging and zooming
3. Search for locations using the search bar

**To add more map regions:**

1. Go to **Settings → Maps Manager**
2. Select the regions you need
3. Click Download

**Tip:** Download maps for areas you travel to frequently, plus neighboring regions just in case.

**[Open Maps →](/maps)**

---

## Managing Your Server

### Adding More Content

As your needs change, you can add more content anytime:

- **More apps:** Settings → Apps
- **More references:** Settings → Content Explorer or Content Manager
- **More map regions:** Settings → Maps Manager
- **More educational content:** Through Kolibri's built-in content browser

### Wikipedia Selector

![Content Explorer — browse and download Wikipedia packages and curated collections](/docs/content-explorer.png)

N.O.M.A.D. includes a dedicated Wikipedia content management tool for browsing and downloading Wikipedia packages.

**How to use it:**

1. Go to **[Content Explorer →](/settings/zim/remote-explorer)**
2. Browse available Wikipedia packages by language and size
3. Select and download the packages you want

**Note:** Selecting a different Wikipedia package replaces the previously downloaded version. Only one Wikipedia selection is active at a time.

### System Benchmark

![System Benchmark with NOMAD Score and Builder Tag](/docs/benchmark.png)

Test your hardware performance and see how your NOMAD build stacks up against the community.

**How to use it:**

1. Go to **[System Benchmark →](/settings/benchmark)**
2. Choose a benchmark type: Full, System Only, or AI Only
3. View your NOMAD Score (a weighted composite of CPU, memory, disk, and AI performance)
4. Create a Builder Tag (your NOMAD-themed identity, like "Tactical-Llama-1234")
5. Share your results with the [community leaderboard](https://benchmark.projectnomad.us)

**Note:** Only Full Benchmarks with AI data can be shared to the community leaderboard.

### Keeping Things Updated

While you have internet, periodically check for updates:

1. Go to **Settings → Check for Updates**
2. If updates are available, click to install
3. Wait for the update to complete (your server will restart)

Content updates (Wikipedia, maps, etc.) can be managed separately from software updates.

**Early Access Channel:** Want the latest features before they hit stable? Enable the Early Access Channel from the Check for Updates page to receive release candidate builds. You can switch back to stable anytime.

### Monitoring System Health

Check on your server anytime:

1. Go to **Settings → System**
2. View CPU, memory, and storage usage
3. Check system uptime and status
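Prefer a terminal? The same storage picture is available with standard tools. In the sketch below, `/opt/project-nomad` is where the N.O.M.A.D. helper scripts live; your content may be stored elsewhere (for example, under a custom `NOMAD_STORAGE_PATH` location), so adjust the path to your setup.

```bash
# Free space on all mounted filesystems
df -h

# Approximate size of the N.O.M.A.D. directory (adjust the path to your setup)
sudo du -sh /opt/project-nomad
```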
---

## Tips for Best Results

### Before Going Offline

- **Update everything** — Run software and content updates
- **Download what you need** — Maps, references, educational content
- **Test it** — Make sure features work while you still have internet to troubleshoot

### Storage Management

Your server has limited storage. Prioritize:

- Content you'll actually use
- Critical references (medical, survival)
- Maps for your region
- Educational content matching your needs

Check storage usage in **Settings → System**.

### Getting Help

- **In-app docs:** You're reading them now
- **AI assistant:** Ask a question in [AI Chat](/chat)
- **Release notes:** See what's new in each version

---

## Next Steps

You're ready to use N.O.M.A.D. Here are some things to try:

1. **Look something up** — Search for a topic in the Information Library
2. **Learn something** — Start a Khan Academy course in the Education Platform
3. **Ask a question** — Chat with the AI in [AI Chat](/chat)
4. **Explore maps** — Find your neighborhood in the Maps viewer
5. **Upload a document** — Add a PDF to the [Knowledge Base](/knowledge-base) and ask the AI about it

Enjoy your offline knowledge server!

================================================
FILE: admin/docs/home.md
================================================

# Welcome to Project N.O.M.A.D.

Your personal offline knowledge server is ready to use.

## What is N.O.M.A.D.?

**N.O.M.A.D.** stands for **Node for Offline Media, Archives, and Data**. It's your personal server for accessing knowledge, education, and AI assistance — even when you have no internet connection.

Think of it as having Wikipedia, Khan Academy, an AI assistant, and offline maps all in one place, running on hardware you control.

![Command Center Dashboard](/docs/dashboard.png)

## What Can You Do?

### Browse Offline Knowledge

Access millions of Wikipedia articles, medical references, how-to guides, and ebooks — all stored locally on your server. No internet required.

*Launch the Information Library from the home screen or the [Apps](/settings/apps) page.*

### Learn Something New

Khan Academy courses covering math, science, economics, and more. Complete with videos and exercises, all available offline.

*Launch the Education Platform from the home screen or the [Apps](/settings/apps) page.*

### Chat with AI

Ask questions, get explanations, brainstorm ideas, or get help with writing. Your local AI assistant works completely offline — and you can upload documents to the Knowledge Base for document-aware responses.

**[Open AI Chat →](/chat)**

### Upload Documents to the Knowledge Base

Upload PDFs, text files, and other documents for the AI to reference. The Knowledge Base uses semantic search to find relevant information from your uploaded documents when you ask questions.

**[Open Knowledge Base →](/knowledge-base)**

### View Offline Maps

Navigate and explore maps without an internet connection. Download regions you need before going offline.

**[Open Maps →](/maps)**

### Benchmark Your Hardware

Run a System Benchmark to see how your hardware performs and compare your NOMAD Score with the community leaderboard.

**[Open Benchmark →](/settings/benchmark)**

---

## Getting Started

**New to N.O.M.A.D.?** Use the Easy Setup wizard to configure your server and download content collections.

**[Run Easy Setup →](/easy-setup)**

Or explore the **[Getting Started Guide](/docs/getting-started)** for a walkthrough of all features.

---

## Quick Links
| I want to...                    | Go here                                              |
|---------------------------------|------------------------------------------------------|
| Chat with the AI                | [AI Chat →](/chat)                                   |
| Upload documents for AI         | [Knowledge Base →](/knowledge-base)                  |
| Download more content           | [Install Apps →](/settings/apps)                     |
| Add Wikipedia/reference content | [Content Explorer →](/settings/zim/remote-explorer)  |
| Manage installed content        | [Content Manager →](/settings/zim)                   |
| Download map regions            | [Maps Manager →](/settings/maps)                     |
| Run a benchmark                 | [System Benchmark →](/settings/benchmark)            |
| Check for updates               | [System Update →](/settings/update)                  |
| View system status              | [System Info →](/settings/system)                    |

---

## Keeping Your Server Updated

N.O.M.A.D. works best when kept up to date while you have internet access. This ensures you have the latest:

- Software features and bug fixes
- Wikipedia and reference content
- Educational materials
- AI model improvements

When you go offline, you'll have everything you need — the last synced versions of all your content.

**[Check for Updates →](/settings/update)**

================================================
FILE: admin/docs/release-notes.md
================================================

# Release Notes

## Version 1.30.0 - March 20, 2026

### Features

- **Night Ops**: Added our most requested feature — a dark mode theme for the Command Center interface! Activate it from the footer and enjoy the sleek new look during your late-night missions. Thanks @chriscrosstalk for the contribution!
- **Debug Info**: Added a new "Debug Info" modal accessible from the footer that provides detailed system and application information for troubleshooting and support. Thanks @chriscrosstalk for the contribution!
- **Support the Project**: Added a new "Support the Project" page in settings with links to community resources, donation options, and ways to contribute.
- **Install**: The main Nomad image is now fully self-contained and directly usable with Docker Compose, allowing for more flexible and customizable installations without relying on external scripts. The image remains fully backwards compatible with existing installations, and the install script has been updated to reflect the simpler deployment process.

### Bug Fixes

- **Settings**: Storage usage display now prefers real block devices over tmpfs. Thanks @Bortlesboat for the fix!
- **Settings**: Fixed an issue where device matching and mount entry deduplication logic could cause incorrect storage usage reporting and missing devices in storage displays.
- **Maps**: The Maps page now respects the request protocol (http vs https) to ensure map tiles load correctly. Thanks @davidgross for the bug report!
- **Knowledge Base**: Fixed an issue where file embedding jobs could cause a retry storm if the Ollama service was unavailable. Thanks @skyam25 for the bug report!
- **Curated Collections**: Fixed some broken links in the curated collections definitions (maps and ZIM files) that were causing some resources to fail to download.
- **Easy Setup**: Fixed an issue where the "Start Here" badge would persist even after visiting the Easy Setup Wizard for the first time. Thanks @chriscrosstalk for the fix!
- **UI**: Fixed an issue where the loading spinner could look strange in certain use cases.
- **System Updates**: Fixed an issue where the update banner would persist even after the system was updated successfully. Thanks @chriscrosstalk for the fix!
- **Performance**: Various small memory leak fixes and performance improvements across the UI to ensure a smoother experience.
### Improvements

- **Ollama**: Improved GPU detection logic to ensure the latest GPU config is always passed to the Ollama container on update
- **Ollama**: The detected GPU type is now persisted in the database for more reliable configuration and troubleshooting across updates and restarts. Thanks @chriscrosstalk for the contribution!
- **Downloads**: Users can now dismiss failed download notifications to reduce clutter in the UI. Thanks @chriscrosstalk for the contribution!
- **Logging**: Changed the default log level to "info" to reduce noise and focus on important messages. Thanks @traxeon for the suggestion!
- **Logging**: Nomad's internal logger now creates its own log directory on startup if it doesn't already exist, preventing errors on fresh installs.
- **Dozzle**: Dozzle shell access and container actions are now disabled by default. Thanks @traxeon for the recommendation!
- **MySQL & Redis**: Removed port exposure to host by default for improved security. Ports can still be exposed manually if needed. Thanks @traxeon for the recommendation!
- **Dependencies**: Various dependency updates to close security vulnerabilities and improve stability
- **Utility Scripts**: Added a check for the expected Docker Compose version (v2) in all utility scripts to provide clearer error messages and guidance if the environment is not set up correctly.
- **Utility Scripts**: Added an additional warning to the installation script about potential overwriting of existing customized configurations and the importance of backing up data before running the installation script again.
- **Documentation**: Updated installation instructions to reflect the new option for manual deployment via Docker Compose without the install script.
## Version 1.29.0 - March 11, 2026

### Features

- **AI Assistant**: Added improved user guidance for troubleshooting GPU pass-through issues
- **AI Assistant**: The last used model is now automatically selected when a new chat is started
- **Settings**: Nomad now automatically performs nightly checks for available app updates, and users can select and apply updates from the Apps page in Settings

### Bug Fixes

- **Settings**: Fixed an issue where the AI Assistant settings page would be shown in navigation even if the AI Assistant was not installed, thus causing 404 errors when clicked
- **Security**: Path traversal and SSRF mitigations
- **AI Assistant**: Fixed an issue that was causing intermittent failures saving chat session titles

### Improvements

- **AI Assistant**: Extensive performance improvements and improved RAG intelligence/context usage

## Version 1.28.0 - March 5, 2026

### Features

- **RAG**: Added support for viewing active embedding jobs in the processing queue and improved job progress tracking with more granular status updates
- **RAG**: Added support for removing documents from the knowledge base (deletion from Qdrant and local storage)

### Bug Fixes

- **Install**: Fixed broken URLs in the install script and updated it to prompt for Apache 2.0 license acceptance
- **Docs**: Updated legal notices to reflect Apache 2.0 license and added Qdrant attribution
- **Dependencies**: Various minor dependency updates to close security vulnerabilities

### Improvements

- **License**: Added Apache 2.0 license file to repository for clarity and legal compliance

## Version 1.27.0 - March 4, 2026

### Features

- **Settings**: Added pagination support for Ollama model list
- **Early Access Channel**: Allows users to opt in to receive early access builds with the latest features and improvements before they hit stable releases

### Bug Fixes

### Improvements

- **AI Assistant**: Improved chat performance by optimizing query rewriting and response streaming logic
- **CI/CD**: Updated release workflows to support release candidate versions
- **KV Store**: Improved type safety in KV store implementation

## Version 1.26.0 - February 19, 2026

### Features

- **AI Assistant**: Added support for showing reasoning stream for models with thinking capabilities
- **AI Assistant**: Added support for response streaming for improved UX

### Bug Fixes

### Improvements

## Version 1.25.2 - February 18, 2026

### Features

### Bug Fixes

- **AI Assistant**: Fixed an error from chat suggestions when no Ollama models are installed
- **AI Assistant**: Improved discrete GPU detection logic
- **UI**: Legacy links to /docs and /knowledge-base now gracefully redirect to the correct pages instead of showing 404 errors

### Improvements

- **AI Assistant**: Chat suggestions are now disabled by default to avoid overwhelming smaller hardware setups

## Version 1.25.1 - February 12, 2026

### Features

### Bug Fixes

- **Settings**: Fix potential stale cache issue when checking for system updates
- **Settings**: Improve user guidance during system updates

### Improvements

## Version 1.25.0 - February 12, 2026

### Features

- **Collections**: Complete overhaul of collection management with dynamic manifests, database tracking of installed resources, and improved UI for managing ZIM files and map assets
- **Collections**: Added support for checking if newer versions of installed resources are available based on manifest data

### Bug Fixes

- **Benchmark**: Improved error handling and status code propagation for better user feedback on submission
failures - **Benchmark**: Fix a race condition in the sysbench container management that could lead to benchmark test failures ### Improvements --- ## Version 1.24.0 - February 10, 2026 ### 🚀 Features - **AI Assistant**: Query rewriting for enhanced context retrieval - **AI Assistant**: Allow manual scan and resync of Knowledge Base - **AI Assistant**: Integrated Knowledge Base UI into AI Assistant page - **AI Assistant**: ZIM content embedding into Knowledge Base - **Downloads**: Display model download progress - **System**: Cron job for automatic update checks - **Docs**: Polished documentation rendering with desert-themed components ### 🐛 Bug Fixes - **AI Assistant**: Chat suggestion performance improvements - **AI Assistant**: Inline code rendering - **GPU**: Detect NVIDIA GPUs via Docker API instead of lspci - **Install**: Improve Docker GPU configuration - **System**: Correct memory usage percentage calculation - **System**: Show host OS, hostname, and GPU instead of container info - **Collections**: Correct devdocs ZIM filenames in Computing & Technology - **Downloads**: Sort active downloads by progress descending - **Docs**: Fix multiple broken internal links and route references ### ✨ Improvements - **Docs**: Overhauled in-app documentation with sidebar ordering - **Docs**: Updated README with feature overview - **GPU**: Reusable utility for running nvidia-smi --- ## Version 1.23.0 - February 5, 2026 ### 🚀 Features - **Maps**: Maps now use full page by default - **Navigation**: Added "Back to Home" link on standard header pages - **AI**: Fuzzy search for AI models list - **UI**: Improved global error reporting with user notifications ### 🐛 Bug Fixes - **Kiwix**: Avoid restarting the Kiwix container while download jobs are running - **Docker**: Ensure containers are fully removed on failed service install - **AI**: Filter cloud models from API response and fallback model list - **Curated Collections**: Prevent duplicate resources when fetching latest collections - **Content Tiers**: Rework tier system to dynamically determine install status on the server side ### ✨ Improvements - **Docs**: Added pretty rendering for markdown tables in documentation pages --- ## Version 1.22.0 - February 4, 2026 ### 🚀 Features - **Content Manager**: Display friendly names (Title and Summary) instead of raw filenames for ZIM files - **AI Knowledge Base**: Automatically add NOMAD documentation to AI Knowledge Base on install ### 🐛 Bug Fixes - **Maps**: Ensure map asset URLs resolve correctly when accessed via hostname - **Wikipedia**: Prevent loading spinner overlay during download - **Easy Setup**: Scroll to top when navigating between wizard steps - **AI Chat**: Hide chat button and page unless AI Assistant is actually installed - **Settings**: Rename confusing "Port" column to "Location" in Apps Settings ### ✨ Improvements - **Ollama**: Cleanup model download logic and improve progress tracking --- ## Version 1.21.0 - February 2, 2026 ### 🚀 Features - **AI Assistant**: Built-in AI chat interface — no more separate Open WebUI app - **Knowledge Base**: Document upload with OCR, semantic search (RAG), and contextual AI responses via Qdrant - **Wikipedia Selector**: Dedicated Wikipedia content management with smart package selection - **GPU Support**: NVIDIA and AMD GPU passthrough for Ollama (faster AI inference) ### 🐛 Bug Fixes - **Benchmark**: Detect Intel Arc Graphics on Core Ultra processors - **Easy Setup**: Remove built-in System Benchmark from wizard (now in Settings) - **Icons**: Switch to 
Tabler Icons for consistency, remove unused icon libraries - **Docker**: Avoid re-pulling existing images during install ### ✨ Improvements - **Ollama**: Fallback list of recommended models if api.projectnomad.us is down - **Ollama/Qdrant**: Docker images pinned to specific versions for stability - **README**: Added website and community links - Removed Open WebUI as a separate installable app (replaced by built-in AI Chat) --- ## Version 1.20.0 - January 28, 2026 ### 🚀 Features - **Collections**: Expanded curated categories with more content and improved tier selection modal UX - **Legal**: Expanded Legal Notices and moved to bottom of Settings sidebar ### 🐛 Bug Fixes - **Install**: Handle missing curl dependency on fresh Ubuntu installs - **Migrations**: Fix timestamp ordering for builder_tag migration --- ## Version 1.19.0 - January 28, 2026 ### 🚀 Features - **Benchmark**: Builder Tag system — claim leaderboard spots with NOMAD-themed tags (e.g., "Tactical-Llama-1234") - **Benchmark**: Full benchmark with AI now required for community sharing; HMAC-signed submissions - **Release Notes**: Subscribe to release notes via email - **Maps**: Automatically download base map assets if missing ### 🐛 Bug Fixes - **System Info**: Fall back to fsSize when disk array is empty (fixes "No storage devices detected") --- ## Version 1.18.0 - January 24, 2026 ### 🚀 Features - **Collections**: Improved curated collections UX with persistent tier selection and submit-to-confirm workflow ### 🐛 Bug Fixes - **Benchmark**: Fix AI benchmark connectivity (Docker container couldn't reach Ollama on host) - **Open WebUI**: Fix install status indicator ### ✨ Improvements - **Docker**: Container URL resolution utility and networking improvements --- ## Version 1.17.0 - January 23, 2026 ### 🚀 Features - **System Benchmark**: Hardware scoring with NOMAD Score, circular gauges, and community leaderboard submission - **Dashboard**: User-friendly app names with "Powered by" open source attribution - **Settings**: Updated nomenclature and added tiered content collections to Settings pages - **Queues**: Support working all queues with a single command ### 🐛 Bug Fixes - **Easy Setup**: Select valid primary disk for storage projection bar - **Docs**: Remove broken service links that pointed to invalid routes - **Notifications**: Improved styling - **UI**: Remove splash screen - **Maps**: Static path resolution fix --- ## Version 1.16.0 - January 20, 2026 ### 🚀 Features - **Apps**: Force-reinstall option for installed applications - **Open WebUI**: Manage Ollama models directly from Command Center - **Easy Setup**: Show selected AI model size in storage projection bar ### ✨ Improvements - **Curated Categories**: Improved fetching from GitHub - **Build**: Added dockerignore file --- ## Version 1.15.0 - January 19, 2026 ### 🚀 Features - **Easy Setup Wizard**: Redesigned Step 1 with user-friendly capability cards instead of app names - **Tiered Collections**: Category-based content collections with Essential, Standard, and Comprehensive tiers - **Storage Projection Bar**: Visual disk usage indicator showing projected additions during Easy Setup - **Windows Support**: Docker Desktop support for local development with platform detection and NOMAD_STORAGE_PATH env var - **Documentation**: Comprehensive in-app documentation (Home, Getting Started, FAQ, Use Cases) ### ✨ Improvements - **Easy Setup**: Renamed step 3 label from "ZIM Files" to "Content" - **Notifications**: Fixed auto-dismiss not working due to stale closure - Added 
Survival & Preparedness and Education & Reference content categories --- ## Version 1.14.0 - January 16, 2026 ### 🚀 Features - **Collections**: Auto-fetch latest curated collections from GitHub ### 🐛 Bug Fixes - **Docker**: Improved container state management --- ## Version 1.13.0 - January 15, 2026 ### 🚀 Features - **Easy Setup Wizard**: Initial implementation of the guided first-time setup experience - **Maps**: Enhanced missing assets warnings - **Apps**: Improved app cards with custom icons ### 🐛 Bug Fixes - **Curated Collections**: UI tweaks - **Install**: Changed admin container pull_policy to always --- ## Version 1.12.0 - 1.12.3 - December 24, 2025 - January 13, 2026 ### 🚀 Features - **System**: Check internet status on backend with custom test URL support ### 🐛 Bug Fixes - **Admin**: Improved service install status management - **Admin**: Improved duplicate install request handling - **Admin**: Fixed base map assets download URL - **Admin**: Fixed port binding for Open WebUI - **Admin**: Improved memory usage indicators - **Admin**: Added favicons - **Admin**: Fixed container healthcheck - **Admin**: Fixed missing ZIM download API client method - **Install**: Fixed disk info file mount and stability - **Install**: Ensure update script always pulls latest images - **Install**: Use modern docker compose command in update script - **Install**: Ensure update script is executable - **Scripts**: Remove disk info file on uninstall --- ## Version 1.11.0 - 1.11.1 - December 24, 2025 ### 🚀 Features - **Maps**: Curated map region collections - **Collections**: Map region collection definitions ### 🐛 Bug Fixes - **Maps**: Fixed custom pmtiles file downloads - **Docs**: Documentation renderer fixes --- ## Version 1.10.1 - December 5, 2025 ### ✨ Improvements - **Kiwix**: ZIM storage path improvements --- ## Version 1.10.0 - December 5, 2025 ### 🚀 Features - Disk info monitoring ### ✨ Improvements - **Install**: Add Redis env variables to compose file - **Kiwix**: Initial download and setup --- ## Version 1.9.0 - December 5, 2025 ### 🚀 Features - Background job management with BullMQ ### ✨ Improvements - **Install**: Character escaping in env variables - **Install**: Host env variable --- ## Version 1.8.0 - December 5, 2025 ### 🚀 Features - Alert and button styles redesign - System info page redesign - **Collections**: Curated ZIM Collections with slug, icon, and language support - Custom map and ZIM file downloads (WIP) - New maps system (WIP) ### ✨ Improvements - **DockerService**: Cleanup old OSM stuff - **Install**: Standardize compose file names --- ## Version 1.7.0 - December 5, 2025 ### 🚀 Features - Alert and button styles redesign - System info page redesign - **Collections**: Curated ZIM Collections - Custom map and ZIM file downloads (WIP) - New maps system (WIP) ### ✨ Improvements - **DockerService**: Cleanup old OSM stuff - **Install**: Standardize compose file names --- ## Version 1.6.0 - November 18, 2025 ### 🚀 Features - Added Kolibri to standard app library ### ✨ Improvements - Standardize container names in management-compose --- ## Version 1.5.0 - November 18, 2025 ### 🚀 Features - Version footer and fix CI version handling --- ## Version 1.4.0 - November 18, 2025 ### 🚀 Features - **Services**: Friendly names and descriptions ### ✨ Improvements - **Scripts**: Logs directory creation improvements - **Scripts**: Fix typo in management-compose file path --- ## Version 1.3.0 - October 9, 2025 ### 🚀 New Features - Uninstall script now removes non-management Nomad app containers ### 
✨ Improvements

- **OpenStreetMap**: Apply dir permission fixes more robustly

---

## Version 1.2.0 - October 7, 2025

### 🚀 New Features

- Added CyberChef to standard app library
- Added Dozzle to core containers for enhanced logs and metrics
- Added FlatNotes to standard app library
- Uninstall helper script available

### ✨ Improvements

- **OpenStreetMap**:
  - Fixed directory paths and access issues
  - Improved error handling
  - Fixed renderer file permissions
  - Fixed absolute host path issue
- **ZIM Manager**:
  - Initial ZIM download now hosted in Project Nomad GitHub repo for better availability

---

## Version 1.1.0 - August 20, 2025

### 🚀 New Features

**OpenStreetMap Installation**

- Added OpenStreetMap to installable applications
- Automatically downloads and imports US Pacific region during installation
- Supports rendered tile caching for enhanced performance

### ✨ Improvements

- **Apps**: Added start/stop/restart controls for each application container in settings
- **ZIM Manager**: Error-handling/resumable downloads + enhanced UI
- **System**: You can now view system information such as CPU, RAM, and disk stats in settings
- **Legal**: Added legal notices in settings
- **UI**: Added general UI enhancements such as alerts and error dialogs
- Standardized container naming to reduce potential for conflicts with existing containers on host system

### ⚠️ Breaking Changes

- **Container Naming**: As a result of standardized container naming, it is recommended that you do a fresh install of Project N.O.M.A.D. and any apps to avoid potential conflicts/duplication of containers

### 📚 Documentation

- Added release notes page

---

## Version 1.0.1 - July 11, 2025

### 🐛 Bug Fixes

- **Docs**: Fixed doc rendering
- **Install**: Fixed installation script URLs
- **OpenWebUI**: Fixed Ollama connection

---

## Version 1.0.0 - July 11, 2025

### 🚀 New Features

- Initial alpha release for app installation and documentation
- OpenWebUI, Ollama, Kiwix installation
- ZIM downloads & management

---

## Support

- **Discord:** [Join the Community](https://discord.com/invite/crosstalksolutions) — Get help, share your builds, and connect with other NOMAD users
- **Bug Reports:** [GitHub Issues](https://github.com/Crosstalk-Solutions/project-nomad/issues)
- **Website:** [www.projectnomad.us](https://www.projectnomad.us)

---

*For the full changelog, see our [GitHub releases](https://github.com/Crosstalk-Solutions/project-nomad/releases).*

================================================
FILE: admin/docs/use-cases.md
================================================

# What Can You Do With N.O.M.A.D.?

N.O.M.A.D. is designed to be your information lifeline when internet isn't available. Here's how different people use it.

---

## Emergency Preparedness

When disasters strike, internet and cell service often go down first. N.O.M.A.D. keeps critical information at your fingertips.
**What you can do:** - Look up first aid and emergency medical procedures - Access survival guides and emergency protocols - Find information about water purification, food storage, shelter building - Use offline maps to navigate when GPS services are degraded - Research plant identification, weather patterns, radio frequencies - Upload emergency plans and protocols to the Knowledge Base for quick AI-assisted reference **Recommended content:** - Medical Library ZIM collection - Survival/Prepper reference guides - Maps for your region and evacuation routes - Wikipedia (searchable for almost any topic) --- ## Homeschooling and Education Teach your children anywhere, with or without internet. Complete curriculum available offline. **What you can do:** - Access Khan Academy's full course library (math, science, reading, history) - Track progress for multiple students - Supplement with Wikipedia for research projects - Use the AI as a patient tutor for any subject - Access classic literature through Project Gutenberg - Upload curriculum guides to the Knowledge Base so the AI can help answer curriculum-specific questions **Recommended content:** - Khan Academy courses via Kolibri - Wikipedia for Schools (curated for younger learners) - Project Gutenberg (classic books) - Educational ZIM collections **Tip:** Create separate Kolibri accounts for each child to track their individual progress. --- ## Off-Grid Living Living away from reliable internet doesn't mean living without information. **What you can do:** - Research DIY projects and repairs - Look up gardening, animal husbandry, food preservation - Access medical references for remote healthcare - Learn new skills through educational videos - Get AI help with planning and problem-solving **Recommended content:** - How-to and DIY reference collections - Medical and first aid guides - Agricultural and homesteading references - Maps for your rural area - Practical skills courses in Kolibri --- ## Remote Work Sites Construction sites, research stations, ships, and remote facilities often lack reliable internet. **What you can do:** - Access technical references and documentation - Use AI for writing assistance and analysis - Upload technical manuals and SOPs to the Knowledge Base for document-aware AI responses - Look up regulations, standards, and procedures - Provide educational resources for workers - Maintain communication records with note-taking apps **Recommended content:** - Industry-specific technical references - Relevant Wikipedia categories - Maps of work areas - Documentation and compliance guides --- ## Travel and Expeditions International travel, cruises, camping trips — stay informed anywhere. **What you can do:** - Access maps without expensive roaming data - Research destinations, history, and culture - Translate concepts with AI assistance - Identify plants, animals, and geological features - Access travel health information **Recommended content:** - Maps for destination countries/regions - Wikipedia in relevant languages - Medical/health references - Cultural and historical content --- ## Privacy-Conscious Users Some people simply prefer to keep their searches and questions private. 
**What you can do:** - Search Wikipedia without being tracked - Ask AI questions that stay on your own hardware - Upload sensitive documents to the Knowledge Base — they never leave your server - Learn about sensitive topics privately - Keep your intellectual curiosity to yourself **How it works:** - All data stays on your server - No search history sent to companies - No AI conversations leave your network — the AI chat is built into the Command Center - All Knowledge Base processing happens locally - You control your own information --- ## Medical Reference When you can't reach a doctor, having reliable medical information can be critical. **What you can access:** - NHS Medicines A-Z (drug information and interactions) - Medical Library (field medicine, emergency procedures) - First aid guides - Anatomy and physiology references - Disease and symptom information **Important:** Medical references are for information only. They don't replace professional medical care. In emergencies, always seek professional help when possible. **Recommended content:** - Medical Essentials ZIM collection - NHS Medicines reference - First aid and emergency medicine guides --- ## Academic Research Students and researchers can work without depending on university networks. **What you can do:** - Access Wikipedia's extensive article database - Use AI for research assistance and summarization - Upload research papers to the Knowledge Base for AI-assisted analysis and cross-referencing - Work on papers and projects offline - Cross-reference multiple sources - Take notes with built-in tools **Recommended content:** - Full Wikipedia - Academic and educational references - Subject-specific ZIM collections - Note-taking apps (FlatNotes) --- ## Setting Up for Your Use Case ### Step 1: Identify Your Needs What situations might you face without internet? What information would you need? ### Step 2: Prioritize Content Storage is limited. Focus on: 1. Critical safety information (medical, emergency) 2. Content matching your primary use case 3. General reference (Wikipedia) 4. Nice-to-have additions ### Step 3: Upload Relevant Documents Add your own documents to the [Knowledge Base](/knowledge-base) — emergency plans, technical manuals, curriculum guides, or research papers. The AI can reference these when you ask questions. ### Step 4: Download While You Can Keep your server updated while you have internet. You never know when you'll need to go offline. ### Step 5: Practice Try using N.O.M.A.D. before you need it. Familiarity with the tools makes them more useful in a crisis. --- ## Need Something Specific? N.O.M.A.D. content is customizable. If you don't see what you need: 1. **Browse [Content Explorer](/settings/zim/remote-explorer)** — Thousands of ZIM files including Wikipedia packages 2. **Check [Content Manager](/settings/zim)** — Manage your installed content 3. **Browse Kolibri channels** — Educational content for many subjects 4. **Upload your own documents** — Add files to the [Knowledge Base](/knowledge-base) for AI-aware reference 5. **Request features** — Let us know what content would help you on [Discord](https://discord.com/invite/crosstalksolutions) Your offline server, your content choices. 
================================================ FILE: admin/eslint.config.js ================================================ import { configApp } from '@adonisjs/eslint-config' import pluginQuery from '@tanstack/eslint-plugin-query' export default configApp(...pluginQuery.configs['flat/recommended']) ================================================ FILE: admin/inertia/app/app.tsx ================================================ /// /// import '../css/app.css' import { createRoot } from 'react-dom/client' import { createInertiaApp } from '@inertiajs/react' import { resolvePageComponent } from '@adonisjs/inertia/helpers' import ModalsProvider from '~/providers/ModalProvider' import { TransmitProvider } from 'react-adonis-transmit' import { generateUUID } from '~/lib/util' import { QueryClient, QueryClientProvider } from '@tanstack/react-query' import { ReactQueryDevtools } from '@tanstack/react-query-devtools' import NotificationsProvider from '~/providers/NotificationProvider' import { ThemeProvider } from '~/providers/ThemeProvider' import { UsePageProps } from '../../types/system' const appName = import.meta.env.VITE_APP_NAME || 'Project N.O.M.A.D.' const queryClient = new QueryClient() // Patch the global crypto object for non-HTTPS/localhost contexts if (!window.crypto?.randomUUID) { // @ts-ignore if (!window.crypto) window.crypto = {} // @ts-ignore window.crypto.randomUUID = generateUUID } createInertiaApp({ progress: { color: '#424420' }, title: (title) => `${title} - ${appName}`, resolve: (name) => { return resolvePageComponent(`../pages/${name}.tsx`, import.meta.glob('../pages/**/*.tsx')) }, setup({ el, App, props }) { const environment = (props.initialPage.props as unknown as UsePageProps).environment const showDevtools = ['development', 'staging'].includes(environment) createRoot(el).render( {showDevtools && } ) }, }) ================================================ FILE: admin/inertia/components/ActiveDownloads.tsx ================================================ import useDownloads, { useDownloadsProps } from '~/hooks/useDownloads' import HorizontalBarChart from './HorizontalBarChart' import { extractFileName } from '~/lib/util' import StyledSectionHeader from './StyledSectionHeader' import { IconAlertTriangle, IconX } from '@tabler/icons-react' import api from '~/lib/api' interface ActiveDownloadProps { filetype?: useDownloadsProps['filetype'] withHeader?: boolean } const ActiveDownloads = ({ filetype, withHeader = false }: ActiveDownloadProps) => { const { data: downloads, invalidate } = useDownloads({ filetype }) const handleDismiss = async (jobId: string) => { await api.removeDownloadJob(jobId) invalidate() } return ( <> {withHeader && }
{downloads && downloads.length > 0 ? ( downloads.map((download) => (
{download.status === 'failed' ? (

{extractFileName(download.filepath) || download.url}

Download failed{download.failedReason ? `: ${download.failedReason}` : ''}

) : ( )}
)) ) : (

No active downloads

)}
) } export default ActiveDownloads ================================================ FILE: admin/inertia/components/ActiveEmbedJobs.tsx ================================================ import useEmbedJobs from '~/hooks/useEmbedJobs' import HorizontalBarChart from './HorizontalBarChart' import StyledSectionHeader from './StyledSectionHeader' interface ActiveEmbedJobsProps { withHeader?: boolean } const ActiveEmbedJobs = ({ withHeader = false }: ActiveEmbedJobsProps) => { const { data: jobs } = useEmbedJobs() return ( <> {withHeader && ( )}
{jobs && jobs.length > 0 ? ( jobs.map((job) => (
)) ) : (

No files are currently being processed

)}
) } export default ActiveEmbedJobs ================================================ FILE: admin/inertia/components/ActiveModelDownloads.tsx ================================================ import useOllamaModelDownloads from '~/hooks/useOllamaModelDownloads' import HorizontalBarChart from './HorizontalBarChart' import StyledSectionHeader from './StyledSectionHeader' interface ActiveModelDownloadsProps { withHeader?: boolean } const ActiveModelDownloads = ({ withHeader = false }: ActiveModelDownloadsProps) => { const { downloads } = useOllamaModelDownloads() return ( <> {withHeader && }
{downloads && downloads.length > 0 ? ( downloads.map((download) => (
)) ) : (

No active model downloads

)}
) } export default ActiveModelDownloads ================================================ FILE: admin/inertia/components/Alert.tsx ================================================ import * as Icons from '@tabler/icons-react' import classNames from '~/lib/classNames' import DynamicIcon from './DynamicIcon' import StyledButton, { StyledButtonProps } from './StyledButton' export type AlertProps = React.HTMLAttributes & { title: string message?: string type: 'warning' | 'error' | 'success' | 'info' | 'info-inverted' children?: React.ReactNode dismissible?: boolean onDismiss?: () => void icon?: keyof typeof Icons variant?: 'standard' | 'bordered' | 'solid' buttonProps?: StyledButtonProps } export default function Alert({ title, message, type, children, dismissible = false, onDismiss, icon, variant = 'standard', buttonProps, ...props }: AlertProps) { const getDefaultIcon = (): keyof typeof Icons => { switch (type) { case 'warning': return 'IconAlertTriangle' case 'error': return 'IconXboxX' case 'success': return 'IconCircleCheck' case 'info': return 'IconInfoCircle' default: return 'IconInfoCircle' } } const getIconColor = () => { if (variant === 'solid') return 'text-white' switch (type) { case 'warning': return 'text-desert-orange' case 'error': return 'text-desert-red' case 'success': return 'text-desert-olive' case 'info': return 'text-desert-stone' default: return 'text-desert-stone' } } const getVariantStyles = () => { const baseStyles = 'rounded-lg transition-all duration-200' const variantStyles: string[] = [] switch (variant) { case 'bordered': variantStyles.push( type === 'warning' ? 'border-desert-orange' : type === 'error' ? 'border-desert-red' : type === 'success' ? 'border-desert-olive' : type === 'info' ? 'border-desert-stone' : type === 'info-inverted' ? 'border-desert-tan' : '' ) return classNames(baseStyles, 'border-2 bg-desert-white shadow-md', ...variantStyles) case 'solid': variantStyles.push( type === 'warning' ? 'bg-desert-orange text-white border border-desert-orange-dark' : type === 'error' ? 'bg-desert-red text-white border border-desert-red-dark' : type === 'success' ? 'bg-desert-olive text-white border border-desert-olive-dark' : type === 'info' ? 'bg-desert-green text-white border border-desert-green-dark' : type === 'info-inverted' ? 'bg-desert-tan text-white border border-desert-tan-dark' : '' ) return classNames(baseStyles, 'shadow-lg', ...variantStyles) default: variantStyles.push( type === 'warning' ? 'bg-desert-orange-lighter bg-opacity-20 border-desert-orange-light' : type === 'error' ? 'bg-desert-red-lighter bg-opacity-20 border-desert-red-light' : type === 'success' ? 'bg-desert-olive-lighter bg-opacity-20 border-desert-olive-light' : type === 'info' ? 'bg-desert-green bg-opacity-20 border-desert-green-light' : type === 'info-inverted' ? 
'bg-desert-tan bg-opacity-20 border-desert-tan-light' : '' ) return classNames(baseStyles, 'border-l-4 border-y border-r shadow-sm', ...variantStyles) } } const getTitleColor = () => { if (variant === 'solid') return 'text-white' switch (type) { case 'warning': return 'text-desert-orange-dark' case 'error': return 'text-desert-red-dark' case 'success': return 'text-desert-olive-dark' case 'info': return 'text-desert-stone-dark' case 'info-inverted': return 'text-desert-tan-dark' default: return 'text-desert-stone-dark' } } const getMessageColor = () => { if (variant === 'solid') return 'text-white text-opacity-90' switch (type) { case 'warning': return 'text-desert-orange-dark text-opacity-80' case 'error': return 'text-desert-red-dark text-opacity-80' case 'success': return 'text-desert-olive-dark text-opacity-80' case 'info': return 'text-desert-stone-dark text-opacity-80' default: return 'text-desert-stone-dark text-opacity-80' } } const getCloseButtonStyles = () => { if (variant === 'solid') { return 'text-white hover:text-white hover:bg-black hover:bg-opacity-20' } switch (type) { case 'warning': return 'text-desert-orange hover:text-desert-orange-dark hover:bg-desert-orange-lighter hover:bg-opacity-30' case 'error': return 'text-desert-red hover:text-desert-red-dark hover:bg-desert-red-lighter hover:bg-opacity-30' case 'success': return 'text-desert-olive hover:text-desert-olive-dark hover:bg-desert-olive-lighter hover:bg-opacity-30' case 'info': return 'text-desert-stone hover:text-desert-stone-dark hover:bg-desert-stone-lighter hover:bg-opacity-30' default: return 'text-desert-stone hover:text-desert-stone-dark hover:bg-desert-stone-lighter hover:bg-opacity-30' } } return (

{title}

{message && (

{message}

)} {children &&
{children}
}
{buttonProps && (
)} {dismissible && ( )}
) } ================================================ FILE: admin/inertia/components/BouncingDots.tsx ================================================ import clsx from 'clsx' interface BouncingDotsProps { text: string containerClassName?: string textClassName?: string } export default function BouncingDots({ text, containerClassName, textClassName }: BouncingDotsProps) { return (
{text}
) } ================================================ FILE: admin/inertia/components/BouncingLogo.tsx ================================================ import { useState, useEffect } from 'react'; // Fading Image Component const FadingImage = ({ alt = "Fading image", className = "" }) => { const [isVisible, setIsVisible] = useState(true); const [shouldShow, setShouldShow] = useState(true); useEffect(() => { // Start fading out after 2 seconds const fadeTimer = setTimeout(() => { setIsVisible(false); }, 2000); // Remove from DOM after fade out completes const removeTimer = setTimeout(() => { setShouldShow(false); }, 3000); return () => { clearTimeout(fadeTimer); clearTimeout(removeTimer); }; }, []); if (!shouldShow) { return null; } return (
{alt}
); }; export default FadingImage; ================================================ FILE: admin/inertia/components/BuilderTagSelector.tsx ================================================ import { IconRefresh } from '@tabler/icons-react' import { useState, useEffect } from 'react' import { ADJECTIVES, NOUNS, generateRandomNumber, generateRandomBuilderTag, parseBuilderTag, buildBuilderTag, } from '~/lib/builderTagWords' interface BuilderTagSelectorProps { value: string | null onChange: (tag: string) => void disabled?: boolean } export default function BuilderTagSelector({ value, onChange, disabled = false, }: BuilderTagSelectorProps) { const [adjective, setAdjective] = useState(ADJECTIVES[0]) const [noun, setNoun] = useState(NOUNS[0]) const [number, setNumber] = useState(generateRandomNumber()) // Parse existing value on mount useEffect(() => { if (value) { const parsed = parseBuilderTag(value) if (parsed) { setAdjective(parsed.adjective) setNoun(parsed.noun) setNumber(parsed.number) } } else { // Generate a random tag for new users const randomTag = generateRandomBuilderTag() const parsed = parseBuilderTag(randomTag) if (parsed) { setAdjective(parsed.adjective) setNoun(parsed.noun) setNumber(parsed.number) onChange(randomTag) } } }, []) // Update parent when selections change const updateTag = (newAdjective: string, newNoun: string, newNumber: string) => { const tag = buildBuilderTag(newAdjective, newNoun, newNumber) onChange(tag) } const handleAdjectiveChange = (newAdjective: string) => { setAdjective(newAdjective) updateTag(newAdjective, noun, number) } const handleNounChange = (newNoun: string) => { setNoun(newNoun) updateTag(adjective, newNoun, number) } const handleRandomize = () => { const newAdjective = ADJECTIVES[Math.floor(Math.random() * ADJECTIVES.length)] const newNoun = NOUNS[Math.floor(Math.random() * NOUNS.length)] const newNumber = generateRandomNumber() setAdjective(newAdjective) setNoun(newNoun) setNumber(newNumber) updateTag(newAdjective, newNoun, newNumber) } const currentTag = buildBuilderTag(adjective, noun, number) return (
- - {number}
Your Builder Tag: {currentTag}
) } ================================================ FILE: admin/inertia/components/CategoryCard.tsx ================================================ import { formatBytes } from '~/lib/util' import DynamicIcon, { DynamicIconName } from './DynamicIcon' import type { CategoryWithStatus, SpecTier } from '../../types/collections' import classNames from 'classnames' import { IconChevronRight, IconCircleCheck } from '@tabler/icons-react' export interface CategoryCardProps { category: CategoryWithStatus selectedTier?: SpecTier | null onClick?: (category: CategoryWithStatus) => void } const CategoryCard: React.FC = ({ category, selectedTier, onClick }) => { // Calculate total size range across all tiers const getTierTotalSize = (tier: SpecTier, allTiers: SpecTier[]): number => { let total = tier.resources.reduce((acc, r) => acc + r.size_mb * 1024 * 1024, 0) // Add included tier sizes recursively if (tier.includesTier) { const includedTier = allTiers.find(t => t.slug === tier.includesTier) if (includedTier) { total += getTierTotalSize(includedTier, allTiers) } } return total } const minSize = getTierTotalSize(category.tiers[0], category.tiers) const maxSize = getTierTotalSize(category.tiers[category.tiers.length - 1], category.tiers) // Determine which tier to highlight: selectedTier (wizard) > installedTierSlug (persisted) const highlightedTierSlug = selectedTier?.slug || category.installedTierSlug return (
onClick?.(category)} >

{category.name}

{selectedTier ? (
{selectedTier.name}
) : ( )}

{category.description}

{category.tiers.length} tiers available {!highlightedTierSlug && ( - Click to choose )}

{category.tiers.map((tier) => { const isInstalled = tier.slug === highlightedTierSlug return ( {tier.name} ) })}

Size: {formatBytes(minSize, 1)} - {formatBytes(maxSize, 1)}

) } export default CategoryCard ================================================ FILE: admin/inertia/components/CuratedCollectionCard.tsx ================================================ import { formatBytes } from '~/lib/util' import DynamicIcon, { DynamicIconName } from './DynamicIcon' import type { CollectionWithStatus } from '../../types/collections' import classNames from 'classnames' import { IconCircleCheck } from '@tabler/icons-react' export interface CuratedCollectionCardProps { collection: CollectionWithStatus onClick?: (collection: CollectionWithStatus) => void; size?: 'small' | 'large' } const CuratedCollectionCard: React.FC = ({ collection, onClick, size = 'small' }) => { const totalSizeBytes = collection.resources?.reduce( (acc, resource) => acc + resource.size_mb * 1024 * 1024, 0 ) return (
{ if (collection.all_installed) { return } if (onClick) { onClick(collection) } }} >

{collection.name}

{collection.all_installed && (

All items downloaded

)}

{collection.description}

Items: {collection.resources?.length} | Size: {formatBytes(totalSizeBytes, 0)}

) } export default CuratedCollectionCard ================================================ FILE: admin/inertia/components/DebugInfoModal.tsx ================================================ import { useEffect, useState } from 'react' import { IconBug, IconCopy, IconCheck } from '@tabler/icons-react' import StyledModal from './StyledModal' import api from '~/lib/api' interface DebugInfoModalProps { open: boolean onClose: () => void } export default function DebugInfoModal({ open, onClose }: DebugInfoModalProps) { const [debugText, setDebugText] = useState('') const [loading, setLoading] = useState(false) const [copied, setCopied] = useState(false) useEffect(() => { if (!open) return setLoading(true) setCopied(false) api.getDebugInfo().then((text) => { if (text) { const browserLine = `Browser: ${navigator.userAgent}` setDebugText(text + '\n' + browserLine) } else { setDebugText('Failed to load debug info. Please try again.') } setLoading(false) }).catch(() => { setDebugText('Failed to load debug info. Please try again.') setLoading(false) }) }, [open]) const handleCopy = async () => { try { await navigator.clipboard.writeText(debugText) } catch { // Fallback for older browsers const textarea = document.querySelector('#debug-info-text') if (textarea) { textarea.select() document.execCommand('copy') } } setCopied(true) setTimeout(() => setCopied(false), 2000) } return ( } cancelText="Close" onCancel={onClose} >

This is non-sensitive system info you can share when reporting issues. No passwords, IPs, or API keys are included.