Full Code of CNTRLAI/Notate for AI

main bc5c346ab1d2 cached
298 files
1002.2 KB
246.8k tokens
573 symbols
1 request
Download .txt
Showing preview only (1,088K chars total). Download the full file or copy to clipboard to get everything.
Repository: CNTRLAI/Notate
Branch: main
Commit: bc5c346ab1d2
Files: 298
Total size: 1002.2 KB

Directory structure:
gitextract_jmk8mdb7/

├── .gitignore
├── Backend/
│   ├── .gitignore
│   ├── ensure_dependencies.py
│   ├── main.py
│   ├── requirements.txt
│   ├── src/
│   │   ├── authentication/
│   │   │   ├── api_key_authorization.py
│   │   │   └── token.py
│   │   ├── data/
│   │   │   ├── dataFetch/
│   │   │   │   ├── webcrawler.py
│   │   │   │   └── youtube.py
│   │   │   ├── dataIntake/
│   │   │   │   ├── csvFallbackSplitting.py
│   │   │   │   ├── fileTypes/
│   │   │   │   │   └── loadX.py
│   │   │   │   ├── getHtmlFiles.py
│   │   │   │   ├── loadFile.py
│   │   │   │   └── textSplitting.py
│   │   │   └── database/
│   │   │       ├── checkAPIKey.py
│   │   │       ├── db.py
│   │   │       ├── getCollectionInfo.py
│   │   │       └── getLLMApiKey.py
│   │   ├── endpoint/
│   │   │   ├── api.py
│   │   │   ├── deleteStore.py
│   │   │   ├── devApiCall.py
│   │   │   ├── embed.py
│   │   │   ├── models.py
│   │   │   ├── ragQuery.py
│   │   │   ├── transcribe.py
│   │   │   ├── vectorQuery.py
│   │   │   └── webcrawl.py
│   │   ├── llms/
│   │   │   ├── llmQuery.py
│   │   │   ├── messages/
│   │   │   │   └── formMessages.py
│   │   │   └── providers/
│   │   │       ├── local.py
│   │   │       ├── ollama.py
│   │   │       ├── ooba.py
│   │   │       └── openai.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── exceptions.py
│   │   │   ├── loaders/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base.py
│   │   │   │   ├── exllama.py
│   │   │   │   ├── hqq.py
│   │   │   │   ├── llamaccphf.py
│   │   │   │   ├── llamacpp.py
│   │   │   │   ├── tensorrt.py
│   │   │   │   └── transformers.py
│   │   │   ├── manager.py
│   │   │   ├── streamer.py
│   │   │   └── utils/
│   │   │       ├── __init__.py
│   │   │       ├── detect_type.py
│   │   │       ├── device.py
│   │   │       ├── download.py
│   │   │       └── platform.py
│   │   ├── vectorstorage/
│   │   │   ├── embeddings.py
│   │   │   ├── helpers/
│   │   │   │   └── sanitizeCollectionName.py
│   │   │   ├── init_store.py
│   │   │   └── vectorstore.py
│   │   └── voice/
│   │       └── voice_to_text.py
│   └── tests/
│       ├── testApi.py
│       └── test_voice.py
├── Frontend/
│   ├── .gitignore
│   ├── build/
│   │   └── icons/
│   │       └── icon.icns
│   ├── components.json
│   ├── e2e/
│   │   └── app.spec.ts
│   ├── electron-builder.json
│   ├── eslint.config.js
│   ├── index.html
│   ├── package.json
│   ├── playwright.config.ts
│   ├── postcss.config.js
│   ├── src/
│   │   ├── app/
│   │   │   ├── App.tsx
│   │   │   ├── index.css
│   │   │   ├── main.tsx
│   │   │   └── vite-env.d.ts
│   │   ├── components/
│   │   │   ├── AppAlert/
│   │   │   │   └── SettingsAlert.tsx
│   │   │   ├── Authentication/
│   │   │   │   ├── CreateAccount.tsx
│   │   │   │   └── SelectAccount.tsx
│   │   │   ├── Chat/
│   │   │   │   ├── Chat.tsx
│   │   │   │   └── ChatComponents/
│   │   │   │       ├── ChatHeader.tsx
│   │   │   │       ├── ChatInput.tsx
│   │   │   │       ├── ChatMessage.tsx
│   │   │   │       ├── ChatMessagesArea.tsx
│   │   │   │       ├── LoadingIndicator.tsx
│   │   │   │       ├── NewConvoWelcome.tsx
│   │   │   │       ├── ReasoningMessage.tsx
│   │   │   │       ├── StreamingMessage.tsx
│   │   │   │       ├── StreamingReasoningMessage.tsx
│   │   │   │       ├── SyntaxHightlightedCode.tsx
│   │   │   │       └── suggestions.tsx
│   │   │   ├── CollectionModals/
│   │   │   │   ├── CollectionComponents/
│   │   │   │   │   ├── AddLibrary.tsx
│   │   │   │   │   ├── DataStoreSelect.tsx
│   │   │   │   │   ├── FIlesInCollection.tsx
│   │   │   │   │   ├── Ingest.tsx
│   │   │   │   │   ├── IngestProgress.tsx
│   │   │   │   │   ├── IngestTabs/
│   │   │   │   │   │   ├── FileIngestTab.tsx
│   │   │   │   │   │   └── LinkIngestTab.tsx
│   │   │   │   │   └── ingestTypes.tsx
│   │   │   │   └── LibraryModal.tsx
│   │   │   ├── FileExplorer/
│   │   │   │   └── FileExplorer.tsx
│   │   │   ├── Header/
│   │   │   │   ├── Header.tsx
│   │   │   │   └── HeaderComponents/
│   │   │   │       ├── MainWindowControl.tsx
│   │   │   │       ├── Search.tsx
│   │   │   │       ├── SettingsDialog.tsx
│   │   │   │       ├── ToolsDialog.tsx
│   │   │   │       └── WinLinuxControls.tsx
│   │   │   ├── History/
│   │   │   │   └── History.tsx
│   │   │   ├── SettingsModal/
│   │   │   │   ├── SettingsComponents/
│   │   │   │   │   ├── ChatSettings.tsx
│   │   │   │   │   ├── DevIntegration.tsx
│   │   │   │   │   ├── LLMModels/
│   │   │   │   │   │   ├── AddLocalModel.tsx
│   │   │   │   │   │   ├── AddOllamaModel.tsx
│   │   │   │   │   │   ├── AzureOpenAI.tsx
│   │   │   │   │   │   ├── CustomLLM.tsx
│   │   │   │   │   │   ├── External.tsx
│   │   │   │   │   │   ├── ExternalOllama.tsx
│   │   │   │   │   │   ├── LocalLLM.tsx
│   │   │   │   │   │   ├── Ollama.tsx
│   │   │   │   │   │   └── Openrouter.tsx
│   │   │   │   │   ├── LLMPanel.tsx
│   │   │   │   │   └── providers/
│   │   │   │   │       ├── SvgIcon.tsx
│   │   │   │   │       ├── defaultsProviderModels.tsx
│   │   │   │   │       └── providerIcons.tsx
│   │   │   │   └── SettingsModal.tsx
│   │   │   ├── Tools/
│   │   │   │   ├── ToolComponents/
│   │   │   │   │   ├── AddTools.tsx
│   │   │   │   │   └── EnableTools.tsx
│   │   │   │   └── Tools.tsx
│   │   │   └── ui/
│   │   │       ├── alert.tsx
│   │   │       ├── avatar.tsx
│   │   │       ├── badge.tsx
│   │   │       ├── button.tsx
│   │   │       ├── buttonVariants.tsx
│   │   │       ├── card.tsx
│   │   │       ├── command.tsx
│   │   │       ├── dialog.tsx
│   │   │       ├── form.tsx
│   │   │       ├── icons.tsx
│   │   │       ├── input.tsx
│   │   │       ├── label.tsx
│   │   │       ├── menubar.tsx
│   │   │       ├── popover.tsx
│   │   │       ├── progress.tsx
│   │   │       ├── radio-group.tsx
│   │   │       ├── scroll-area.tsx
│   │   │       ├── select.tsx
│   │   │       ├── separator.tsx
│   │   │       ├── sheet.tsx
│   │   │       ├── slider.tsx
│   │   │       ├── switch.tsx
│   │   │       ├── tabs.tsx
│   │   │       ├── textarea.tsx
│   │   │       ├── toast.tsx
│   │   │       ├── toaster.tsx
│   │   │       └── tooltip.tsx
│   │   ├── context/
│   │   │   ├── ChatInputContext.tsx
│   │   │   ├── LibraryContext.tsx
│   │   │   ├── SysSettingsContext.tsx
│   │   │   ├── UserClientProviders.tsx
│   │   │   ├── UserContext.tsx
│   │   │   ├── ViewContext.tsx
│   │   │   ├── useChatInput.tsx
│   │   │   ├── useLibrary.tsx
│   │   │   ├── useSysSettings.tsx
│   │   │   ├── useUser.tsx
│   │   │   └── useView.tsx
│   │   ├── data/
│   │   │   ├── models.ts
│   │   │   └── sysSpecs.ts
│   │   ├── electron/
│   │   │   ├── authentication/
│   │   │   │   ├── devApi.ts
│   │   │   │   ├── secret.ts
│   │   │   │   └── token.ts
│   │   │   ├── crawl/
│   │   │   │   ├── cancelWebcrawl.ts
│   │   │   │   └── webcrawl.ts
│   │   │   ├── db.ts
│   │   │   ├── embedding/
│   │   │   │   ├── cancelEmbed.ts
│   │   │   │   └── vectorstoreQuery.ts
│   │   │   ├── handlers/
│   │   │   │   ├── azureHandlers.ts
│   │   │   │   ├── chatHandlers.ts
│   │   │   │   ├── closeEventHandler.ts
│   │   │   │   ├── collectionHandlers.ts
│   │   │   │   ├── customApiHandlers.ts
│   │   │   │   ├── dbHandlers.ts
│   │   │   │   ├── fileHandlers.ts
│   │   │   │   ├── handlers.test.ts
│   │   │   │   ├── ipcHandlers.ts
│   │   │   │   ├── localModelHandlers.ts
│   │   │   │   ├── menuHandlers.ts
│   │   │   │   ├── ollamaHandlers.ts
│   │   │   │   ├── openRouterHandlers.ts
│   │   │   │   └── voiceHandlers.ts
│   │   │   ├── helpers/
│   │   │   │   └── spawnAsync.ts
│   │   │   ├── llms/
│   │   │   │   ├── agentLayer/
│   │   │   │   │   ├── anthropicAgent.ts
│   │   │   │   │   ├── geminiAgent.ts
│   │   │   │   │   ├── ollamaAgent.ts
│   │   │   │   │   ├── openAiAgent.ts
│   │   │   │   │   └── tools/
│   │   │   │   │       └── websearch.ts
│   │   │   │   ├── apiCheckProviders/
│   │   │   │   │   ├── anthropic.ts
│   │   │   │   │   ├── deepseek.ts
│   │   │   │   │   ├── gemini.ts
│   │   │   │   │   ├── openai.ts
│   │   │   │   │   ├── openrouter.ts
│   │   │   │   │   └── xai.ts
│   │   │   │   ├── chatCompletion.ts
│   │   │   │   ├── generateTitle.ts
│   │   │   │   ├── keyValidation.ts
│   │   │   │   ├── llmHelpers/
│   │   │   │   │   ├── addAssistantMessage.ts
│   │   │   │   │   ├── addUserMessage.ts
│   │   │   │   │   ├── collectionData.ts
│   │   │   │   │   ├── countMessageTokens.ts
│   │   │   │   │   ├── getUserPrompt.ts
│   │   │   │   │   ├── ifNewConvo.ts
│   │   │   │   │   ├── prepMessages.ts
│   │   │   │   │   ├── providerInit.ts
│   │   │   │   │   ├── providersMap.ts
│   │   │   │   │   ├── returnReasoningPrompt.ts
│   │   │   │   │   ├── returnSystemPrompt.ts
│   │   │   │   │   ├── sendMessageChunk.ts
│   │   │   │   │   └── truncateMessages.ts
│   │   │   │   ├── llms.ts
│   │   │   │   ├── providers/
│   │   │   │   │   ├── anthropic.ts
│   │   │   │   │   ├── azureOpenAI.ts
│   │   │   │   │   ├── customEndpoint.ts
│   │   │   │   │   ├── deepseek.ts
│   │   │   │   │   ├── externalOllama.ts
│   │   │   │   │   ├── gemini.ts
│   │   │   │   │   ├── localModel.ts
│   │   │   │   │   ├── ollama.ts
│   │   │   │   │   ├── openai.ts
│   │   │   │   │   ├── openrouter.ts
│   │   │   │   │   └── xai.ts
│   │   │   │   └── reasoningLayer/
│   │   │   │       └── openAiChainOfThought.ts
│   │   │   ├── loadingWindow.ts
│   │   │   ├── localLLMs/
│   │   │   │   ├── getDirModels.ts
│   │   │   │   ├── loadModel.ts
│   │   │   │   ├── modelInfo.ts
│   │   │   │   └── unloadModel.ts
│   │   │   ├── main.ts
│   │   │   ├── mainWindow.test.ts
│   │   │   ├── mainWindow.ts
│   │   │   ├── menu.ts
│   │   │   ├── ollama/
│   │   │   │   ├── checkOllama.ts
│   │   │   │   ├── fetchLocalModels.ts
│   │   │   │   ├── getRunningModels.ts
│   │   │   │   ├── isOllamaRunning.ts
│   │   │   │   ├── ollamaPath.ts
│   │   │   │   ├── pullModel.ts
│   │   │   │   ├── runOllama.ts
│   │   │   │   ├── unloadAllModels.ts
│   │   │   │   └── unloadModel.ts
│   │   │   ├── pathResolver.ts
│   │   │   ├── preload.cts
│   │   │   ├── python/
│   │   │   │   ├── ensurePythonAndVenv.ts
│   │   │   │   ├── extractFromAsar.ts
│   │   │   │   ├── getLinuxPackageManager.ts
│   │   │   │   ├── ifFedora.ts
│   │   │   │   ├── installDependencies.ts
│   │   │   │   ├── installLlamaCpp.ts
│   │   │   │   ├── killProcessOnPort.ts
│   │   │   │   ├── python.test.ts
│   │   │   │   ├── runWithPrivileges.ts
│   │   │   │   └── startAndStopPython.ts
│   │   │   ├── resourceManager.ts
│   │   │   ├── specs/
│   │   │   │   └── systemSpecs.ts
│   │   │   ├── storage/
│   │   │   │   ├── deleteCollection.ts
│   │   │   │   ├── getFiles.ts
│   │   │   │   ├── getUserFiles.ts
│   │   │   │   ├── newFile.ts
│   │   │   │   ├── openCollectionFolder.ts
│   │   │   │   ├── removeFileorFolder.ts
│   │   │   │   ├── renameFile.ts
│   │   │   │   └── websiteFetch.ts
│   │   │   ├── tray.test.ts
│   │   │   ├── tray.ts
│   │   │   ├── tsconfig.json
│   │   │   ├── util.ts
│   │   │   ├── voice/
│   │   │   │   └── audioTranscription.ts
│   │   │   └── youtube/
│   │   │       └── youtubeIngest.ts
│   │   ├── hooks/
│   │   │   ├── use-toast.ts
│   │   │   ├── useAppInitialization.tsx
│   │   │   ├── useChatLogic.ts
│   │   │   ├── useChatManagement.ts
│   │   │   ├── useConversationManagement.ts
│   │   │   ├── useModelManagement.ts
│   │   │   ├── useStatistics.tsx
│   │   │   └── useUIState.ts
│   │   ├── lib/
│   │   │   ├── shikiHightlight.ts
│   │   │   └── utils.ts
│   │   ├── loading.html
│   │   ├── types/
│   │   │   └── contextTypes/
│   │   │       ├── LibraryContextTypes.ts
│   │   │       ├── SystemSettingsTypes.ts
│   │   │       ├── UserContextType.ts
│   │   │       └── UserViewTypes.ts
│   │   └── utils/
│   │       ├── chatUtilts.ts
│   │       └── webAudioRecorder.ts
│   ├── tailwind.config.js
│   ├── tsconfig.app.json
│   ├── tsconfig.json
│   ├── tsconfig.node.json
│   ├── types.d.ts
│   ├── vite.config.d.ts
│   ├── vite.config.js
│   └── vite.config.ts
├── LICENSE
└── README.md

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
# Python cache files
__pycache__/
*.py[cod]
*$py.class

.venv

/Frontend/node_modules
/Frontend/dist

.env.local

database.sqlite

Backend/venv
Backend/venvs
models/*

.DS_Store

*.tsbuildinfo

Collections/*

FileCollections

.dev.secret

VectorStores/*
Frontend/chroma_db/chroma.sqlite3

monitor_resources.ps1

Frontend/models/*
Backend/models/*
test_curl.txt

================================================
FILE: Backend/.gitignore
================================================
venv
testData


================================================
FILE: Backend/ensure_dependencies.py
================================================
import sys
import os
import subprocess
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
import warnings
import logging

# Filter transformers model warnings
warnings.filterwarnings('ignore', category=UserWarning)
os.environ['TRANSFORMERS_NO_ADVISORY_WARNINGS'] = 'true'

# Configure logging to handle progress messages
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def find_python310():
    """Locate a Python interpreter of the version the backend requires.

    On Windows the backend requires Python 3.11; on other platforms 3.12.
    Each candidate command is probed with ``--version``; the first command
    whose stdout matches the required version string is returned.

    Returns:
        The candidate command string, or ``None`` if no suitable
        interpreter is found.
    """
    python_commands = ["python3.12", "python3"] if sys.platform != "win32" else [
        "python3.11", "py -3.11", "python"]
    # NOTE(review): "py -3.11" is passed to subprocess as a single argv
    # element, so it can never resolve to an executable and always fails;
    # callers also pass the returned string as one argv element, so this
    # candidate is effectively dead. Confirm and either split it or drop it.

    for cmd in python_commands:
        try:
            result = subprocess.run(
                [cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        except (OSError, subprocess.SubprocessError):
            # Candidate not installed / not runnable -- try the next one.
            # (Narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt and programming errors.)
            continue
        if sys.platform == "win32":
            if "Python 3.11" in result.stdout:
                return cmd
        elif "Python 3.12" in result.stdout:
            return cmd
    return None


def create_venv(venv_path=None):
    """Create the backend virtual environment if it does not already exist.

    Args:
        venv_path: Target directory for the venv; defaults to
            ``<this file's dir>/venv``.

    Returns:
        The (possibly defaulted) venv path.

    Raises:
        RuntimeError: if no interpreter of the required version is found.
    """
    if venv_path is None:
        venv_path = os.path.join(os.path.dirname(__file__), 'venv')
    if not os.path.exists(venv_path):
        print("Creating virtual environment...")
        python310 = find_python310()
        if not python310:
            if sys.platform == "win32":
                raise RuntimeError(
                    "Python 3.11 is required but not found. Please install Python 3.11.")
            else:
                raise RuntimeError(
                    "Python 3.12 is required but not found. Please install Python 3.12.")

        # Split the command string so a multi-token launcher such as
        # "py -3.11" becomes separate argv elements instead of being
        # treated as a single (nonexistent) executable name. Single-token
        # commands are unaffected.
        subprocess.check_call(python310.split() + ["-m", "venv", venv_path])
        print(f"Created virtual environment with {python310}")
    return venv_path


def get_venv_python(venv_path):
    """Return the path of the Python executable inside the given venv."""
    subdir, exe = (
        ("Scripts", "python.exe") if sys.platform == "win32" else ("bin", "python")
    )
    return os.path.join(venv_path, subdir, exe)


def install_package(python_path, package):
    """Install one requirement into the venv via pip (deps excluded).

    Returns:
        A ``(package, error)`` tuple: ``error`` is ``None`` on success,
        otherwise the stringified CalledProcessError.
    """
    cmd = [python_path, '-m', 'pip', 'install', '--no-deps',
           '--upgrade-strategy', 'only-if-needed', package]
    try:
        subprocess.check_call(
            cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except subprocess.CalledProcessError as exc:
        return package, str(exc)
    return package, None


def get_installed_packages(python_path):
    """Return ``{package_name_lower: version}`` for the given interpreter.

    Runs ``pip list --format=freeze`` and parses its output. Lines not of
    the ``name==version`` form (editable installs, direct-URL references
    such as ``pkg @ file://...``, or stray warnings) are skipped instead of
    raising IndexError as the previous naive ``split('==')[1]`` did.
    """
    result = subprocess.run(
        [python_path, '-m', 'pip', 'list', '--format=freeze'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True
    )
    packages = {}
    for line in result.stdout.splitlines():
        name, sep, version = line.partition('==')
        if sep:
            packages[name.lower()] = version
    return packages


async def async_init_store():
    """Download and initialize the embedding model store, reporting progress.

    Progress is written to stdout as ``<message>|<percent>`` lines, which
    the Electron frontend parses to drive its installer progress bar.
    Any failure is reported on stdout and then re-raised so the caller
    (``install_requirements``) can exit non-zero.
    """
    try:
        # Suppress model initialization warnings
        # (imports are deferred: these packages only exist after
        # install_requirements has populated the venv)
        import transformers
        from src.vectorstorage.init_store import init_store
        transformers.logging.set_verbosity_error()
        logging.getLogger(
            "transformers.modeling_utils").setLevel(logging.ERROR)

        # Configure huggingface_hub logging
        hf_logger = logging.getLogger("huggingface_hub")
        hf_logger.setLevel(logging.INFO)
        sys.stdout.write(
            "Downloading initial embedding model (HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5) ...|85\n")
        sys.stdout.flush()

        # Redirect stderr to capture progress messages
        # (download libraries write noisy progress bars to stderr; silence
        # them for the duration of the download, always restoring the real
        # stderr afterwards via the finally block)
        with open(os.devnull, 'w') as devnull:
            old_stderr = sys.stderr
            sys.stderr = devnull
            try:
                model_path = await init_store()
                sys.stdout.write(
                    f"Model downloaded successfully to {model_path}|95\n")
            finally:
                sys.stderr = old_stderr

        sys.stdout.flush()
    except Exception as e:
        # Surface the failure to the frontend (still at the 85% step),
        # then propagate.
        sys.stdout.write(f"Error downloading model: {str(e)}|85\n")
        sys.stdout.flush()
        raise e


def get_package_version(python_path, package_name):
    """Return the installed version of ``package_name`` for an interpreter.

    Runs ``pip show`` and extracts the ``Version:`` line. For PyTorch,
    CUDA-variant suffixes (e.g. ``2.1.0+cu121``) are stripped so versions
    compare cleanly against requirement pins.

    Returns:
        The version string, or ``None`` if the package is not installed or
        pip could not be invoked.
    """
    try:
        result = subprocess.run(
            [python_path, '-m', 'pip', 'show', package_name],
            capture_output=True,
            text=True
        )
    except (OSError, subprocess.SubprocessError):
        # Interpreter missing or not runnable -- treat as "not installed".
        # (Narrowed from a bare `except:` that hid all errors, including
        # KeyboardInterrupt.)
        return None
    for line in result.stdout.split('\n'):
        if line.startswith('Version: '):
            version = line.split('Version: ')[1].strip()
            # Handle CUDA variants of PyTorch
            if package_name == 'torch' and '+cu' in version:
                # Strip CUDA suffix for version comparison
                version = version.split('+')[0]
            return version
    return None


def install_requirements(custom_venv_path=None):
    """Create the venv, install missing requirements, and init the model store.

    Progress is written to stdout as ``<message>|<percent>`` lines (the
    50-99 range) consumed by the Electron installer UI. Exits the process
    with status 1 on any failure.

    Args:
        custom_venv_path: Optional venv directory; defaults to ``./venv``.
    """
    try:
        venv_path = create_venv(custom_venv_path)
        python_path = get_venv_python(venv_path)

        requirements_path = os.path.join(
            os.path.dirname(__file__), 'requirements.txt')

        # Read requirements, skipping blank lines and comments.
        with open(requirements_path, 'r') as f:
            requirements = [
                line.strip() for line in f
                if line.strip()
                and not line.startswith('#')
            ]

        total_deps = len(requirements)
        sys.stdout.write(f"Total packages to process: {total_deps}|50\n")
        sys.stdout.flush()

        installed_packages = get_installed_packages(python_path)

        # Only requirements whose package is not already present get installed.
        to_install = []
        for req in requirements:
            pkg_name = req.split('==')[0] if '==' in req else req
            if pkg_name.lower() not in installed_packages:
                to_install.append(req)

        completed_deps = total_deps - len(to_install)

        def report(message):
            # Map completion onto the 50-80 span of the progress bar.
            # Guard total_deps == 0: an empty requirements file previously
            # raised ZeroDivisionError here.
            progress = 50 + (completed_deps / total_deps) * 30 if total_deps else 80
            sys.stdout.write(f"{message}|{progress:.1f}\n")
            sys.stdout.flush()

        report("Checked installed packages")

        # Install missing packages in parallel (pip metadata work dominates,
        # so a small worker pool is a meaningful speedup).
        with ThreadPoolExecutor(max_workers=5) as executor:
            future_to_pkg = {executor.submit(
                install_package, python_path, req): req for req in to_install}
            for future in as_completed(future_to_pkg):
                pkg = future_to_pkg[future]
                pkg_name = pkg.split('==')[0] if '==' in pkg else pkg
                result, error = future.result()
                completed_deps += 1
                if error:
                    report(f"Error installing {pkg_name}: {error}")
                else:
                    report(f"Installed {pkg_name}")

        # Only safe to import init_store after all dependencies exist.
        sys.stdout.write(
            "All dependencies installed, initializing model store...|85\n")
        sys.stdout.flush()

        # Initialize the store to download the model
        asyncio.run(async_init_store())

        sys.stdout.write(
            "Dependencies installed and model initialized successfully!|99\n")
        sys.stdout.flush()

    except Exception as e:
        sys.stdout.write(f"Error installing dependencies: {str(e)}|0\n")
        sys.stdout.flush()
        sys.exit(1)


if __name__ == "__main__":
    # Optional CLI argument: path of the venv to create/use.
    custom_venv_path = sys.argv[1] if len(sys.argv) > 1 else None
    install_requirements(custom_venv_path)


================================================
FILE: Backend/main.py
================================================
import logging
from src.authentication.api_key_authorization import api_key_auth
from src.authentication.token import verify_token, verify_token_or_api_key
from src.data.database.checkAPIKey import check_api_key
from src.data.dataFetch.youtube import youtube_transcript
from src.endpoint.deleteStore import delete_vectorstore_collection
from src.endpoint.models import EmbeddingRequest, QueryRequest, ChatCompletionRequest, VectorStoreQueryRequest, DeleteCollectionRequest, YoutubeTranscriptRequest, WebCrawlRequest, ModelLoadRequest
from src.endpoint.embed import embed
from src.endpoint.vectorQuery import query_vectorstore
from src.endpoint.devApiCall import rag_call, llm_call, vector_call
from src.endpoint.transcribe import transcribe_audio
from src.endpoint.webcrawl import webcrawl
from src.models.manager import model_manager
from fastapi import FastAPI, Depends, File, UploadFile, Request, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, JSONResponse
import asyncio
import os
import signal
import sys
import psutil
import threading
import uvicorn
import json
from src.endpoint.api import chat_completion_stream

app = FastAPI()
# Module-level single-flight state: only one embedding and one crawl may
# run at a time; the *_event objects signal cancellation to the workers.
embedding_task = None
embedding_event = None
crawl_task = None
crawl_event = None

# CORS: the Electron frontend talks to us from localhost only.
origins = ["http://localhost", "http://127.0.0.1"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    max_age=3600,  # Cache preflight requests for 1 hour
    expose_headers=["*"]
)

# Configure FastAPI app settings for long-running requests


@app.middleware("http")
async def timeout_middleware(request: Request, call_next):
    # Wrap every request in a hard 1-hour timeout; model loads and large
    # embedding jobs can legitimately take many minutes.
    try:
        # Set a long timeout for the request
        # 1 hour timeout
        response = await asyncio.wait_for(call_next(request), timeout=3600)
        return response
    except asyncio.TimeoutError:
        return JSONResponse(
            status_code=504,
            content={"detail": "Request timeout"}
        )

logger = logging.getLogger(__name__)


@app.post("/chat/completions")
async def chat_completion(request: ChatCompletionRequest, user_id: str = Depends(verify_token_or_api_key)) -> StreamingResponse:
    """Stream chat completion from the model"""
    # Reject unauthenticated callers before doing any model work.
    # (Previously this check ran AFTER the model-mismatch branch, so an
    # unauthenticated request could trigger an expensive model load.)
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    print("Chat completion request received")
    print(user_id, request)
    info = model_manager.get_model_info()
    print(info)
    if request.model != info["model_name"]:
        # NOTE(review): the requested model is loaded here and the request
        # is still rejected with "Model mismatch" -- presumably the caller
        # retries once the load completes. Confirm this contract.
        model_load_request = ModelLoadRequest(
            model_name=request.model)
        model_manager.load_model(model_load_request)
        print("Model mismatch")
        return {"status": "error", "message": "Model mismatch"}
    print("Authorized")
    print(request)
    return StreamingResponse(
        chat_completion_stream(request),
        media_type="text/event-stream"
    )


@app.get("/model-info")
async def get_model_info(user_id: str = Depends(verify_token_or_api_key)):
    """Get information about the currently loaded model"""
    # (The docstring above was previously placed after the auth check,
    # where it was a no-op expression rather than the function docstring.)
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    return JSONResponse(content=model_manager.get_model_info())


@app.post("/load-model")
async def load_model_endpoint(request: ModelLoadRequest, user_id: str = Depends(verify_token_or_api_key)):
    """Load a model with the specified configuration.

    For an explicit (non-"auto") model type, platform compatibility is
    verified before attempting the load. Returns a JSON payload with
    status, message and current model info; HTTP 500 on load failure.
    """
    # (Docstring moved to the top of the function; it previously sat after
    # the auth check as a no-op expression statement.)
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    print("Loading model")
    print(request)
    model_type = request.model_type or "auto"
    if model_type != "auto":
        is_compatible, message = model_manager.check_platform_compatibility(
            model_type)
        logger.info(f"is_compatible: {is_compatible}, message: {message}")
        # Return early if platform is not compatible
        if not is_compatible:
            response_data = model_manager._make_json_serializable({
                "status": "error",
                "message": f"Cannot load model: {message}",
                "model_info": model_manager.get_model_info()
            })
            return JSONResponse(content=response_data)
    try:
        # Return value intentionally ignored; success is reported via
        # get_model_info(). (Removed the unused `model, tokenizer` binding.)
        model_manager.load_model(request)
        response_data = model_manager._make_json_serializable({
            "status": "success",
            "message": f"Successfully loaded model {request.model_name}",
            "model_info": model_manager.get_model_info()
        })
        print(response_data)
        logger.info(response_data)
        return JSONResponse(content=response_data)
    except Exception as e:
        response_data = model_manager._make_json_serializable({
            "status": "error",
            "message": str(e),
            "model_info": model_manager.get_model_info()
        })
        return JSONResponse(status_code=500, content=response_data)


@app.post("/unload-model")
async def unload_model_endpoint(user_id: str = Depends(verify_token_or_api_key)):
    """Unload the currently loaded model"""
    # (Docstring moved to the top; it previously sat after the auth check
    # as a no-op expression statement.)
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}

    try:
        model_manager.clear_model()
        return JSONResponse(content={
            "status": "success",
            "message": "Model unloaded successfully",
            "model_info": model_manager.get_model_info()
        })
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={
                "status": "error",
                "message": str(e),
                "model_info": model_manager.get_model_info()
            }
        )


@app.post("/webcrawl")
async def webcrawl_endpoint(data: WebCrawlRequest, user_id: str = Depends(verify_token)):
    """Stream web-crawl progress to the client as server-sent events.

    Only one crawl may run at a time (module-level single-flight guard);
    setting ``crawl_event`` cancels the crawl between yielded results.
    """
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}

    global crawl_task, crawl_event
    # Single-flight: reject if a crawl is already in progress.
    if crawl_task is not None:
        return {"status": "error", "message": "A crawl process is already running"}

    crawl_event = asyncio.Event()

    async def event_generator():
        # Wraps the synchronous webcrawl generator in SSE framing and
        # clears the module-level single-flight state when finished.
        global crawl_task, crawl_event
        try:
            for result in webcrawl(data, crawl_event):
                if crawl_event.is_set():
                    # NOTE(review): this payload is single-quoted pseudo-JSON,
                    # not valid JSON -- confirm the frontend's parser accepts it.
                    yield f"data: {{'type': 'cancelled', 'message': 'Crawl process cancelled'}}\n\n"
                    break
                yield f"{result}\n\n"
                await asyncio.sleep(0.1)
        except Exception as e:
            error_data = {
                "status": "error",
                "data": {
                    "message": str(e)
                }
            }
            yield f"data: {json.dumps(error_data)}\n\n"
        finally:
            crawl_task = None
            crawl_event = None

    response = StreamingResponse(
        event_generator(), media_type="text/event-stream")
    # NOTE(review): this creates a SECOND, independent generator instance
    # and advances it one step, running concurrently with the instance
    # consumed by StreamingResponse -- apparently intended only to mark
    # crawl_task non-None. Verify this does not start a duplicate crawl.
    crawl_task = asyncio.create_task(event_generator().__anext__())
    return response


@app.post("/transcribe")
async def transcribe_audio_endpoint(audio_file: UploadFile = File(...), model_name: str = "base", user_id: str = Depends(verify_token)):
    """Transcribe an uploaded audio file with the named transcription model."""
    if user_id is not None:
        return await transcribe_audio(audio_file, model_name)
    return {"status": "error", "message": "Unauthorized"}


@app.post("/embed")
async def add_embedding(data: EmbeddingRequest, user_id: str = Depends(verify_token)):
    """Stream embedding-ingestion progress to the client as SSE events.

    Only one embedding job may run at a time (module-level single-flight
    guard); setting ``embedding_event`` cancels it between chunks.
    """
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    print("Metadata:", data.metadata)
    global embedding_task, embedding_event

    # Single-flight: reject if an embedding job is already in progress.
    if embedding_task is not None:
        return {"status": "error", "message": "An embedding process is already running"}

    embedding_event = asyncio.Event()

    async def event_generator():
        # Translates embed()'s progress dicts into SSE frames and clears
        # the module-level single-flight state when finished.
        # NOTE(review): the yielded payloads are single-quoted pseudo-JSON,
        # not valid JSON -- confirm the frontend's parser accepts them.
        global embedding_task, embedding_event
        try:
            async for result in embed(data):
                if embedding_event.is_set():
                    yield f"data: {{'type': 'cancelled', 'message': 'Embedding process cancelled'}}\n\n"
                    break

                if result["status"] == "progress":
                    progress_data = result["data"]
                    yield f"data: {{'type': 'progress', 'chunk': {progress_data['chunk']}, 'totalChunks': {progress_data['total_chunks']}, 'percent_complete': '{progress_data['percent_complete']}', 'est_remaining_time': '{progress_data['est_remaining_time']}'}}\n\n"
                else:
                    yield f"data: {{'type': '{result['status']}', 'message': '{result['message']}'}}\n\n"
                await asyncio.sleep(0.1)  # Prevent overwhelming the connection
        except Exception as e:
            logger.error(f"Error in embedding process: {str(e)}")
            yield f"data: {{'type': 'error', 'message': '{str(e)}'}}\n\n"
        finally:
            embedding_task = None
            embedding_event = None
            logger.info("Embedding task cleanup completed")

    response = StreamingResponse(
        event_generator(),
        media_type="text/event-stream"
    )

    # Set response headers for better connection handling
    response.headers["Cache-Control"] = "no-cache"
    response.headers["Connection"] = "keep-alive"
    response.headers["X-Accel-Buffering"] = "no"
    response.headers["Transfer-Encoding"] = "chunked"

    # NOTE(review): this creates a SECOND, independent generator instance
    # and advances it one step, running concurrently with the instance
    # consumed by StreamingResponse -- apparently intended only to mark
    # embedding_task non-None. Verify this does not embed data twice.
    embedding_task = asyncio.create_task(event_generator().__anext__())
    return response


@app.post("/youtube-ingest")
async def youtube_ingest(data: YoutubeTranscriptRequest, user_id: str = Depends(verify_token)):
    """Stream YouTube-transcript ingestion progress to the client as SSE."""
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}

    async def event_generator():
        # Translates youtube_transcript()'s progress dicts into SSE frames.
        # NOTE(review): the yielded payloads are single-quoted pseudo-JSON,
        # not valid JSON -- confirm the frontend's parser accepts them.
        try:
            for result in youtube_transcript(data):
                if result["status"] == "progress":
                    progress_data = result["data"]
                    yield f"data: {{'type': 'progress', 'chunk': {progress_data['chunk']}, 'totalChunks': {progress_data['total_chunks']}, 'percent_complete': '{progress_data['percent_complete']}', 'message': '{progress_data['message']}'}}\n\n"
                else:
                    yield f"data: {{'type': '{result['status']}', 'message': '{result['message']}'}}\n\n"
                await asyncio.sleep(0.1)
        except Exception as e:
            yield f"data: {{'type': 'error', 'message': '{str(e)}'}}\n\n"

    return StreamingResponse(event_generator(), media_type="text/event-stream")


@app.post("/cancel-embed")
async def cancel_embedding(user_id: str = Depends(verify_token)):
    """Signal the running embedding job (if any) to stop."""
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    global embedding_task, embedding_event
    if not embedding_event:
        return {"status": "error", "message": "No embedding process running"}
    embedding_event.set()
    return {"status": "success", "message": "Embedding process cancelled"}


@app.post("/restart-server")
async def restart_server(user_id: str = Depends(verify_token)):
    """Kill all child processes and re-exec the backend in place."""
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}

    def restart():
        # Runs on a worker thread so the HTTP response below can be sent
        # before the process tears itself down.
        pid = os.getpid()
        parent = psutil.Process(pid)
        # Kill all child processes
        for child in parent.children(recursive=True):
            child.kill()
        # Kill the current process
        # NOTE(review): SIGTERM's default handler terminates the process,
        # so the os.execl below may never run -- verify the server actually
        # restarts rather than merely exiting.
        os.kill(pid, signal.SIGTERM)
        # Start a new instances
        python = sys.executable
        os.execl(python, python, *sys.argv)

    threading.Thread(target=restart).start()
    return {"status": "success", "message": "Server restart initiated"}


@app.post("/vector-query")
async def vector_query(data: VectorStoreQueryRequest, user_id: str = Depends(verify_token)):
    """Run a similarity query against the vector store for an authenticated user."""
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    try:
        return query_vectorstore(data, data.is_local)
    except Exception as e:
        # Surface the failure to the client rather than raising a 500.
        message = str(e)
        print(f"Error querying vectorstore: {message}")
        return {"status": "error", "message": message}


@app.post("/delete-collection")
async def delete_collection(data: DeleteCollectionRequest, user_id: str = Depends(verify_token)):
    """Delete a vector-store collection for an authenticated user."""
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    print("Authorized")
    result = delete_vectorstore_collection(data)
    return result


@app.post("/api/vector")
async def api_vector(query_request: QueryRequest, user_id: str = Depends(api_key_auth)):
    """Public API endpoint: vector-store query authorized by API key.

    Checks that the caller's user id has an API key registered in SQLite
    before forwarding the request to the vector store.
    """
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    if not query_request.collection_name:
        print("No collection name provided")
        return {"status": "error", "message": "No collection name provided"}
    # FIX: the old `check_api_key(...) == False` comparison treated a
    # None/falsy result as authorized, and int(user_id) could raise an
    # uncaught ValueError for a non-numeric id.
    try:
        authorized = check_api_key(int(user_id))
    except (TypeError, ValueError):
        authorized = False
    if not authorized:
        print("Unauthorized")
        return {"status": "error", "message": "Unauthorized"}
    print("Authorized")
    return vector_call(query_request, user_id)


@app.post("/api/llm")
async def api_llm(query_request: ChatCompletionRequest, user_id: str = Depends(api_key_auth)):
    """Public API endpoint: chat completion authorized by API key.

    Checks that the caller's user id has an API key registered in SQLite
    before forwarding the request to the LLM backend.
    """
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    if not query_request.model:
        print("No model provided")
        return {"status": "error", "message": "No model provided"}
    # FIX: the old `check_api_key(...) == False` comparison treated a
    # None/falsy result as authorized, and int(user_id) could raise an
    # uncaught ValueError for a non-numeric id.
    try:
        authorized = check_api_key(int(user_id))
    except (TypeError, ValueError):
        authorized = False
    if not authorized:
        print("Unauthorized")
        return {"status": "error", "message": "Unauthorized"}
    print("Authorized")
    return await llm_call(query_request, user_id)


@app.post("/api/rag")
async def api_rag(query_request: QueryRequest, user_id: str = Depends(api_key_auth)):
    """Public API endpoint: retrieval-augmented generation authorized by API key.

    Requires both a model and a collection name, and checks that the caller's
    user id has an API key registered in SQLite before forwarding the request.
    """
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    if not query_request.model:
        print("No model provided")
        return {"status": "error", "message": "No model provided"}
    if not query_request.collection_name:
        print("No collection name provided")
        return {"status": "error", "message": "No collection name provided"}
    # FIX: the old `check_api_key(...) == False` comparison treated a
    # None/falsy result as authorized, and int(user_id) could raise an
    # uncaught ValueError for a non-numeric id.
    try:
        authorized = check_api_key(int(user_id))
    except (TypeError, ValueError):
        authorized = False
    if not authorized:
        print("Unauthorized")
        return {"status": "error", "message": "Unauthorized"}
    print("Authorized")
    return await rag_call(query_request, user_id)


@app.post("/cancel-crawl")
async def cancel_crawl(user_id: str = Depends(verify_token)):
    """Signal a running crawl task to stop via its module-level cancel event."""
    global crawl_task, crawl_event
    if user_id is None:
        return {"status": "error", "message": "Unauthorized"}
    # No event registered means nothing is currently crawling.
    if not crawl_event:
        return {"status": "error", "message": "No crawl process running"}
    crawl_event.set()
    return {"status": "success", "message": "Crawl process cancelled"}


if __name__ == "__main__":
    # Script entry point: serve the FastAPI app on loopback only.
    print("Starting server...")
    uvicorn.run(
        app,
        host="127.0.0.1",  # bind to localhost; not reachable from other hosts
        port=47372,
        timeout_keep_alive=3600,  # allow hour-long idle keep-alive connections
                                  # (presumably for the long SSE streams above)
        timeout_graceful_shutdown=300,  # wait up to 5 min for in-flight work
        limit_concurrency=10,  # cap simultaneous connections handled at once
        backlog=2048  # pending-connection queue size for the listener socket
    )


================================================
FILE: Backend/requirements.txt
================================================
annotated-types==0.7.0
anyio==4.6.2.post1
asgiref==3.8.1
backoff==2.2.1
bcrypt==4.2.1
build==1.2.2.post1
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
chromadb==0.6.3
chroma-hnswlib==0.7.6
click==8.1.7
coloredlogs==15.0.1
Deprecated==1.2.15
dnspython==2.7.0
durationpy==0.9
ecdsa==0.19.0
email_validator==2.2.0
exceptiongroup==1.2.2
fastapi==0.115.6
fastapi-cli==0.0.6
filelock==3.16.1
flatbuffers==24.3.25
fsspec==2024.10.0
google-auth==2.36.0
googleapis-common-protos==1.66.0
grpcio==1.68.1
h11==0.14.0
httpcore==1.0.7
httptools==0.6.4
httpx==0.28.0
huggingface-hub==0.26.5
humanfriendly==10.0
idna==3.10
importlib_metadata==8.5.0
importlib_resources==6.4.5
iniconfig==2.0.0
Jinja2==3.1.5
kubernetes==31.0.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
mmh3==5.0.1
monotonic==1.6
mpmath==1.3.0
numba==0.58.1
oauthlib==3.2.2
onnxruntime==1.20.1
opentelemetry-api==1.28.2
opentelemetry-exporter-otlp-proto-common==1.28.2
opentelemetry-exporter-otlp-proto-grpc==1.28.2
opentelemetry-instrumentation==0.49b2
opentelemetry-instrumentation-asgi==0.49b2
opentelemetry-instrumentation-fastapi==0.49b2
opentelemetry-proto==1.28.2
opentelemetry-sdk==1.28.2
opentelemetry-semantic-conventions==0.49b2
opentelemetry-util-http==0.49b2
orjson==3.10.12
overrides==7.7.0
packaging==24.2
passlib==1.7.4
pluggy==1.5.0
posthog==3.7.4
protobuf==5.29.1
pyasn1==0.6.1
pyasn1_modules==0.4.1
pydantic>=2.9.0,<3.0.0
pydantic_core==2.14.6
Pygments==2.18.0
PyPika==0.48.9
pyproject_hooks==1.2.0
pytest==8.3.4
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
PyJWT==2.10.1
python-multipart==0.0.19
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rich==13.9.4
rich-toolkit==0.11.3
rsa==4.9
shellingham==1.5.4
six==1.17.0
sniffio==1.3.1
starlette==0.41.3
sympy==1.13.3
tenacity==9.0.0
tokenizers==0.21.0
tomli==2.2.1
tqdm==4.67.1
typer==0.15.1
urllib3==2.2.3
uvicorn==0.32.1
watchfiles==1.0.0
websocket-client==1.8.0
websockets==14.1
wrapt==1.17.0
zipp==3.21.0
pypdf[full]==5.1.0
python-docx==0.8.11
beautifulsoup4==4.12.2
markdown==3.5.1
python-pptx==0.6.21
openpyxl==3.1.2
lxml==5.3.0
pandas==2.2.3
pytz==2024.2
pillow==11.0.0
soupsieve==2.6
openai==1.58.1
distro==1.9.0
nest_asyncio==1.5.6
hypercorn==0.14.3
toml==0.10.2
h2==4.1.0
hyperframe==6.0.1
hpack==4.0.0
http3==0.6.7
h11==0.14.0
httpcore==1.0.7
sentence-transformers==3.3.1
threadpoolctl==3.5.0
joblib==1.4.2
scipy==1.15.1
httpx==0.28.0
priority==2.0.0
wsproto==1.2.0
jiter==0.8.2
langchain==0.3.16
langchain-text-splitters==0.3.4
langchain_core==0.3.28
langsmith==0.2.3
requests_toolbelt==1.0.0
jsonpatch==1.33
jsonpointer==3.0.0
langchain_community==0.3.16
tiktoken==0.8.0
regex==2024.11.6
langchain-openai==0.2.14
langchain-chroma==0.2.1
psutil==6.1.1
ollama==0.4.4
docx2txt==0.8
yt-dlp==2024.12.23
webvtt-py==0.4.6
langchain-ollama==0.2.2
openai-whisper==20240930
accelerate>=0.20.3
bitsandbytes>=0.41.1
safetensors>=0.4.0
llvmlite==0.43.0
einops==0.8.0
optimum==1.23.3
datasets==3.2.0
pyarrow==18.1.0
multiprocess==0.70.17
dill>=0.3.6
aiohttp==3.11.11
multidict==6.1.0
attrs>=23.1.0
yarl==1.18.3
propcache==0.2.1
async-timeout==5.0.1
aiohappyeyeballs==2.4.4
aiosignal==1.3.2
frozenlist==1.5.0
xxhash==3.5.0
diskcache==5.6.3
hqq==0.2.2
termcolor==2.5.0
langchain-huggingface==0.1.2
# NOTE: pypdf is already pinned above as pypdf[full]==5.1.0; the conflicting
# duplicate pin (pypdf==5.2.0) was disabled so pip can resolve this file.

================================================
FILE: Backend/src/authentication/api_key_authorization.py
================================================
from fastapi import Depends
from fastapi.security import OAuth2PasswordBearer
from typing import Optional
import jwt

import logging
import os

# Module-level setup shared by the API-key auth helpers below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# OAuth2 scheme is used only to extract the bearer token from the request.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
# Secret used to verify API-key JWTs.
# NOTE(review): this module reads SECRET_KEY while token.py reads JWT_SECRET —
# presumably two distinct signing secrets are intended; confirm.
SECRET_KEY = os.environ.get("SECRET_KEY")

# Fail fast at import time rather than rejecting every request later.
if not SECRET_KEY:
    raise RuntimeError("Could not get JWT secret for API key authorization")


async def get_optional_token(token: Optional[str] = Depends(oauth2_scheme)):
    # Dependency wrapper that yields the bearer token from the request,
    # or None when no Authorization header was supplied.
    return token


async def api_key_auth(token: Optional[str] = Depends(get_optional_token)):
    """Resolve a bearer API-key JWT to a user id.

    Decodes the token with the module SECRET_KEY (HS256) and returns its
    "userId" claim. Returns None when the token is absent, invalid, or
    carries no user id.
    """
    if token is None:
        return None
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=["HS256"])
    except jwt.exceptions.InvalidTokenError:
        logger.error("Invalid token")
        return None
    user_id: str = payload.get("userId")
    logger.info(f"User ID: {user_id}")
    return user_id


================================================
FILE: Backend/src/authentication/token.py
================================================
from fastapi import Depends, Request
from fastapi.security import OAuth2PasswordBearer
from typing import Optional
import os
import jwt
import logging

# Module-level setup shared by the token-auth helpers below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# OAuth2 scheme is used only to extract the bearer token from the request.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

# Get secret from environment variable
SECRET_KEY = os.environ.get("JWT_SECRET")
# Fail fast at import time rather than rejecting every request later.
if not SECRET_KEY:
    raise RuntimeError("JWT_SECRET environment variable is not set")


async def get_optional_token(token: Optional[str] = Depends(oauth2_scheme)):
    # Dependency wrapper that yields the bearer token from the request,
    # or None when no Authorization header was supplied.
    return token


async def verify_token(token: Optional[str] = Depends(get_optional_token)):
    """Validate a bearer JWT and return its "userId" claim.

    Returns None when the token is missing, fails HS256 signature/format
    validation, or carries no "userId" claim.
    """
    if token is None:
        return None
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=["HS256"])
        # FIX: the payload was previously print()ed to stdout on every
        # request, leaking token claims; log it at debug level instead.
        logger.debug(f"Payload: {payload}")
        user_id: str = payload.get("userId")
        logger.info(f"User ID: {user_id}")
        if user_id is None:
            return None
        return user_id

    except jwt.exceptions.InvalidTokenError:
        logger.error("Invalid token")
        return None


async def optional_auth(request: Request):
    """Best-effort authentication: return the "userId" claim or None.

    Unlike verify_token, this reads the raw Authorization header directly
    and never raises: a missing header, a non-Bearer scheme, or an invalid
    token all yield None.
    """
    auth_header = request.headers.get("Authorization", "")
    # FIX: the old `split("Bearer ")[1]` raised IndexError for any header
    # that did not contain the "Bearer " marker; parse defensively instead.
    if not auth_header.startswith("Bearer "):
        return None
    token = auth_header[len("Bearer "):]
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=["HS256"])
        return payload.get("userId")
    except jwt.exceptions.InvalidTokenError:
        return None


async def verify_token_or_api_key(token: Optional[str] = Depends(get_optional_token)):
    """Verify token using normal auth, falling back to API key auth if that fails"""
    # Imported lazily to avoid a circular import at module load time.
    from src.authentication.api_key_authorization import api_key_auth

    # Primary path: standard JWT verification.
    user_id = await verify_token(token)
    if user_id:
        return user_id

    # Secondary path: interpret the same bearer token as an API-key JWT.
    return await api_key_auth(token)


================================================
FILE: Backend/src/data/dataFetch/webcrawler.py
================================================
import os
import json
import logging
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import time
import threading
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from queue import Queue, Empty
import threading


class WebCrawler:
    """Breadth-first, multi-threaded crawler for documentation sites.

    Starting from base_url, same-site pages are fetched with a thread pool,
    stripped of boilerplate elements (header/footer/nav/script/style/meta),
    and saved as HTML files under the user's collection directory. Progress
    is printed as SSE-style `data: {...}` JSON lines and also yielded from
    scrape() so callers can stream it.
    """

    def __init__(self, base_url, user_id, user_name, collection_id, collection_name, max_workers, cancel_event=None):
        self.base_url = base_url
        self.output_dir = self._get_collection_path(
            user_id, user_name, collection_id, collection_name)
        self.visited_urls = set()
        self.failed_urls = set()
        self.delay = 0  # Reduced delay since we're rate limiting with max_workers
        # FIX: honor the caller-supplied worker count; it was previously
        # hard-coded to 35, silently ignoring the max_workers argument.
        self.max_workers = max_workers
        self.url_queue = Queue()
        self.url_lock = threading.Lock()
        self.progress_bar = None
        self.total_urls = 0
        self.current_urls = 0
        self.update_callback = None
        # Optional threading.Event; when set, the crawl loop stops early.
        self.cancel_event = cancel_event

        # Setup logging
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s'
        )

        # Create output directory if it doesn't exist
        os.makedirs(self.output_dir, exist_ok=True)

    def _get_collection_path(self, user_id, user_name, collection_id, collection_name):
        """Generate the collection path matching the frontend structure"""
        # Three dirname() calls from src/data/dataFetch plus ".." resolve to
        # the Backend directory; the collection lives next to it.
        app_data_path = os.path.abspath(os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(__file__))), ".."
        ))
        return os.path.join(
            app_data_path,
            "..",
            "FileCollections",
            f"{user_id}_{user_name}",
            f"{collection_id}_{collection_name}"
        )

    def _print_progress(self):
        """Print progress as JSON and return the progress payload (or None
        when no URLs have been discovered yet)."""
        if self.total_urls > 0:
            percent = (self.current_urls / self.total_urls) * 100
            progress_data = {
                "status": "progress",
                "data": {
                    "message": f"Part 1 of 2: Scraping page {self.current_urls} out of {self.total_urls} from {self.base_url}",
                    "chunk": self.current_urls,
                    "total_chunks": self.total_urls,
                    "percent_complete": f"{percent:.1f}%"
                }
            }
            json_str = json.dumps(progress_data)
            print(f"data: {json_str}")
            return progress_data

    def is_valid_url(self, url):
        """Check if URL belongs to the same domain and is a documentation page"""
        # Remove fragment identifier (#) and anything that follows
        url = url.split('#')[0]
        if not url:  # Skip empty URLs after fragment removal
            return False

        # First check if URL starts with base_url
        if not url.startswith(self.base_url):
            logging.debug(f"Filtered URL (not starting with base URL): {url}")
            return False

        # Remove trailing slashes for consistency
        url = url.rstrip('/')

        # Skip obviously invalid URLs
        invalid_patterns = [
            '.pdf', '.zip', '.png', '.jpg',  # File extensions
            'github.com', 'twitter.com',      # External sites
            '/api/', '/examples/',            # Common non-doc paths
            '?', 'mailto:', 'javascript:'     # Special URLs
        ]

        if any(pattern in url for pattern in invalid_patterns):
            logging.debug(f"Filtered URL (invalid pattern): {url}")
            return False

        # Ensure not a resource file
        return not url.endswith(('js', 'css', 'json'))

    def save_page(self, url, html_content):
        """Save the HTML content to a file mirroring the URL's path structure.

        Returns True on success, False on any I/O error.
        """
        try:
            # Create base_url_docs directory
            parsed_base_url = urlparse(self.base_url)
            base_url_dir = parsed_base_url.netloc.replace(".", "_") + "_docs"
            base_dir = os.path.join(self.output_dir, base_url_dir)
            os.makedirs(base_dir, exist_ok=True)

            # Create a file path based on the URL structure
            parsed_url = urlparse(url)
            path_parts = parsed_url.path.strip('/').split('/')

            # Create subdirectories if needed
            current_dir = base_dir
            for part in path_parts[:-1]:
                current_dir = os.path.join(current_dir, part)
                os.makedirs(current_dir, exist_ok=True)

            # Save the file.
            # FIX: the computed filename was previously unused — a literal
            # placeholder name was written, so every page in a directory
            # overwrote the same file. Also fall back to 'index' when the
            # URL path is empty (split('/') on '' yields ['']).
            filename = path_parts[-1] if path_parts and path_parts[-1] else 'index'
            filepath = os.path.join(current_dir, f"{filename}.html")

            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(html_content)

            return True

        except Exception as e:
            logging.error(f"Error saving {url}: {str(e)}")
            return False

    def get_links(self, soup, current_url):
        """Extract valid documentation links from the page"""
        links = set()
        for a in soup.find_all('a', href=True):
            # Get the full URL
            url = urljoin(current_url, a['href'])

            # Remove fragment identifier (#) and anything that follows
            url = url.split('#')[0]

            # Skip empty URLs after fragment removal
            if not url:
                continue

            # Remove trailing slashes for consistency
            url = url.rstrip('/')

            # Only add if it's valid and not already visited
            if self.is_valid_url(url) and url not in self.visited_urls:
                links.add(url)

        return links

    def scrape_page(self, url):
        """Scrape a single page and return (soup, links).

        On failure the URL is recorded in self.failed_urls and
        (None, empty set) is returned.
        """
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            html_content = response.text

            # Create BeautifulSoup object with the response text
            soup = BeautifulSoup(html_content, 'html.parser')

            # Remove unwanted elements before getting links
            for element in soup.find_all(['header', 'footer', 'nav', 'script', 'style', 'meta']):
                if element is not None:
                    element.decompose()

            # Get links from the cleaned soup
            links = self.get_links(soup, url)

            return soup, links

        except Exception as e:
            error_data = {
                "status": "error",
                "data": {
                    "message": str(e)
                }
            }
            print(f"data: {json.dumps(error_data)}")
            logging.error(f"Error scraping {url}: {str(e)}")
            self.failed_urls.add(url)
            return None, set()

    def scrape(self):
        """Main scraping method: a generator that drives the thread pool and
        yields progress payloads as URLs are claimed for crawling."""
        # Initialize with start URL
        self.url_queue.put(self.base_url)
        self.total_urls = 1  # Initialize with 1 for the base URL
        self.current_urls = 0

        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            active_tasks = set()

            while True:
                try:
                    # Check for cancellation
                    if self.cancel_event and self.cancel_event.is_set():
                        break

                    # Get next URL with timeout
                    try:
                        current_url = self.url_queue.get(timeout=5)
                    except Empty:
                        # If no active tasks and queue is empty, we're done
                        if not active_tasks:
                            break
                        continue

                    if current_url in self.visited_urls:
                        continue

                    # Re-check under the lock: a worker may have enqueued a
                    # duplicate between the unlocked check and here.
                    with self.url_lock:
                        if current_url in self.visited_urls:
                            continue
                        self.visited_urls.add(current_url)
                        yield self._print_progress()

                    # Submit the scraping task to thread pool
                    future = executor.submit(self._process_url, current_url)
                    active_tasks.add(future)
                    # discard (not remove) so a racing double-invocation of
                    # the callback cannot raise KeyError.
                    future.add_done_callback(lambda f: active_tasks.discard(f))
                    future.add_done_callback(self._update_progress)

                except Exception as e:
                    error_data = {
                        "status": "error",
                        "data": {
                            "message": str(e)
                        }
                    }
                    print(f"data: {json.dumps(error_data)}")
                    logging.error(f"Error in scrape loop: {str(e)}")
                    continue

            # Wait for remaining tasks to complete
            for future in concurrent.futures.as_completed(list(active_tasks)):
                try:
                    future.result()
                except Exception as e:
                    error_data = {
                        "status": "error",
                        "data": {
                            "message": str(e)
                        }
                    }
                    print(f"data: {json.dumps(error_data)}")
                    logging.error(f"Error in remaining tasks: {str(e)}")

    def _update_progress(self, future):
        """Future done-callback: bump the completed-URL counter and emit
        a progress line."""
        try:
            with self.url_lock:
                self.current_urls += 1
                progress_data = self._print_progress()
                if progress_data:
                    json_str = json.dumps(progress_data)
                    print(f"data: {json_str}")
        except Exception as e:
            error_data = {
                "status": "error",
                "data": {
                    "message": str(e)
                }
            }
            print(f"data: {json.dumps(error_data)}")

    def _process_url(self, url):
        """Process a single URL - called by thread pool"""
        try:
            # Check for cancellation
            if self.cancel_event and self.cancel_event.is_set():
                return

            # Respectful delay
            time.sleep(self.delay)

            # Scrape the page
            soup, new_links = self.scrape_page(url)
            if soup is None:
                return

            # Save the page
            if self.save_page(url, str(soup)):
                # Add new links to queue (queue membership is checked via the
                # underlying deque; guarded by url_lock together with
                # total_urls updates).
                with self.url_lock:
                    for link in new_links:
                        if link not in self.visited_urls and link not in self.url_queue.queue:
                            self.url_queue.put(link)
                            self.total_urls += 1
        except Exception as e:
            error_data = {
                "status": "error",
                "data": {
                    "message": str(e)
                }
            }
            print(f"data: {json.dumps(error_data)}")
            logging.error(f"Error processing URL {url}: {str(e)}")

    def save_progress(self):
        """Write a plain-text summary of visited and failed URLs to
        scraping_progress.txt in the current working directory."""
        with open('scraping_progress.txt', 'w') as f:
            f.write(f"Visited URLs: {len(self.visited_urls)}\n")
            f.write(f"Failed URLs: {len(self.failed_urls)}\n")
            f.write("\nFailed URLs:\n")
            for url in self.failed_urls:
                f.write(f"{url}\n")


================================================
FILE: Backend/src/data/dataFetch/youtube.py
================================================
import os
from src.endpoint.models import YoutubeTranscriptRequest
from src.vectorstorage.vectorstore import get_vectorstore
from src.vectorstorage.helpers.sanitizeCollectionName import sanitize_collection_name

from langchain_core.documents import Document
import yt_dlp
import logging
import requests
import webvtt
from io import StringIO
from typing import Generator
import json

# Module-level logging setup used by the transcript pipeline below.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def _get_collection_path(user_id, user_name, collection_id, collection_name):
    """Generate the collection path matching the frontend structure"""
    app_data_path = os.path.abspath(os.path.join(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))), ".."
    ))
    return os.path.join(
        app_data_path,
        "..",
        "FileCollections",
        f"{user_id}_{user_name}",
        f"{collection_id}_{collection_name}"
    )


def youtube_transcript(request: YoutubeTranscriptRequest) -> Generator[dict, None, None]:
    """
    Fetch video transcript and metadata using yt-dlp
    """
    logger.info(f"Starting transcript fetch for URL: {request.url}")
    yield {"status": "progress", "data": {"message": f"Starting transcript fetch for URL: {request.url}", "chunk": 1, "total_chunks": 4, "percent_complete": "0%"}}

    ydl_opts = {
        'writesubtitles': True,
        'writeautomaticsub': True,
        'subtitlesformat': 'vtt',
        'skip_download': True,
        'quiet': True,  # Suppress yt-dlp's own output
        'no_warnings': True  # Suppress warnings
    }

    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            # Video info extraction (0-5%)
            yield {"status": "progress", "data": {"message": "Extracting video information...", "chunk": 1, "total_chunks": 4, "percent_complete": "5%"}}
            info = ydl.extract_info(request.url, download=False)

            video_info = f"Found video: '{info.get('title', 'Unknown')}' by {info.get('uploader', 'Unknown')}, duration: {info.get('duration', 'Unknown')} seconds"
            logger.info(video_info)
            yield {"status": "progress", "data": {"message": video_info, "chunk": 1, "total_chunks": 4, "percent_complete": "10%"}}

            # Get automatic captions if available
            subtitles = None
            if 'automatic_captions' in info and 'en' in info['automatic_captions']:
                logger.info("Using automatic captions")
                yield {"status": "progress", "data": {"message": "Found automatic captions, processing...", "chunk": 0, "total_chunks": 0, "percent_complete": "0%"}}
                subtitles = info['automatic_captions']['en']
            # Fall back to manual subtitles if available
            elif 'subtitles' in info and 'en' in info['subtitles']:
                logger.info("Using manual subtitles")
                yield {"status": "progress", "data": {"message": "Found manual subtitles, processing...", "chunk": 0, "total_chunks": 0, "percent_complete": "0%"}}
                subtitles = info['subtitles']['en']

            if not subtitles:
                error_msg = "No English subtitles or automatic captions available"
                logger.error(error_msg)
                raise Exception(error_msg)

            # Download the VTT format subtitles
            subtitle_url = None
            for fmt in subtitles:
                if fmt.get('ext') == 'vtt':
                    subtitle_url = fmt['url']
                    break

            if not subtitle_url:
                error_msg = "No VTT format subtitles found"
                logger.error(error_msg)
                raise Exception(error_msg)

            # Update progress for subtitle download (10-15%)
            yield {"status": "progress", "data": {"message": "Downloading subtitles...", "chunk": 2, "total_chunks": 4, "percent_complete": "15%"}}

            # Download the VTT content
            response = requests.get(subtitle_url)
            if response.status_code != 200:
                error_msg = "Failed to download subtitles"
                logger.error(error_msg)
                raise Exception(error_msg)

            # Parse the VTT content
            vtt_content = response.text
            vtt_file = StringIO(vtt_content)
            vtt_captions = webvtt.read_buffer(vtt_file)

            # Start of transcript processing (15-35%)
            yield {"status": "progress", "data": {"message": "Processing subtitles...", "chunk": 2, "total_chunks": 4, "percent_complete": "15%"}}

            def clean_caption(text):
                # Remove common VTT artifacts and clean text
                text = ' '.join(text.split())  # Remove extra whitespace
                # Remove text within brackets (often contains sound effects or speaker labels)
                if text.startswith('[') and text.endswith(']'):
                    return ""
                # Remove common YouTube caption artifacts
                text = text.replace('>>>', '').replace('>>', '')
                # Remove any remaining brackets and their contents
                while '[' in text and ']' in text:
                    start = text.find('[')
                    end = text.find(']') + 1
                    text = text[:start] + text[end:]
                return text.strip()

            def is_substantial_difference(text1, text2):
                # More aggressive deduplication
                if not text1 or not text2:
                    return True

                # Convert to lowercase and split into words
                words1 = text1.lower().split()
                words2 = text2.lower().split()

                # If either text is too short, consider them different
                if len(words1) < 3 or len(words2) < 3:
                    return True

                # Create word sequences for comparison
                seq1 = ' '.join(words1)
                seq2 = ' '.join(words2)

                # Check if one is contained within the other
                if seq1 in seq2 or seq2 in seq1:
                    return False

                # Calculate word overlap
                words1_set = set(words1)
                words2_set = set(words2)
                overlap = len(words1_set.intersection(words2_set))
                max_words = max(len(words1_set), len(words2_set))

                # If more than 50% overlap, consider it a duplicate
                return (overlap / max_words) < 0.5 if max_words > 0 else True

            # Create documents from transcript chunks
            documents = []
            total_captions = len(vtt_captions)
            processed_captions = 0
            chunk_size = 60  # Increased chunk size to 60 seconds
            current_chunk = []
            chunk_start = 0
            chunk_count = 0
            last_text = ""

            # Process captions with progress updates from 15-35%
            for caption in vtt_captions:
                cleaned_text = clean_caption(caption.text)
                if not cleaned_text:
                    continue

                start_seconds = _time_to_seconds(caption.start)

                # Only add text if it's substantially different from the last added text
                if is_substantial_difference(last_text, cleaned_text):
                    # Don't add if it's just a subset of any recent text in current chunk
                    if not any(cleaned_text in existing or existing in cleaned_text
                               for existing in current_chunk[-3:] if current_chunk):
                        current_chunk.append(cleaned_text)
                        last_text = cleaned_text

                # Create new chunk every chunk_size seconds or if chunk is getting too long
                if (start_seconds - chunk_start >= chunk_size and current_chunk) or \
                   (len(' '.join(current_chunk)) > 1000):  # Limit chunk size to ~1000 chars
                    if current_chunk:  # Only create chunk if there's content
                        chunk_count += 1
                        doc = Document(
                            page_content=" ".join(current_chunk),
                            metadata={
                                "title": info.get('title', ''),
                                "description": info.get('description', ''),
                                "author": info.get('uploader', ''),
                                "source": request.url,
                                "chunk_start": chunk_start,
                                "chunk_end": start_seconds,
                                "chunk_number": chunk_count
                            }
                        )
                        documents.append(doc)
                        current_chunk = []
                        chunk_start = start_seconds
                        last_text = ""

                processed_captions += 1
                if processed_captions % 100 == 0:  # Update every 100 captions
                    # Progress from 15% to 35%
                    percent = 15 + ((processed_captions / total_captions) * 20)
                    yield {"status": "progress", "data": {
                        "message": f"Processing transcript: {processed_captions}/{total_captions} captions",
                        "chunk": 2,
                        "total_chunks": 4,
                        "percent_complete": f"{percent:.1f}%"
                    }}

            # Add final chunk if any remains
            if current_chunk:
                chunk_count += 1
                doc = Document(
                    page_content=" ".join(current_chunk),
                    metadata={
                        "title": info.get('title', ''),
                        "description": info.get('description', ''),
                        "author": info.get('uploader', ''),
                        "source": request.url,
                        "chunk_start": chunk_start,
                        "chunk_end": _time_to_seconds(vtt_captions[-1].end),
                        "chunk_number": chunk_count
                    }
                )
                documents.append(doc)

            # Vectorstore initialization (35-40%)
            yield {"status": "progress", "data": {
                "message": "Initializing vector database...",
                "chunk": 3,
                "total_chunks": 4,
                "percent_complete": "40%"
            }}

            # Store documents in ChromaDB
            collection_name = sanitize_collection_name(
                str(request.collection_name))
            vectordb = get_vectorstore(
                request.api_key, collection_name, request.is_local, request.local_embedding_model)
            if not vectordb:
                raise Exception("Failed to initialize vector database")

            # Add documents in batches with progress updates (40-95%)
            total_docs = len(documents)
            docs_processed = 0
            batch_size = 100

            for i in range(0, len(documents), batch_size):
                batch = documents[i:i + batch_size]
                vectordb.add_documents(batch)

                docs_processed += len(batch)
                percent = 40 + ((docs_processed / total_docs)
                                * 55)  # Progress from 40% to 95%
                yield {"status": "progress", "data": {
                    "message": f"Embedding chunks in vector database: {docs_processed}/{total_docs}",
                    "chunk": 4,
                    "total_chunks": 4,
                    "percent_complete": f"{percent:.1f}%"
                }}

            # Final completion (95-100%)
            success_msg = f"Successfully processed and stored {chunk_count} transcript chunks. Total length: {sum(len(doc.page_content) for doc in documents)} characters"
            logger.info(success_msg)
            yield {"status": "progress", "data": {"message": success_msg, "chunk": 4, "total_chunks": 4, "percent_complete": "100%"}}

            # Save transcript to file
            collection_path = _get_collection_path(
                request.user_id,
                request.username,
                request.collection_id,
                request.collection_name
            )

            if not os.path.exists(collection_path):
                os.makedirs(collection_path, exist_ok=True)

            # Create filename using video title and timestamp
            safe_title = "".join(c for c in info.get(
                'title', 'unknown') if c.isalnum() or c in (' ', '-', '_')).rstrip()
            folder_name = f"{safe_title}_youtube"
            folder_path = os.path.join(collection_path, folder_name)
            os.makedirs(folder_path, exist_ok=True)

            # Save metadata
            metadata = {
                "title": info.get('title', ''),
                "uploader": info.get('uploader', ''),
                "duration": info.get('duration', ''),
                "description": info.get('description', ''),
                "url": request.url
            }
            with open(os.path.join(folder_path, "metadata.json"), "w", encoding="utf-8") as f:
                json.dump(metadata, f, ensure_ascii=False, indent=2)

            # Save full transcript
            with open(os.path.join(folder_path, "transcript.txt"), "w", encoding="utf-8") as f:
                f.write(f"Title: {info.get('title', 'Unknown')}\n")
                f.write(f"Author: {info.get('uploader', 'Unknown')}\n")
                f.write(f"Duration: {info.get('duration', 'Unknown')} seconds\n")
                f.write(f"Source URL: {request.url}\n")
                f.write("\n--- Transcript ---\n\n")
                for doc in documents:
                    f.write(f"[{doc.metadata['chunk_start']:.1f}s - {doc.metadata['chunk_end']:.1f}s]\n")
                    f.write(f"{doc.page_content}\n\n")

            # Save chunked transcripts with timestamps
            with open(os.path.join(folder_path, "transcript_chunks.json"), "w", encoding="utf-8") as f:
                chunks = [{
                    "content": doc.page_content,
                    "start_time": doc.metadata.get("chunk_start", 0),
                    "end_time": doc.metadata.get("chunk_end", 0),
                    "chunk_number": doc.metadata.get("chunk_number", 0)
                } for doc in documents]
                json.dump(chunks, f, ensure_ascii=False, indent=2)

            # Log success
            logger.info(f"Saved transcript to {folder_path}")

            return documents

    except Exception as e:
        error_msg = f"Error processing YouTube transcript: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise Exception(error_msg)


def _time_to_seconds(time_str):
    """Convert VTT timestamp to seconds"""
    h, m, s = time_str.split(':')
    return float(h) * 3600 + float(m) * 60 + float(s)


================================================
FILE: Backend/src/data/dataIntake/csvFallbackSplitting.py
================================================
from langchain_core.documents import Document
import pandas as pd
import io
import time
from typing import Generator


def split_csv_text(text: str, file_path: str, metadata: dict = None) -> Generator[dict | list, None, None]:
    """Split CSV text into chunks for embedding while preserving row integrity.

    Yields progress/error status dicts while working; the finished list of
    Document chunks is the generator's return value (retrievable by the
    consumer via StopIteration.value).

    Args:
        text: Raw CSV content, including the header row.
        file_path: Recorded as "source" in each chunk's metadata.
        metadata: Optional extra metadata merged into every chunk's metadata.
    """
    try:
        # Convert text back to DataFrame using StringIO
        yield {"status": "progress", "data": {"message": "Loading CSV data...", "chunk": 1, "total_chunks": 4, "percent_complete": "25%"}}
        df = pd.read_csv(io.StringIO(text))

        # Get headers
        headers = df.columns.tolist()

        # Header-only CSV: df.iloc[0] below would raise IndexError, so
        # finish early with an empty chunk list.
        if df.empty:
            yield {"status": "progress", "data": {"message": "Finalizing chunks...", "chunk": 4, "total_chunks": 4, "percent_complete": "100%"}}
            print("Split CSV into 0 chunks")
            return []

        # Calculate approximate number of rows per chunk (targeting ~2000 characters per chunk)
        yield {"status": "progress", "data": {"message": "Calculating chunk sizes...", "chunk": 2, "total_chunks": 4, "percent_complete": "50%"}}
        sample_row = df.iloc[0].to_string(index=False)
        # max(1, ...) guards against a zero-length sample row, which would
        # otherwise divide by zero below.
        chars_per_row = max(1, len(sample_row))
        rows_per_chunk = max(1, int(2000 / chars_per_row))

        documents = []
        total_rows = len(df)
        start_time = time.time()

        # Process DataFrame in chunks
        for i in range(0, total_rows, rows_per_chunk):
            # Calculate progress
            progress = min(100, int((i / total_rows) * 100))
            elapsed_time = time.time() - start_time
            est_remaining_time = "calculating..." if i == 0 else f"{(elapsed_time / (i + 1)) * (total_rows - i):.1f}s"

            yield {
                "status": "progress",
                "data": {
                    "message": f"Processing rows {i} to {min(i + rows_per_chunk, total_rows)}...",
                    "chunk": 3,
                    "total_chunks": 4,
                    "percent_complete": f"{progress}%",
                    "est_remaining_time": est_remaining_time
                }
            }

            chunk_df = df.iloc[i:i + rows_per_chunk]

            # Convert chunk to string more efficiently
            chunk_text = []
            chunk_text.append(",".join(headers))  # Add headers

            # Convert rows to strings efficiently
            for _, row in chunk_df.iterrows():
                chunk_text.append(",".join(str(val) for val in row))

            chunk_content = "\n".join(chunk_text)

            # Create document with metadata
            doc_metadata = {"source": file_path, "chunk_start": i}
            if metadata:
                doc_metadata.update(metadata)

            documents.append(
                Document(page_content=chunk_content, metadata=doc_metadata))

        yield {"status": "progress", "data": {"message": "Finalizing chunks...", "chunk": 4, "total_chunks": 4, "percent_complete": "100%"}}
        print(f"Split CSV into {len(documents)} chunks")
        return documents

    except Exception as e:
        print(f"Error splitting CSV text: {str(e)}")
        yield {"status": "error", "message": f"Error splitting CSV text: {str(e)}"}
        return []


================================================
FILE: Backend/src/data/dataIntake/fileTypes/loadX.py
================================================
import pandas as pd
import json
import markdown
from bs4 import BeautifulSoup
from pptx import Presentation
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.document_loaders.csv_loader import CSVLoader
from pypdf import PdfReader
from langchain_core.documents import Document
import logging
import os
import asyncio


async def load_pdf(file_path, chunk_size=None):
    """Load a PDF into per-page Document objects without blocking the event loop.

    Args:
        file_path: Path to the PDF on disk.
        chunk_size: Accepted for compatibility with load_document(), which
            passes chunk_size=50 for PDFs over 25MB — the previous signature
            rejected that kwarg with a TypeError. The value does not currently
            change processing; all pages are read in one executor call.

    Returns:
        list of Document (page text plus {"source", "page"} metadata), or
        None when loading fails or no page yields text. Errors are logged,
        never raised to the caller.
    """
    try:
        logging.info(f"Starting to load PDF: {file_path}")

        # Verify file exists and is readable
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"PDF file not found: {file_path}")

        def read_pdf():
            reader = PdfReader(file_path)
            pages = []
            for i, page in enumerate(reader.pages):
                text = page.extract_text()
                # Only include pages with content; the extra truthiness check
                # also guards against a None return from extract_text (seen in
                # older pypdf/PyPDF2 versions — TODO confirm for pinned version).
                if text and text.strip():
                    pages.append(
                        Document(
                            page_content=text,
                            metadata={"source": file_path, "page": i}
                        )
                    )
            return pages

        # Run PDF reading in a thread pool to avoid blocking
        pages = await asyncio.get_event_loop().run_in_executor(None, read_pdf)

        if not pages:
            logging.error(f"No valid pages found in {file_path}")
            return None

        logging.info(
            f"Successfully loaded {len(pages)} pages from {file_path}")
        logging.info(f"First page metadata: {pages[0].metadata}")
        logging.info(
            f"First page content sample: {pages[0].page_content[:200]}...")

        return pages
    except Exception as e:
        logging.error(
            f"Error loading PDF {file_path}: {str(e)}", exc_info=True)
        return None


async def load_py(file):
    """Read a Python source file and return its stripped text, or None on failure."""
    try:
        with open(file, 'r', encoding='utf-8') as handle:
            source = handle.read()
        return source.strip()
    except Exception as e:
        print(f"Error loading PY: {str(e)}")
        return None


async def load_docx(file):
    # NOTE(review): dead code — a second `async def load_docx` later in this
    # file (the executor-based version) rebinds the name at import time, so
    # this definition is never the one callers get. Kept byte-identical here;
    # recommend deleting it in a follow-up.
    """Extract text from a DOCX file via Docx2txtLoader; returns the first
    document's page_content, or None on failure."""
    try:
        loader = Docx2txtLoader(file)
        data = loader.load()
        print(data)  # debug output left in place (doc-only edit)
        return data[0].page_content
    except Exception as e:
        print(f"Error loading DOCX: {str(e)}")
        return None


async def load_txt(file):
    """Read a UTF-8 text file and return its stripped contents, or None on failure."""
    try:
        with open(file, 'r', encoding='utf-8') as handle:
            contents = handle.read()
        return contents.strip()
    except Exception as e:
        print(f"Error loading TXT: {str(e)}")
        return None


async def load_md(file):
    """Render a Markdown file to HTML, strip the markup, and return the plain
    text (stripped), or None on failure."""
    try:
        with open(file, 'r', encoding='utf-8') as handle:
            rendered = markdown.markdown(handle.read())
        return BeautifulSoup(rendered, 'html.parser').get_text().strip()
    except Exception as e:
        print(f"Error loading MD: {str(e)}")
        return None


async def load_html(file_path: str) -> str:
    """Load and process HTML file content.

    Parses the file, removes <script> and <style> subtrees, then collapses the
    remaining visible text into a single whitespace-normalized string.
    Returns None on any failure (despite the annotation — kept for interface
    compatibility).
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            soup = BeautifulSoup(f.read(), 'html.parser')

        # Drop executable/style markup before extracting visible text
        for element in soup(["script", "style"]):
            element.decompose()

        raw_text = soup.get_text()

        # Normalize whitespace: trim each line, split double-space "columns"
        # into separate phrases, discard empty fragments, join with spaces.
        fragments = (
            piece.strip()
            for line in raw_text.splitlines()
            for piece in line.strip().split("  ")
        )
        return ' '.join(piece for piece in fragments if piece)
    except Exception as e:
        logging.error(f"Error loading HTML file {file_path}: {str(e)}")
        return None


async def load_csv(file):
    """Load a CSV file through LangChain's CSVLoader; returns the loaded
    Documents, or None on failure."""
    try:
        return CSVLoader(file).load()
    except Exception as e:
        print(f"Error loading CSV: {str(e)}")
        return None


async def load_json(file):
    """Load a JSON file and return it re-serialized with 2-space indentation,
    or None on failure."""
    try:
        with open(file, 'r', encoding='utf-8') as handle:
            parsed = json.load(handle)
        return json.dumps(parsed, indent=2)
    except Exception as e:
        print(f"Error loading JSON: {str(e)}")
        return None


async def load_pptx(file):
    """Extract all shape text from a PPTX file, one line per shape.

    Made async: load_document() awaits every handler's result, and awaiting
    the old synchronous version's plain string return raised a TypeError.

    Returns:
        str: Newline-joined, stripped text of every text-bearing shape,
        or None on failure.
    """
    try:
        prs = Presentation(file)
        text = []
        for slide in prs.slides:
            for shape in slide.shapes:
                if hasattr(shape, "text"):
                    text.append(shape.text)
        return "\n".join(text).strip()
    except Exception as e:
        print(f"Error loading PPTX: {str(e)}")
        return None


async def load_xlsx(file):
    """Read an Excel workbook and return it as a stripped text table.

    Made async: load_document() awaits every handler's result, and awaiting
    the old synchronous version's plain string return raised a TypeError.

    Returns:
        str: The DataFrame rendered via to_string(), stripped, or None on
        failure.
    """
    try:
        df = pd.read_excel(file)
        return df.to_string().strip()
    except Exception as e:
        print(f"Error loading XLSX: {str(e)}")
        return None


async def load_docx(file):
    """Extract text from a DOCX file off the event loop via a thread pool.

    Returns the first loaded document's page_content, or None when loading
    fails or produces no content.
    """
    try:
        def _read_docx():
            # Synchronous LangChain loader, kept off the event loop
            docs = Docx2txtLoader(file).load()
            return docs[0].page_content if docs else None

        text = await asyncio.get_event_loop().run_in_executor(None, _read_docx)
        if not text:
            return None
        logging.info(f"Successfully loaded DOCX file: {file}")
        return text
    except Exception as e:
        logging.error(f"Error loading DOCX: {str(e)}")
        return None


================================================
FILE: Backend/src/data/dataIntake/getHtmlFiles.py
================================================
import os


def get_html_files(directory):
    """Recursively collect HTML files in a directory and its subdirectories.

    Matches both ".html" and ".htm" case-insensitively (the previous version
    only matched lowercase ".html", missing ".HTML"/".htm" files common on
    case-insensitive filesystems).

    Args:
        directory: Root directory to walk.

    Returns:
        list[str]: Full paths of every HTML file found.
    """
    html_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            if file.lower().endswith(('.html', '.htm')):
                html_files.append(os.path.join(root, file))
    return html_files


================================================
FILE: Backend/src/data/dataIntake/loadFile.py
================================================
import os
import logging

logger = logging.getLogger(__name__)

from src.data.dataIntake.fileTypes.loadX import (
    load_csv,
    load_docx,
    load_html,
    load_json,
    load_md,
    load_pptx,
    load_txt,
    load_xlsx,
    load_py,
    load_pdf,
)

# Dispatch table: lowercase file extension -> loader defined in
# fileTypes/loadX.py. load_document() below looks handlers up here by the
# file's extension and awaits the result.
file_handlers = {
    "pdf": load_pdf,
    "docx": load_docx,
    "txt": load_txt,
    "md": load_md,
    "html": load_html,
    "csv": load_csv,
    "json": load_json,
    "pptx": load_pptx,
    "xlsx": load_xlsx,
    "py": load_py,
}

async def load_document(file: str):
    """Load a file's content using the handler registered for its extension.

    Args:
        file: Path to the file; the extension selects the handler from
            file_handlers.

    Returns:
        The handler's result (page text / Document list, handler-dependent),
        or None for unsupported types or on any error.
    """
    try:
        file_type = file.split(".")[-1].lower()
        logger.info(f"Loading file of type: {file_type}")

        # Get file size
        file_size = os.path.getsize(file)
        logger.info(f"File size: {file_size / (1024*1024):.2f}MB")

        handler = file_handlers.get(file_type)
        if not handler:
            logger.error(f"Unsupported file type: {file_type}")
            return None

        # Previously large PDFs were called with handler(file, chunk_size=50),
        # but load_pdf's signature did not accept that kwarg, so the call
        # raised TypeError at runtime. Log the condition and use the normal
        # path for every file.
        if file_type == "pdf" and file_size > 25 * 1024 * 1024:  # 25MB
            logger.info("Large PDF detected")

        return await handler(file)

    except Exception as e:
        logger.error(f"Error loading file: {str(e)}")
        return None


================================================
FILE: Backend/src/data/dataIntake/textSplitting.py
================================================
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
import logging


def split_text(text: str, file_path: str, metadata: dict = None) -> list:
    """Split text into chunks for embedding.

    Args:
        text: Raw text to split; empty/None input yields [].
        file_path: Recorded as "source" in every chunk's metadata.
        metadata: Optional extra metadata copied into every chunk. The
            caller's dict is no longer mutated (the previous version wrote
            "source" into the caller's own dict as a side effect).

    Returns:
        list[Document]: Stripped ~500-char chunks with 20-char overlap;
        [] on empty input or error.
    """
    try:
        # Handle None or empty text
        if not text:
            logging.error(f"Empty or None text received from {file_path}")
            return []

        # Pre-process text to remove excessive whitespace
        text = " ".join(text.split())

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=20,
            length_function=len,
            is_separator_regex=False,
            # Prioritize sentence boundaries
            separators=[". ", "? ", "! ", "\n\n", "\n", " ", ""]
        )

        # Directly split text and create documents in one go
        texts = text_splitter.split_text(text)

        # Work on a copy so the caller's metadata dict is never mutated
        metadata = dict(metadata) if metadata else {}
        metadata["source"] = file_path

        docs = [Document(page_content=t.strip(), metadata=metadata.copy())
                for t in texts]

        if not docs:
            logging.warning(
                f"No documents created after splitting text from {file_path}")
        else:
            logging.info(
                f"Successfully split text into {len(docs)} chunks from {file_path}")

        return docs
    except Exception as e:
        logging.error(f"Error splitting text from {file_path}: {str(e)}")
        return []


================================================
FILE: Backend/src/data/database/checkAPIKey.py
================================================
from src.data.database.db import db


def check_api_key(user_id: int):
    """Return True if the given user has at least one dev API key in SQLite.

    Args:
        user_id: The user's numeric id.

    Returns:
        bool: True when a dev_api_keys row exists for the user; False on
        missing key, connection failure, or any query error.
    """
    print("Checking API key for user:", user_id)
    conn = None
    try:
        conn = db()
        if not conn:
            print("Failed to connect to database")
            return False

        cursor = conn.cursor()

        # Existence probe only — no row data is needed, so select a constant
        # and stop at the first match.
        cursor.execute("""
            SELECT 1 FROM dev_api_keys
            WHERE user_id = ?
            LIMIT 1
        """, (user_id,))

        api_key = cursor.fetchone()
        print(f"API key found for user {user_id}: {api_key is not None}")
        return api_key is not None

    except Exception as e:
        print(f"Error checking API key: {e}")
        return False
    finally:
        # Previously the connection leaked when the query raised; close it
        # on every path.
        if conn:
            conn.close()


================================================
FILE: Backend/src/data/database/db.py
================================================
import sqlite3
import os
import pathlib
import platform

IS_DEV = os.environ.get("IS_DEV") == "1"


def get_user_data_path():
    """Return the per-OS "notate" user-data directory.

    macOS: ~/Library/Application Support/notate; Windows: %APPDATA%/notate;
    elsewhere: ~/.config/notate. Dev builds (IS_DEV) get a "development"
    subdirectory so they never touch production data.
    """
    home = os.path.expanduser("~")
    system = platform.system()

    if system == "Darwin":  # macOS
        base_path = os.path.join(
            home, "Library", "Application Support", "notate")
    elif system == "Windows":
        base_path = os.path.join(os.getenv("APPDATA"), "notate")
    else:  # Linux and others
        base_path = os.path.join(home, ".config", "notate")

    # Development builds keep their data in a separate subdirectory
    return os.path.join(base_path, "development") if IS_DEV else base_path


def db():
    """Open a SQLite connection to the app database.

    Dev mode uses <project root>/../Database/database.sqlite; production uses
    the per-OS user-data directory from get_user_data_path(). Both branches
    now return None on failure — previously only the dev branch did, while
    the production branch raised, even though callers (check_api_key,
    get_collection_settings, ...) test for a falsy connection.

    Returns:
        sqlite3.Connection, or None when the connection cannot be opened.
    """
    if IS_DEV:
        try:
            # Get the absolute path to the project root
            root_dir = pathlib.Path(__file__).parent.parent.parent.parent
            db_path = os.path.join(root_dir, "..", 'Database', 'database.sqlite')
            # Ensure the Database directory exists
            os.makedirs(os.path.dirname(db_path), exist_ok=True)
            print(f"Connected to Database at: {db_path}")

            return sqlite3.connect(db_path)

        except Exception as e:
            print(f"Error connecting to database: {e}")
            return None

    else:
        try:
            # For production, use the user data directory
            user_data_path = get_user_data_path()
            db_dir = os.path.join(user_data_path, "Database")
            db_path = os.path.join(db_dir, "database.sqlite")

            # Ensure the Database directory exists
            os.makedirs(db_dir, exist_ok=True)
            print(f"Connected to Database at: {db_path}")

            return sqlite3.connect(db_path)

        except Exception as e:
            print(f"Error connecting to database: {e}")
            return None


================================================
FILE: Backend/src/data/database/getCollectionInfo.py
================================================
from src.data.database.db import db
from dataclasses import dataclass
from typing import Optional


@dataclass
class CollectionSettings:
    """One row of the `collections` table, as read by get_collection_settings()."""
    id: int  # collection primary key
    user_id: int  # owning user's id
    name: str  # collection name (query key together with user_id)
    description: str  # free-text description
    is_local: bool  # stored as 0/1 in SQLite; coerced with bool() on read
    local_embedding_model: Optional[str]  # embedding model name when is_local — TODO confirm semantics
    type: str  # collection type discriminator — presumably source kind; verify against writers
    files: Optional[str]  # file listing — format (JSON? CSV?) not visible here; confirm before parsing
    created_at: str  # creation timestamp string as stored in SQLite


def get_collection_settings(user_id: str, collection_name: str) -> Optional[CollectionSettings]:
    """
    Get collection settings for a specific user and collection name
    Args:
        user_id (str): The user ID
        collection_name (str): The name of the collection
    Returns:
        CollectionSettings: Collection settings object, or None if not found,
        on connection failure, or on query error
    """
    conn = None
    try:
        conn = db()
        if not conn:
            print("Failed to connect to database")
            return None

        cursor = conn.cursor()

        cursor.execute("""
            SELECT id, user_id, name, description, is_local, local_embedding_model, type, files, created_at 
            FROM collections
            WHERE name = ? AND user_id = ?
        """, (collection_name, user_id))

        row = cursor.fetchone()

        if not row:
            return None

        return CollectionSettings(
            id=row[0],
            user_id=row[1],
            name=row[2],
            description=row[3],
            is_local=bool(row[4]),  # SQLite stores booleans as 0/1
            local_embedding_model=row[5],
            type=row[6],
            files=row[7],
            created_at=row[8]
        )

    except Exception as e:
        print(f"Error retrieving collection settings: {e}")
        return None
    finally:
        # Previously the connection leaked when the query raised; close it
        # on every path.
        if conn:
            conn.close()


================================================
FILE: Backend/src/data/database/getLLMApiKey.py
================================================
from src.data.database.db import db


def get_llm_api_key(user_id, provider):
    """Fetch the stored LLM API key for (user_id, provider).

    Args:
        user_id: The user's id.
        provider: Provider name as stored in api_keys.provider (e.g. "openai").

    Returns:
        str | None: The key, or None when absent, on connection failure, or
        on any query error.
    """
    conn = None
    try:
        conn = db()
        if not conn:
            # db() returns None on connection failure
            return None
        cursor = conn.cursor()
        cursor.execute(
            "SELECT key FROM api_keys WHERE user_id = ? AND provider = ?", (user_id, provider))
        result = cursor.fetchone()
        return result[0] if result else None
    except Exception as e:
        # Provider-generic message — was hard-coded to "OpenAI" even though
        # this function serves every provider.
        print(f"Error retrieving {provider} API key: {e}")
        return None
    finally:
        # Previously the connection leaked when the query raised; close it
        # on every path.
        if conn:
            conn.close()


================================================
FILE: Backend/src/endpoint/api.py
================================================
from typing import AsyncGenerator
import json
from src.endpoint.models import ChatCompletionRequest
from transformers import TextIteratorStreamer
from threading import Thread
import logging
from src.models.manager import model_manager
from src.models.streamer import TextGenerator, StopOnInterrupt
import uuid
import time
import torch
import transformers


logger = logging.getLogger(__name__)


async def chat_completion_stream(request: ChatCompletionRequest) -> AsyncGenerator[str, None]:
    """Stream chat completion from the model as OpenAI-compatible SSE lines.

    Builds a plain-text prompt from the request's messages, then streams
    generated text from the model currently loaded in model_manager:
    llama.cpp models use TextGenerator's built-in streaming; transformers
    models run model.generate on a background thread and stream decoded text
    character-by-character through a TextIteratorStreamer. Every yielded item
    is a "data: {json}\\n\\n" line; the stream ends with "data: [DONE]\\n\\n".
    Errors are reported to the client as a final chunk with finish_reason
    "error" rather than raised.

    Args:
        request: Chat history plus sampling parameters (max_tokens,
            temperature, top_p, top_k) and the stream flag.

    Yields:
        str: SSE-formatted "data: ..." lines.
    """
    try:
        model = model_manager.current_model
        if not model:
            yield f"data: {json.dumps({'error': 'No model loaded'})}\n\n"
            return
        print(request.messages)
        # Convert messages to prompts

        try:
            prompt = ""  # Initialize prompt variable
            # Format messages without explicit User/Assistant markers
            for msg in request.messages:
                if msg.role == "system":
                    prompt += f"{msg.content}\n"
                elif msg.role == "user":
                    prompt += f"Question: {msg.content}\n"
                elif msg.role == "assistant":
                    prompt += f"Response: {msg.content}\n"
            prompt += "Response: "

            logger.info(f"Generated prompt: {prompt}")
        except Exception as e:
            logger.error(f"Error formatting prompt: {str(e)}", exc_info=True)
            raise

        # Create text generator
        try:
            generator = TextGenerator(
                model, model_manager.current_tokenizer, model_manager.device)

            # For llama.cpp models, we don't need to pre-encode the input
            if model_manager.model_type != "llama.cpp":
                # Only encode for transformers models
                input_ids = model_manager.current_tokenizer.encode(
                    prompt, return_tensors="pt")
                attention_mask = torch.ones_like(input_ids)
                if hasattr(model, "device"):
                    input_ids = input_ids.to(model.device)
                    attention_mask = attention_mask.to(model.device)
        except Exception as e:
            logger.error(
                f"Error setting up generator: {str(e)}", exc_info=True)
            raise

        # NOTE(review): when request.stream is false this generator yields
        # nothing at all — confirm the non-streaming case is handled upstream.
        if request.stream:
            try:
                # Different handling for llama.cpp vs transformers models
                if model_manager.model_type == "llama.cpp":
                    # Use the TextGenerator's built-in streaming for llama.cpp
                    stream_iterator = generator.generate(
                        prompt=prompt,
                        max_new_tokens=min(request.max_tokens or 2048, 2048),
                        temperature=request.temperature or 0.7,
                        top_p=request.top_p or 0.95,
                        top_k=request.top_k or 40,
                        repetition_penalty=1.2,
                        stream=True
                    )
                    async for chunk in stream_iterator:
                        yield chunk
                    yield "data: [DONE]\n\n"
                else:
                    # Set up generation config for transformers models
                    gen_config = {
                        # Cap at 2048 if not specified
                        "max_new_tokens": min(request.max_tokens or 2048, 2048),
                        "temperature": request.temperature or 0.7,
                        "top_p": request.top_p or 0.95,
                        "top_k": request.top_k or 40,  # Slightly lower for more focused sampling
                        "repetition_penalty": 1.2,  # Increased to reduce repetition
                        "do_sample": True,
                        "pad_token_id": model_manager.current_tokenizer.pad_token_id,
                        "eos_token_id": model_manager.current_tokenizer.eos_token_id,
                        "no_repeat_ngram_size": 5,  # Increased to catch longer repetitive phrases
                        "min_new_tokens": 32,  # Increased minimum for more complete thoughts
                        "max_time": 30.0,
                        "stopping_criteria": transformers.StoppingCriteriaList([StopOnInterrupt()]),
                        "forced_eos_token_id": model_manager.current_tokenizer.eos_token_id,
                        "length_penalty": 0.8,  # Slight penalty for longer sequences
                        "num_return_sequences": 1,
                        "remove_invalid_values": True
                    }

                    # Add [END] token to the tokenizer's special tokens
                    special_tokens = {"additional_special_tokens": ["[END]"]}
                    model_manager.current_tokenizer.add_special_tokens(
                        special_tokens)

                    logger.info(f"Generation config: {gen_config}")

                    # Create streamer with token-by-token streaming
                    # NOTE(review): skip_word_before_colon and
                    # tokenizer_decode_kwargs are not documented
                    # TextIteratorStreamer parameters — they are swallowed as
                    # decode kwargs; confirm the pinned transformers version
                    # accepts them.
                    streamer = TextIteratorStreamer(
                        model_manager.current_tokenizer,
                        skip_prompt=True,
                        skip_special_tokens=True,
                        timeout=None,  # No timeout to prevent queue.Empty errors
                        skip_word_before_colon=False,
                        spaces_between_special_tokens=False,
                        tokenizer_decode_kwargs={"skip_special_tokens": True}
                    )
                    generation_kwargs = dict(
                        input_ids=input_ids,
                        attention_mask=attention_mask,
                        streamer=streamer,
                        **gen_config
                    )

                    # Create thread for generation
                    thread = Thread(target=model.generate,
                                    kwargs=generation_kwargs)
                    thread.start()

                    # Generate a consistent ID for this completion
                    completion_id = f"chatcmpl-{uuid.uuid4()}"

                    # Send the initial role message
                    response = {
                        "id": completion_id,
                        "object": "chat.completion.chunk",
                        "created": int(time.time()),
                        "model": "local-model",
                        "choices": [{
                            "index": 0,
                            "delta": {"role": "assistant"},
                            "finish_reason": None
                        }]
                    }
                    yield f"data: {json.dumps(response)}\n\n"

                    # Stream the output
                    accumulated_text = ""
                    for new_text in streamer:
                        if not new_text:
                            continue

                        # Split into individual characters/tokens for smoother streaming
                        chars = list(new_text)
                        for char in chars:
                            accumulated_text += char
                            response = {
                                "id": completion_id,
                                "object": "chat.completion.chunk",
                                "created": int(time.time()),
                                "model": "local-model",
                                "choices": [{
                                    "index": 0,
                                    "delta": {"content": char},
                                    "finish_reason": None
                                }]
                            }
                            yield f"data: {json.dumps(response)}\n\n"

                    # Send the final message
                    response = {
                        "id": completion_id,
                        "object": "chat.completion.chunk",
                        "created": int(time.time()),
                        "model": "local-model",
                        "choices": [{
                            "index": 0,
                            "delta": {},
                            "finish_reason": "stop"
                        }]
                    }
                    yield f"data: {json.dumps(response)}\n\n"
                    yield "data: [DONE]\n\n"

            except Exception as e:
                logger.error(
                    f"Error during streaming: {str(e)}", exc_info=True)
                raise

    except Exception as e:
        logger.error(f"Error in chat completion: {str(e)}", exc_info=True)
        error_response = {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": "local-model",
            "choices": [{
                "index": 0,
                "delta": {
                    "content": f"Error: {str(e)}"
                },
                "finish_reason": "error"
            }]
        }
        yield f"data: {json.dumps(error_response)}\n\n"
        yield "data: [DONE]\n\n"  # Make sure to send DONE even on error


================================================
FILE: Backend/src/endpoint/deleteStore.py
================================================
from src.endpoint.models import DeleteCollectionRequest
from src.vectorstorage.vectorstore import get_vectorstore
import logging

logger = logging.getLogger(__name__)


def delete_vectorstore_collection(data: DeleteCollectionRequest):
    """Drop an entire vectorstore collection.

    Returns True when the collection was resolved and deleted, False when
    the store could not be obtained or deletion raised.
    """
    try:
        logger.info(f"Deleting vectorstore collection: {data.collection_name}")
        store = get_vectorstore(
            data.api_key, data.collection_name, data.is_local)
        if not store:
            return False
        store.delete_collection()
        return True
    except Exception as e:
        logger.error(f"Error deleting vectorstore collection: {str(e)}")
        return False


================================================
FILE: Backend/src/endpoint/devApiCall.py
================================================
from src.data.database.getCollectionInfo import get_collection_settings
from src.data.database.getLLMApiKey import get_llm_api_key
from src.endpoint.models import VectorStoreQueryRequest
from src.endpoint.ragQuery import rag_query
from src.endpoint.vectorQuery import query_vectorstore
from src.llms.llmQuery import llm_query
from src.endpoint.models import ChatCompletionRequest


def vector_call(query_request: VectorStoreQueryRequest, user_id: str):
    """Handle a plain vector-store query (no model supplied).

    When the request carries no model name, look up the collection settings,
    resolve an OpenAI key for hosted embeddings, and run a similarity search.
    Returns the query_vectorstore result, or None when a model WAS supplied
    (callers are expected to route those requests elsewhere).

    Raises:
        ValueError: if the collection settings cannot be found.
    """
    print(f"API vector query received for user {user_id}")
    if not query_request.model:
        print(f"No model provided in request body for user {user_id}")
        # VECTORSTORE QUERY IF NO MODEL PROVIDED IN REQUEST BODY
        collectionSettings = get_collection_settings(
            user_id, query_request.collection_name)
        # Bug fix: validate the settings BEFORE dereferencing them. The old
        # code read collectionSettings.is_local first, so a missing collection
        # raised AttributeError instead of this intended ValueError.
        if not collectionSettings:
            raise ValueError("Collection settings not found")
        if collectionSettings.is_local == False:
            # Hosted embeddings need the user's OpenAI key.
            api_key = get_llm_api_key(int(user_id), "openai")
        else:
            api_key = None

        vectorStoreData = VectorStoreQueryRequest(
            query=query_request.input,
            collection=collectionSettings.id,
            collection_name=query_request.collection_name,
            user=user_id,
            api_key=api_key,
            top_k=query_request.top_k,
            is_local=collectionSettings.is_local,
            local_embedding_model=collectionSettings.local_embedding_model
        )
        return query_vectorstore(vectorStoreData, collectionSettings.is_local)


async def rag_call(query_request: VectorStoreQueryRequest, user_id: str):
    """Build a fully-populated RAG request from the stored collection
    settings and delegate to rag_query."""
    print(f"Model provided in request body for user {user_id}")
    # MODEL + VECTORSTORE QUERY IF MODEL AND COLLECTION NAME PROVIDED IN REQUEST BODY
    settings = get_collection_settings(
        user_id, query_request.collection_name)
    if not settings:
        raise ValueError("Collection settings not found")
    if query_request.is_local == False:
        api_key = get_llm_api_key(int(user_id), query_request.provider)
    else:
        api_key = None
    rag_request = VectorStoreQueryRequest(
        query=query_request.input,
        collection=settings.id,
        collection_name=query_request.collection_name,
        user=user_id,
        api_key=api_key,
        top_k=query_request.top_k,
        is_local=settings.is_local,
        local_embedding_model=settings.local_embedding_model,
        temperature=query_request.temperature,
        max_completion_tokens=query_request.max_completion_tokens,
        top_p=query_request.top_p,
        frequency_penalty=query_request.frequency_penalty,
        presence_penalty=query_request.presence_penalty,
        provider=query_request.provider,
        model=query_request.model,
        is_ooba=query_request.is_ooba
    )
    return await rag_query(rag_request, settings)


async def llm_call(query_request: ChatCompletionRequest, user_id: str):
    """Resolve the provider API key (for hosted models) and run a plain
    chat completion via llm_query."""
    print(
        f"Model and collection name provided in request body for user {user_id}")
    # MODEL QUERY IF MODEL BUT NO COLLECTION NAME PROVIDED IN REQUEST BODY
    api_key = None
    if query_request.is_local == False:
        api_key = get_llm_api_key(int(user_id), query_request.provider)
    return await llm_query(query_request, api_key)


================================================
FILE: Backend/src/endpoint/embed.py
================================================
from src.data.dataIntake.textSplitting import split_text
from src.data.dataIntake.loadFile import load_document
from src.endpoint.models import EmbeddingRequest
from src.vectorstorage.helpers.sanitizeCollectionName import sanitize_collection_name
from src.vectorstorage.vectorstore import get_vectorstore
from src.vectorstorage.embeddings import embed_chunk, chunk_list

import os
import multiprocessing
import concurrent.futures
import time
from typing import AsyncGenerator
from collections import deque
import logging

logger = logging.getLogger(__name__)


async def embed(data: EmbeddingRequest) -> AsyncGenerator[dict, None]:
    """Load, split and embed a file into the vector store.

    Streams progress as dicts: {"status": "info"|"progress"|"success"|"error", ...}.
    Large files (>25MB) get a warning, CSV/PDF loaders return pre-split
    documents, everything else goes through split_text. Chunks are embedded
    in parallel on a small thread pool.

    Args:
        data: EmbeddingRequest with the file path, collection and embedding
            backend selection.

    Yields:
        Progress/status dicts; a final {"status": "error"} dict on failure
        (no exception escapes this generator).
    """
    file_name = os.path.basename(data.file_path)
    try:
        yield {"status": "info", "message": f"Starting embedding process for file: {file_name}"}

        # Get file size
        file_size = os.path.getsize(data.file_path)
        if file_size > 25 * 1024 * 1024:  # If file is larger than 25MB
            yield {"status": "info", "message": f"Processing large file ({file_size / (1024*1024):.1f}MB). This may take longer."}

        text_output = await load_document(data.file_path)

        if text_output is None:
            raise Exception("Failed to load document")

        # Handle generator output (e.g. the CSV loader): it interleaves
        # progress dicts with the final payload list.
        if hasattr(text_output, '__iter__') and not isinstance(text_output, (str, list)):
            texts = []
            for item in text_output:
                if isinstance(item, dict) and "status" in item:
                    # Forward progress updates from CSV processing
                    yield item
                else:
                    texts = item
        else:
            yield {"status": "info", "message": "File loaded successfully"}

        # CSV and PDF loaders already return lists of Documents; only other
        # formats need splitting here.
        if file_name.lower().endswith('.csv'):
            texts = text_output
        elif file_name.lower().endswith('.pdf'):
            texts = text_output
        else:
            # Pass metadata to split_text if it exists
            texts = split_text(text_output, data.file_path,
                               data.metadata if hasattr(data, 'metadata') else None)

        if not texts:
            raise Exception("No text content extracted from file")

        yield {"status": "info", "message": f"Split text into {len(texts)} chunks"}

        collection_name = sanitize_collection_name(str(data.collection_name))
        vectordb = get_vectorstore(
            data.api_key, collection_name, data.is_local, data.local_embedding_model)
        if not vectordb:
            raise Exception("Failed to initialize vector database")

        # Dynamic chunk size: bigger files -> smaller batches (10..50 docs).
        chunk_size = min(50, max(10, int(1000000 / file_size)))
        chunks = list(chunk_list(texts, chunk_size))
        total_chunks = len(chunks)
        yield {"status": "info", "message": f"Split into {total_chunks} chunks of {chunk_size} documents each"}

        start_time = time.time()
        time_history = deque(maxlen=5)

        # Cap parallelism at 4 workers, leaving one core free.
        num_cores = max(1, min(multiprocessing.cpu_count() - 1, 4))
        yield {"status": "info", "message": f"Using {num_cores} CPU cores for processing"}

        # Bug fix: the old code iterated as_completed() INSIDE the submission
        # loop, which blocked until all pending futures finished after every
        # single submit and effectively serialized the pool. Submit all work
        # first, then stream results as they complete.
        with concurrent.futures.ThreadPoolExecutor(max_workers=num_cores) as executor:
            futures = [
                executor.submit(
                    embed_chunk,
                    (vectordb, chunk, i + 1, total_chunks, start_time, time_history))
                for i, chunk in enumerate(chunks)
            ]
            for completed in concurrent.futures.as_completed(futures):
                try:
                    result = completed.result()
                    yield {"status": "progress", "data": result}
                except Exception as e:
                    logger.error(f"Error processing chunk: {str(e)}")
                    yield {"status": "error", "message": f"Error processing chunk: {str(e)}"}

        yield {"status": "success", "message": "Embedding completed successfully"}

    except Exception as e:
        error_msg = f"Error embedding file: {str(e)}"
        logger.error(error_msg)
        yield {"status": "error", "message": error_msg}


================================================
FILE: Backend/src/endpoint/models.py
================================================
from pydantic import BaseModel
from typing import Optional, Dict, Any, List, Literal


class EmbeddingRequest(BaseModel):
    """Parameters for embedding one file into a vector collection."""
    file_path: str  # path of the file to load, split and embed
    api_key: Optional[str] = None  # embedding-provider key (presumably unused for local embeddings — verify)
    collection: int  # numeric collection id
    collection_name: str  # human-readable collection name (sanitized downstream)
    user: int  # owning user id
    metadata: Optional[Dict[str, Any]] = None  # extra metadata forwarded to the text splitter
    is_local: Optional[bool] = False  # True -> use a local embedding model instead of a hosted API
    local_embedding_model: Optional[str] = "granite-embedding:278m"  # local embedding model tag


class ModelLoadRequest(BaseModel):
    """Parameters for loading a local model via one of the backend loaders.

    Only the settings matching the selected model_type are consulted by the
    corresponding loader; the rest are ignored.
    """
    model_name: str
    model_type: Optional[str] = "auto"  # 'auto', 'Transformers', 'llama.cpp', 'llamacpp_HF', 'ExLlamav2', 'ExLlamav2_HF', 'HQQ', 'TensorRT-LLM'
    device: Optional[str] = "auto"  # 'cpu', 'cuda', 'auto'
    
    # Transformers specific settings
    load_in_8bit: Optional[bool] = False
    load_in_4bit: Optional[bool] = False
    use_flash_attention: Optional[bool] = False
    trust_remote_code: Optional[bool] = True
    use_safetensors: Optional[bool] = True
    max_memory: Optional[Dict[str, str]] = None
    compute_dtype: Optional[str] = "float16"  # float16, bfloat16, float32
    rope_scaling: Optional[Dict[str, Any]] = None
    use_cache: Optional[bool] = True
    revision: Optional[str] = None
    padding_side: Optional[str] = "right"
    use_fast_tokenizer: Optional[bool] = True
    hf_token: Optional[str] = None  # HuggingFace token for gated models
    
    # ExLlamav2 specific settings
    max_seq_len: Optional[int] = None
    compress_pos_emb: Optional[float] = 1.0
    alpha_value: Optional[float] = 1
    
    # llama.cpp specific settings
    n_ctx: Optional[int] = 2048
    n_batch: Optional[int] = 512
    n_threads: Optional[int] = None
    n_threads_batch: Optional[int] = None
    n_gpu_layers: Optional[int] = 32
    main_gpu: Optional[int] = 0
    tensor_split: Optional[List[float]] = None
    mul_mat_q: Optional[bool] = True
    use_mmap: Optional[bool] = True
    use_mlock: Optional[bool] = False
    offload_kqv: Optional[bool] = False
    split_mode: Optional[str] = None
    flash_attn: Optional[bool] = False
    cache_type: Optional[str] = None
    cache_size: Optional[int] = None
    rope_scaling_type: Optional[str] = None
    rope_freq_base: Optional[float] = None
    rope_freq_scale: Optional[float] = None
    
    # HQQ specific settings
    hqq_backend: Optional[str] = "PYTORCH_COMPILE"  # PYTORCH_COMPILE, ATEN, TENSORRT
    
    # TensorRT-LLM specific settings
    engine_dir: Optional[str] = None
    max_batch_size: Optional[int] = 1
    max_input_len: Optional[int] = 2048
    max_output_len: Optional[int] = 512
    
    # Common settings
    model_path: Optional[str] = None  # Custom path to model files if not in default location
    tokenizer_path: Optional[str] = None  # Custom path to tokenizer if different from model path
    
    class Config:
        # Allow field names starting with "model_" without pydantic warnings.
        protected_namespaces = ()

class VectorStoreQueryRequest(BaseModel):
    """Parameters for a similarity search, optionally followed by an LLM
    call (RAG) using the generation settings below.

    NOTE(review): devApiCall accesses `.input` on values annotated with this
    model, but no `input` field exists here — confirm the actual runtime type.
    """
    query: str  # text to embed and search for
    collection: Optional[int] = None  # numeric collection id
    collection_name: str
    user: int
    api_key: Optional[str] = None
    top_k: int = 5  # number of nearest documents to return
    is_local: Optional[bool] = False
    local_embedding_model: Optional[str] = "granite-embedding:278m"
    prompt: Optional[str] = None  # system prompt; rag_query overwrites this with retrieved context
    provider: Optional[str] = None
    model: Optional[str] = None
    temperature: Optional[float] = 0.5
    max_completion_tokens: Optional[int] = 2048
    top_p: Optional[float] = 1
    frequency_penalty: Optional[float] = 0
    presence_penalty: Optional[float] = 0
    is_ooba: Optional[bool] = False
    character: Optional[str] = None  # Ooba chat character name
    is_ollama: Optional[bool] = False


class YoutubeTranscriptRequest(BaseModel):
    """Parameters for fetching a YouTube transcript and embedding it."""
    url: str  # YouTube video URL
    user_id: int
    collection_id: int
    username: str
    collection_name: str
    api_key: Optional[str] = None
    is_local: Optional[bool] = False  # True -> local embedding model
    local_embedding_model: Optional[str] = "granite-embedding:278m"


class DeleteCollectionRequest(BaseModel):
    """Parameters for deleting a vectorstore collection."""
    collection_id: int
    collection_name: str
    is_local: Optional[bool] = False  # selects which vectorstore backend to resolve
    api_key: Optional[str] = None


class WebCrawlRequest(BaseModel):
    """Parameters for crawling a site and embedding the fetched pages."""
    base_url: str  # crawl starting point; pages are mirrored under its netloc
    max_workers: int  # crawler thread count
    collection_name: str
    collection_id: int
    user_id: int
    user_name: str
    api_key: Optional[str] = None
    is_local: Optional[bool] = False
    local_embedding_model: Optional[str] = "granite-embedding:278m"


class QueryRequest(BaseModel):
    """Generic LLM query parameters used by the query-style endpoints."""
    input: str  # the user's question / input text
    prompt: Optional[str] = None  # system prompt (see formMessages.form_messages)
    provider: Optional[str] = None  # provider hint, e.g. "openai"
    model: Optional[str] = None
    collection_name: Optional[str] = None  # when set, presumably enables RAG against that collection — verify against caller
    top_k: Optional[int] = 5
    temperature: Optional[float] = 0.5
    max_completion_tokens: Optional[int] = 2048
    top_p: Optional[float] = 1
    frequency_penalty: Optional[float] = 0
    presence_penalty: Optional[float] = 0
    is_local: Optional[bool] = False
    is_ooba: Optional[bool] = False
    local_embedding_model: Optional[str] = "granite-embedding:278m"
    character: Optional[str] = None  # Ooba chat character (read by ooba_query)
    is_ollama: Optional[bool] = False


class Message(BaseModel):
    """A single message in a chat completion request."""
    role: Literal["system", "user", "assistant"]  # OpenAI-style speaker role
    content: str  # message text
    name: Optional[str] = None  # optional participant name


class ChatCompletionRequest(BaseModel):
    """Request model for chat completion (OpenAI-style)."""
    messages: List[Message]
    model: str = "local-model"
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.95
    top_k: Optional[int] = 50
    n: Optional[int] = 1
    max_tokens: Optional[int] = 2048
    presence_penalty: Optional[float] = 0.1
    frequency_penalty: Optional[float] = 0.1
    repetition_penalty: Optional[float] = 1.1
    stop: Optional[List[str]] = None
    stream: Optional[bool] = True
    is_local: Optional[bool] = False
    is_ooba: Optional[bool] = False
    is_ollama: Optional[bool] = False
    # Bug fix: fields read by downstream code but previously missing from this
    # model — llm_call reads .provider, ooba_query reads .character and
    # ollama_query reads .max_completion_tokens on instances of this class,
    # which raised AttributeError; callers passing these as kwargs had them
    # silently dropped by pydantic's default extra-field handling.
    max_completion_tokens: Optional[int] = None  # alias for max_tokens used by some callers
    provider: Optional[str] = None  # provider routing hint, e.g. "openai"
    character: Optional[str] = None  # Ooba chat character name

class GenerateRequest(BaseModel):
    """Request model for raw text generation (no chat structure)."""
    prompt: str  # raw prompt text fed directly to the model
    max_tokens: Optional[int] = 512
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.95
    top_k: Optional[int] = 50
    repetition_penalty: Optional[float] = 1.1
    stop_sequences: Optional[List[str]] = None  # generation halts at any of these strings
    echo: Optional[bool] = False  # include the prompt in the output when True
    stream: Optional[bool] = True


================================================
FILE: Backend/src/endpoint/ragQuery.py
================================================
from src.endpoint.models import VectorStoreQueryRequest, ChatCompletionRequest
from src.endpoint.vectorQuery import query_vectorstore
from src.llms.llmQuery import llm_query


async def rag_query(data: VectorStoreQueryRequest, collectionInfo):
    """Run a RAG query: retrieve similar documents, inject them into the
    system prompt, then run a chat completion.

    Args:
        data: query plus generation parameters; data.prompt is overwritten
            with the retrieved context.
        collectionInfo: collection settings object (name/files/description).

    Returns:
        The llm_query response dict.

    Raises:
        Re-raises any retrieval or LLM error after printing it.
    """
    try:
        results = query_vectorstore(data, data.is_local)
        data.prompt = f"The following is the data that the user has provided via their custom data collection: " + \
            f"\n\n{results}" + \
            f"\n\nCollection/Store Name: {collectionInfo.name}" + \
            f"\n\nCollection/Store Files: {collectionInfo.files}" + \
            f"\n\nCollection/Store Description: {collectionInfo.description}"

        chat_completion_request = ChatCompletionRequest(
            messages=[
                {
                    "role": "system",
                    "content": data.prompt
                },
                {
                    "role": "user",
                    "content": data.query
                }
            ],
            model=data.model,
            temperature=data.temperature,
            # Bug fix: ChatCompletionRequest's declared token-limit field is
            # max_tokens; the old max_completion_tokens kwarg was silently
            # dropped by pydantic, so the 2048 default was always used.
            max_tokens=data.max_completion_tokens,
            max_completion_tokens=data.max_completion_tokens,
            top_p=data.top_p,
            frequency_penalty=data.frequency_penalty,
            presence_penalty=data.presence_penalty,
            provider=data.provider,
            is_local=data.is_local,
            # Bug fix: routing flags were not forwarded, so ooba/ollama RAG
            # requests fell through to the OpenAI provider in llm_query.
            is_ooba=data.is_ooba,
            is_ollama=data.is_ollama
        )
        llm_response = await llm_query(chat_completion_request, data.api_key)
        return llm_response
    except Exception as e:
        print(e)
        raise e


================================================
FILE: Backend/src/endpoint/transcribe.py
================================================
from src.voice.voice_to_text import initialize_model

import os
import tempfile
from fastapi import UploadFile, File, HTTPException

# Global variables
model = None  # NOTE(review): shadowed by the local `model` in transcribe_audio; appears unused in this file — confirm nothing imports it
ffmpeg_path = None  # NOTE(review): never referenced in this file — presumably set elsewhere; verify before removing


async def transcribe_audio(audio_file: UploadFile = File(...), model_name: str = "base") -> dict:
    """Transcribe an uploaded audio file with Whisper.

    Writes the upload to a temporary .wav file, runs the model on it, and
    always removes the temp file afterwards. Returns a dict carrying either
    the transcription ("status": "success") or the error message.
    """
    temp_file = None
    try:
        # The model initializer also verifies FFmpeg is usable.
        whisper_model = initialize_model(model_name)
        if not whisper_model:
            raise HTTPException(
                status_code=500, detail="FFmpeg not found or not working")

        # Persist the upload so Whisper can read it from disk.
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
        audio_bytes = await audio_file.read()
        temp_file.write(audio_bytes)
        temp_file.flush()
        temp_file.close()

        result = whisper_model.transcribe(temp_file.name)

        return {
            "status": "success",
            "text": result["text"],
            "language": result.get("language", "unknown"),
            "segments": result.get("segments", []),
        }

    except Exception as e:
        print(f"Error transcribing audio: {str(e)}")
        return {
            "status": "error",
            "error": str(e)
        }
    finally:
        # Best-effort cleanup; a failed unlink only logs a warning.
        if temp_file and os.path.exists(temp_file.name):
            try:
                os.unlink(temp_file.name)
                print(f"Deleted temporary file: {temp_file.name}")
            except Exception as e:
                print(
                    f"Warning: Could not delete temporary file {temp_file.name}: {str(e)}")


================================================
FILE: Backend/src/endpoint/vectorQuery.py
================================================
from src.endpoint.models import VectorStoreQueryRequest
from src.vectorstorage.helpers.sanitizeCollectionName import sanitize_collection_name
from src.vectorstorage.vectorstore import get_vectorstore


def query_vectorstore(data: VectorStoreQueryRequest, is_local: bool):
    """Run a similarity search against the named collection.

    Returns {"status": "success", "results": [...]} with content/metadata
    pairs, or {"status": "error", "message": ...} on failure.
    """
    try:
        name = sanitize_collection_name(str(data.collection_name))
        store = get_vectorstore(
            data.api_key, name, is_local, data.local_embedding_model)
        matches = store.similarity_search(data.query, k=data.top_k)
        formatted = [
            {"content": doc.page_content, "metadata": doc.metadata}
            for doc in matches
        ]
        return {
            "status": "success",
            "results": formatted,
        }
    except Exception as e:
        print(f"Error querying vectorstore: {str(e)}")
        return {"status": "error", "message": str(e)}


================================================
FILE: Backend/src/endpoint/webcrawl.py
================================================
from src.data.dataIntake.fileTypes.loadX import load_html
from src.data.dataIntake.textSplitting import split_text
from src.data.dataIntake.getHtmlFiles import get_html_files
from src.data.dataFetch.webcrawler import WebCrawler
from src.endpoint.models import WebCrawlRequest
from src.vectorstorage.vectorstore import get_vectorstore

from typing import Generator
import json
import os
from urllib.parse import urlparse
import logging


def webcrawl(data: WebCrawlRequest, cancel_event=None) -> Generator[dict, None, None]:
    """Crawl a website and embed every fetched page into the vector store.

    Phase 1 streams crawl progress from WebCrawler.scrape(); phase 2 loads
    the HTML files the crawler wrote to disk, splits them, and adds them to
    the vector store in batches of 50 documents.

    Yields:
        SSE-style strings of the form "data: {...json...}" describing
        progress, success, or error.

    NOTE(review): sibling streaming endpoints in this project terminate each
    SSE event with a blank line; these yields do not — confirm the consumer
    tolerates that.
    """
    try:
        # Create web crawler instance with all required fields
        scraper = WebCrawler(
            data.base_url,
            data.user_id,
            data.user_name,
            data.collection_id,
            data.collection_name,
            max_workers=data.max_workers,
            cancel_event=cancel_event
        )

        # Yield progress updates during scraping
        for progress in scraper.scrape():
            if progress:
                yield f"data: {json.dumps(progress)}"

        # After scraping, process and embed all HTML files. The crawler
        # mirrors pages under "<netloc with dots replaced by underscores>_docs".
        root_url_dir = urlparse(
            data.base_url).netloc.replace(".", "_") + "_docs"
        collection_path = os.path.join(scraper.output_dir, root_url_dir)
        vector_store = get_vectorstore(
            data.api_key, data.collection_name, data.is_local, data.local_embedding_model)

        # Get all HTML files recursively
        html_files = get_html_files(collection_path)
        print(f"Found {len(html_files)} HTML files")

        # Process files in batches for better performance
        batch_size = 50
        # Ceiling division so a partial final batch is still counted.
        total_batches = (len(html_files) + batch_size - 1) // batch_size
        for i in range(0, len(html_files), batch_size):
            batch = html_files[i:i + batch_size]
            batch_docs = []

            for file_path in batch:
                content = load_html(file_path)
                if content:  # skip files that failed to load/parse
                    split_content = split_text(content, file_path)
                    batch_docs.extend(split_content)

            if batch_docs:
                vector_store.add_documents(batch_docs)

            current_batch = i//batch_size + 1
            progress_data = {
                "status": "progress",
                "data": {
                    "message": f"Part 2 of 2: Processing documents batch {current_batch}/{total_batches}",
                    "chunk": current_batch,
                    "total_chunks": total_batches,
                    "percent_complete": f"{(current_batch/total_batches * 100):.1f}%"
                }
            }
            yield f"data: {json.dumps(progress_data)}"

        final_message = f"Successfully crawled and embedded {len(scraper.visited_urls)} pages from {data.base_url}"
        success_data = {
            "status": "success",
            "data": {
                "message": final_message
            }
        }
        yield f"data: {json.dumps(success_data)}"
    except Exception as e:
        # Report the failure to both stdout and the root logger, then emit a
        # final error event so the stream consumer is not left hanging.
        error_message = str(e)
        print(f"Error during webcrawl: {error_message}")
        logging.error(f"Error during webcrawl: {error_message}")
        error_data = {
            "status": "error",
            "data": {
                "message": error_message
            }
        }
        yield f"data: {json.dumps(error_data)}"


================================================
FILE: Backend/src/llms/llmQuery.py
================================================
from src.endpoint.models import ChatCompletionRequest
from src.llms.providers.ooba import ooba_query
from src.llms.providers.openai import openai_query
from src.llms.providers.ollama import ollama_query
from src.llms.providers.local import local_query
from typing import Optional


async def llm_query(data: ChatCompletionRequest, api_key: Optional[str] = None):
    """Route a chat completion to the matching provider backend.

    Dispatch order: Ooba -> Ollama -> local model -> OpenAI (default).

    Args:
        data: the chat request, whose is_ooba/is_ollama/is_local flags select
            the provider.
        api_key: provider API key for hosted backends.

    Returns:
        The provider's chat.completion-style response dict.

    Raises:
        Re-raises any provider error after printing it.
    """
    try:
        if data.is_ooba:
            return ooba_query(data, data.messages)
        # Bug fix: this branch tested `data.is_ollama is None`, which is never
        # true for the model's `False` default — Ollama requests fell through
        # to the OpenAI provider.
        elif data.is_ollama:
            return ollama_query(data, data.messages)
        elif data.is_local:
            return await local_query(data)
        else:
            return openai_query(data, api_key, data.messages)

    except Exception as e:
        print(f"Error in llm_query: {str(e)}")
        raise e


================================================
FILE: Backend/src/llms/messages/formMessages.py
================================================
from src.endpoint.models import QueryRequest


def form_messages(data: QueryRequest):
    """Build the [system, user] message pair for a chat request.

    Uses data.query when the attribute exists, otherwise data.input.

    Raises:
        ValueError: when the system prompt or the user text is empty/missing.
    """
    try:
        if not data.prompt:
            raise ValueError("System prompt cannot be null")

        if hasattr(data, 'query'):
            user_text = data.query
        else:
            user_text = data.input

        if not user_text:
            raise ValueError("User query/input cannot be null")

        return [
            {"role": "system", "content": data.prompt},
            {"role": "user", "content": user_text},
        ]
    except Exception as e:
        print(f"Error in form_messages: {str(e)}")
        raise e


================================================
FILE: Backend/src/llms/providers/local.py
================================================
import asyncio
import json
import time
import logging
from src.endpoint.api import chat_completion_stream
from src.endpoint.models import ChatCompletionRequest, ModelLoadRequest
from src.models.manager import model_manager
from src.models.exceptions import ModelLoadError

logger = logging.getLogger(__name__)


async def local_query(data: ChatCompletionRequest):
    """Run a chat completion against the locally managed model and collapse
    the streamed chunks into one OpenAI-style chat.completion response.

    Loads (or swaps in) the requested model if it is not the active one, then
    drains the SSE stream from chat_completion_stream, concatenating the
    delta contents of each chunk.

    Raises:
        ModelLoadError: if the requested model cannot be loaded.
    """
    try:
        # Check if model is loaded and load it if necessary
        if not model_manager.is_model_loaded() or model_manager.model_name != data.model:
            logger.info(f"Loading model {data.model} as it is not currently loaded")
            # Create model load request
            load_request = ModelLoadRequest(
                model_name=data.model,
                model_type="Transformers",  # Default to Transformers for now
                device="auto",
                trust_remote_code=True,
                use_safetensors=True,
                compute_dtype="float16"
            )
            try:
                # Load the model
                model_manager.load_model(load_request)
                logger.info(f"Successfully loaded model {data.model}")
            except ModelLoadError as e:
                logger.error(f"Failed to load model {data.model}: {str(e)}")
                raise

        # Get the generator
        response_gen = chat_completion_stream(data)
        combined_content = ""          # accumulated assistant text across all chunks
        response_id = None             # id of the first chunk, reused for the final response
        finish_reason = None

        # Process each chunk of the SSE stream ("data: {...}" lines).
        async for chunk in response_gen:
            if chunk.startswith("data: "):
                chunk = chunk[6:]  # Remove "data: " prefix
                if chunk.strip() == "[DONE]":
                    continue

                try:
                    chunk_data = json.loads(chunk)
                    if "choices" in chunk_data and len(chunk_data["choices"]) > 0:
                        choice = chunk_data["choices"][0]
                        if "delta" in choice:
                            delta = choice["delta"]
                            if "content" in delta:
                                combined_content += delta["content"]
                            if "finish_reason" in choice and choice["finish_reason"]:
                                finish_reason = choice["finish_reason"]
                        if not response_id:
                            response_id = chunk_data.get("id")
                except json.JSONDecodeError as e:
                    # Malformed chunks are skipped rather than aborting the stream.
                    logger.warning(f"Failed to parse chunk as JSON: {str(e)}")
                    continue

        # Create final response structure (non-streaming chat.completion shape)
        response = {
            "id": response_id or f"chatcmpl-{int(time.time())}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": data.model,
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": combined_content
                },
                "finish_reason": finish_reason or "stop"
            }]
        }

        return response

    except Exception as e:
        logger.error(f"Error in local_query: {str(e)}", exc_info=True)
        raise


================================================
FILE: Backend/src/llms/providers/ollama.py
================================================
from src.endpoint.models import QueryRequest
import requests
import json
import time



def ollama_query(data: QueryRequest, messages: list = None):
    """Send a chat request to a locally running Ollama server and normalize
    the reply into the OpenAI chat.completion shape.

    Args:
        data: request carrying the model name and generation settings.
        messages: OpenAI-style message dicts to send.

    Returns:
        A chat.completion-style dict (token counts are -1: not reported by
        Ollama's /api/chat).

    Raises:
        ValueError: on a non-200 response or unparseable JSON.
    """
    try:
        print("Local Ollama model enabled")
        model_data = {
            "model": data.model,
            "messages": messages,
            "stream": False,  # Disable streaming for now
            "max_tokens": data.max_completion_tokens,
            # Bug fix: this key appeared twice in the old dict literal; the
            # duplicate was dead code. -1 keeps the model resident.
            "keep_alive": -1,
        }
        print(f"Model data: {model_data}")
        response = requests.post(
            "http://localhost:11434/api/chat", json=model_data)

        print(f"Raw response: {response.text}")

        if response.status_code != 200:
            # Bug fix: the old code recursively re-called ollama_query(data)
            # WITHOUT the messages argument on any non-200 reply, creating an
            # unbounded retry/recursion loop. Fail loudly instead.
            raise ValueError(
                f"Ollama request failed with status {response.status_code}: {response.text}")

        try:
            response_json = response.json()
            print(f"Parsed response: {response_json}")
            # Extract content from the nested message structure
            content = response_json.get("message", {}).get(
                "content", "No response from model")

            # Standardized response format
            return {
                "id": f"local-{data.model}-{int(time.time())}",
                "choices": [{
                    "finish_reason": "stop",
                    "index": 0,
                    "message": {
                            "content": content,
                            "role": "assistant"
                            }
                }],
                "created": int(time.time()),
                "model": data.model,
                "object": "chat.completion",
                "usage": {
                    "completion_tokens": -1,  # Token count not available for local models
                    "prompt_tokens": -1,
                    "total_tokens": -1
                }
            }
        except json.JSONDecodeError as e:
            print(f"JSON decode error: {e}")
            raise ValueError(
                f"Failed to parse response from Ollama: {e}")
    except Exception as e:
        print(f"Error in ollama_query: {str(e)}")
        raise e


================================================
FILE: Backend/src/llms/providers/ooba.py
================================================
from src.endpoint.models import QueryRequest
import requests


def ooba_query(data: QueryRequest, messages: list = None):
    """Forward a chat request to a locally running text-generation-webui
    (Ooba) server and return its parsed JSON response."""
    try:
        print("Ooba mode enabled")
        payload = {
            "messages": messages,
            "mode": "chat",
            "character": data.character,
        }
        resp = requests.post(
            "http://127.0.0.1:5000/v1/chat/completions", json=payload)
        return resp.json()
    except Exception as e:
        print(f"Error in ooba_query: {str(e)}")
        raise e


================================================
FILE: Backend/src/llms/providers/openai.py
================================================
from src.endpoint.models import QueryRequest
from openai import OpenAI
from typing import Optional


def openai_query(data: QueryRequest, api_key: Optional[str] = None, messages: list = None):
    """Run a chat completion against the OpenAI API.

    Args:
        data: request carrying model name and generation settings.
        api_key: the user's OpenAI key.
        messages: OpenAI-style message dicts.

    Returns:
        The response converted to a plain dict for consistent formatting.

    Raises:
        Re-raises any OpenAI client error after printing it.
    """
    try:
        # Security fix: the old code printed the raw API key to stdout/logs.
        # Log only whether a key is present, never its value.
        print(f"OpenAI query (api_key {'set' if api_key else 'missing'})")
        client = OpenAI(api_key=api_key)
        response = client.chat.completions.create(
            model=data.model,
            messages=messages,
            response_format={
                "type": "text"
            },
            temperature=data.temperature,
            max_completion_tokens=data.max_completion_tokens,
            top_p=data.top_p,
            frequency_penalty=data.frequency_penalty,
            presence_penalty=data.presence_penalty
        )
        # Convert OpenAI response to dict for consistent format
        return response.model_dump()
    except Exception as e:
        print(f"Error in openai_query: {str(e)}")
        raise e


================================================
FILE: Backend/src/models/__init__.py
================================================


================================================
FILE: Backend/src/models/exceptions.py
================================================
class ModelLoadError(Exception):
    """Raised when a model cannot be loaded (bad path, missing backend, init failure)."""

class ModelNotFoundError(Exception):
    """Raised when a requested model does not exist locally or remotely."""

class ModelDownloadError(Exception):
    """Raised when fetching model files from a remote repository fails."""


================================================
FILE: Backend/src/models/loaders/__init__.py
================================================
# Public loader registry: re-export every concrete loader implementation so
# callers can import them directly from `src.models.loaders`.
from .transformers import TransformersLoader
from .llamacpp import LlamaCppLoader
from .llamaccphf import LlamaCppHFLoader
from .exllama import ExLlamaV2Loader, ExLlamaV2HFLoader
from .hqq import HQQLoader
from .tensorrt import TensorRTLoader

# Names exported by `from src.models.loaders import *`.
__all__ = [
    'TransformersLoader',
    'LlamaCppLoader',
    'LlamaCppHFLoader',
    'ExLlamaV2Loader',
    'ExLlamaV2HFLoader',
    'HQQLoader',
    'TensorRTLoader',
] 

================================================
FILE: Backend/src/models/loaders/base.py
================================================
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
import logging
from dataclasses import asdict

from src.endpoint.models import ModelLoadRequest
from src.models.exceptions import ModelLoadError

logger = logging.getLogger(__name__)


class BaseLoader(ABC):
    """
    Abstract base class for model loaders.

    This class defines the interface that all model loaders must implement
    and provides some common utility methods.

    Attributes:
        request (ModelLoadRequest): The request object containing loading parameters
        manager (Any): Reference to the model manager instance
        model_path (Path): Path to the model files
    """

    def __init__(self, request: ModelLoadRequest, manager: Any):
        """
        Initialize the loader with request parameters and manager reference.

        Args:
            request: ModelLoadRequest object containing all loading parameters
            manager: Reference to the ModelManager instance
        """
        self.request = request
        self.manager = manager
        self.model_path = self._resolve_model_path()

    @abstractmethod
    def load(self) -> Tuple[Any, Any]:
        """
        Load the model and tokenizer.

        Returns:
            Tuple containing (model, tokenizer)

        Raises:
            ModelLoadError: If there's an error during model loading
        """
        pass

    @abstractmethod
    def get_metadata(self) -> Optional[Dict[str, Any]]:
        """
        Get model metadata without loading the full model.

        Returns:
            Dictionary containing model metadata or None if not available
        """
        pass

    @abstractmethod
    def get_config(self) -> Dict[str, Any]:
        """
        Get the current model configuration.

        Returns:
            Dictionary containing model configuration
        """
        pass

    def _resolve_model_path(self) -> Path:
        """
        Resolve the model path from the request parameters.

        Uses the explicit `model_path` from the request when present,
        otherwise defaults to `models/<model_name>`.

        Returns:
            Path object pointing to the model location

        Raises:
            ModelLoadError: If the path cannot be resolved
        """
        try:
            if self.request.model_path:
                path = Path(self.request.model_path)
            else:
                path = Path(f"models/{self.request.model_name}")

            # Create parent directories if they don't exist
            path.parent.mkdir(parents=True, exist_ok=True)

            return path
        except Exception as e:
            raise ModelLoadError(f"Failed to resolve model path: {str(e)}")

    def get_request_dict(self) -> Dict[str, Any]:
        """
        Convert the request object to a dictionary, filtering out None values.

        Returns:
            Dictionary containing all non-None request parameters
        """
        # NOTE(review): asdict() only works on dataclass instances; if
        # ModelLoadRequest is a pydantic model this raises TypeError -- confirm.
        return {k: v for k, v in asdict(self.request).items() if v is not None}

    def log_loading_info(self) -> None:
        """Log the key parameters of the model being loaded."""
        logger.info(f"Loading model: {self.request.model_name}")
        logger.info(f"Model type: {self.request.model_type}")
        logger.info(f"Model path: {self.model_path}")
        logger.info(f"Device: {self.request.device}")

    @staticmethod
    def cleanup(model: Any) -> None:
        """
        Clean up model resources.

        Args:
            model: The model instance to clean up
        """
        try:
            # Move parameters off the accelerator first when supported.
            if hasattr(model, 'cpu'):
                model.cpu()
            # Drops only this local reference; the object is actually freed
            # once the caller releases its own reference too.
            del model
        except Exception as e:
            logger.warning(f"Error during model cleanup: {str(e)}")

    def validate_model_path(self) -> None:
        """
        Validate that the model path exists and is accessible.

        Raises:
            ModelLoadError: If the model path is invalid or inaccessible
        """
        if not self.model_path.exists():
            raise ModelLoadError(
                f"Model path does not exist: {self.model_path}")

    def get_common_metadata(self) -> Dict[str, Any]:
        """
        Get common metadata that applies to all model types.

        Returns:
            Dictionary containing common metadata fields
        """
        return {
            "model_name": self.request.model_name,
            "model_type": self.request.model_type,
            "model_path": str(self.model_path),
            "device": self.request.device,
            # None rather than a stat() failure when the path is absent.
            "file_size": self.model_path.stat().st_size if self.model_path.exists() else None,
        }

    def validate_request(self) -> None:
        """
        Validate the model load request parameters.

        Raises:
            ModelLoadError: If the request parameters are invalid
        """
        if not self.request.model_name:
            raise ModelLoadError("Model name is required")

        if not self.request.model_type:
            raise ModelLoadError("Model type is required")

    def check_dependencies(self) -> None:
        """
        Check if all required dependencies are installed.

        Raises:
            ModelLoadError: If any required dependency is missing
        """
        pass  # Implement in specific loaders

    def prepare_loading(self) -> None:
        """
        Prepare for model loading by performing all necessary checks.

        This method combines several validation steps and should be
        called at the start of the load method in implementing classes.

        Raises:
            ModelLoadError: If any preparation step fails
        """
        try:
            self.validate_request()
            self.check_dependencies()
            self.validate_model_path()
            self.log_loading_info()
        except ModelLoadError:
            # Already a descriptive loader error -- propagate it unchanged
            # instead of wrapping it in a second, redundant ModelLoadError.
            raise
        except Exception as e:
            raise ModelLoadError(
                f"Failed to prepare for model loading: {str(e)}")

    def get_device_config(self) -> Dict[str, Any]:
        """
        Get device-specific configuration.

        Returns:
            Dictionary containing device configuration
        """
        import torch

        return {
            "device": self.request.device,
            "cuda_available": torch.cuda.is_available(),
            "cuda_device_count": torch.cuda.device_count() if torch.cuda.is_available() else 0,
            # hasattr guard keeps this working on torch builds without MPS.
            "mps_available": hasattr(torch.backends, "mps") and torch.backends.mps.is_available(),
        }

    def get_memory_info(self) -> Dict[str, Any]:
        """
        Get system memory information.

        Returns:
            Dictionary containing memory information, or an empty dict when
            psutil is not installed (memory info is best-effort only).
        """
        try:
            import psutil
            vm = psutil.virtual_memory()
            return {
                "total_memory": vm.total,
                "available_memory": vm.available,
                "memory_percent": vm.percent,
            }
        except ImportError:
            return {}

    def get_system_info(self) -> Dict[str, Any]:
        """
        Get system information.

        Returns:
            Dictionary containing system information
        """
        import platform

        return {
            "platform": platform.system(),
            "platform_release": platform.release(),
            "python_version": platform.python_version(),
            "device_config": self.get_device_config(),
            "memory_info": self.get_memory_info(),
        }

    def log_error(self, error: Exception, context: str = "") -> None:
        """
        Log an error with context.

        Args:
            error: The exception that occurred
            context: Additional context about where/why the error occurred
        """
        prefix = f"{context}: " if context else ""
        logger.error(f"{prefix}{error}", exc_info=True)

    def __repr__(self) -> str:
        """
        Get string representation of the loader.

        Returns:
            String representation including model name and type
        """
        return f"{self.__class__.__name__}(model_name={self.request.model_name}, model_type={self.request.model_type})"


================================================
FILE: Backend/src/models/loaders/exllama.py
================================================
import logging
from typing import Any, Dict, Optional, Tuple

from src.models.loaders.base import BaseLoader
from src.models.exceptions import ModelLoadError
from transformers import AutoTokenizer

logger = logging.getLogger(__name__)


class ExLlamaV2Loader(BaseLoader):
    """Loader for ExLlamaV2 models (CUDA-only backend)."""

    def load(self) -> Tuple[Any, Any]:
        """Load an ExLlamav2 model and its native tokenizer.

        Returns:
            Tuple of (ExLlamaV2 model, ExLlamaV2Tokenizer).

        Raises:
            ModelLoadError: If exllamav2 is not installed, the model path
                is missing, or no CUDA device is available.
        """
        try:
            from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Tokenizer
            import torch
        except ImportError:
            raise ModelLoadError(
                "exllamav2 is not installed. Please install it from the ExLlamaV2 repository")

        if not self.model_path.exists():
            raise ModelLoadError(
                f"Model path does not exist: {self.model_path}")

        # ExLlamaV2 has no CPU fallback -- fail fast before touching any
        # CUDA state (previously the cache-clearing ran first).
        if not torch.cuda.is_available():
            raise ModelLoadError("GPU is required for ExLlama2")

        # Clear CUDA cache so the model gets maximum headroom.
        torch.cuda.empty_cache()
        logger.info(f"CUDA Device: {torch.cuda.get_device_name(0)}")
        logger.info(
            f"CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**2:.0f}MB")

        # Force CUDA device
        # NOTE(review): torch.set_default_tensor_type is deprecated in newer
        # torch releases; consider set_default_dtype + set_default_device.
        torch.set_default_device('cuda')
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

        config = ExLlamaV2Config()
        config.model_dir = str(self.model_path)
        config.max_seq_len = self.request.max_seq_len or 2048
        config.compress_pos_emb = self.request.compress_pos_emb
        config.alpha_value = self.request.alpha_value
        config.calculate_rotary_embedding_base()  # Important for GPU performance

        logger.info(f"Loading model with config: {config.__dict__}")
        model = ExLlamaV2(config)

        # Force model to GPU
        model.load()
        for param in model.parameters():
            param.data = param.data.cuda()

        logger.info(
            f"Model loaded on GPU. CUDA Memory: {torch.cuda.memory_allocated() / 1024**2:.0f}MB")
        logger.info(
            f"Device for first parameter: {next(model.parameters()).device}")

        tokenizer = ExLlamaV2Tokenizer(config)
        logger.info("Model and tokenizer loaded successfully")

        return model, tokenizer

    def get_metadata(self) -> Optional[Dict[str, Any]]:
        """Return lightweight metadata, or None when the path is missing."""
        if not self.model_path.exists():
            return None
        return {
            "model_type": "ExLlamav2",
            "model_path": str(self.model_path),
            "file_size": self.model_path.stat().st_size
        }

    def get_config(self) -> Dict[str, Any]:
        """Return the effective loader configuration from the request."""
        return {
            "model_type": "ExLlamav2",
            "model_name": self.request.model_name,
            "device": self.request.device,
            "max_seq_len": self.request.max_seq_len,
            "compress_pos_emb": self.request.compress_pos_emb,
            "alpha_value": self.request.alpha_value
        }


class ExLlamaV2HFLoader(BaseLoader):
    """Loader for ExLlamaV2 models with HuggingFace tokenizer."""

    def _delegate(self) -> "ExLlamaV2Loader":
        # All model handling is delegated to the plain ExLlamaV2 loader.
        return ExLlamaV2Loader(self.request, self.manager)

    def load(self) -> Tuple[Any, Any]:
        """Load the ExLlamaV2 model, then pair it with an HF tokenizer."""
        model, _native_tokenizer = self._delegate().load()

        # Prefer an explicit tokenizer path, otherwise read it from the
        # model directory itself.
        source = self.request.tokenizer_path or self.model_path
        hf_tokenizer = AutoTokenizer.from_pretrained(
            source,
            trust_remote_code=self.request.trust_remote_code,
            use_fast=self.request.use_fast_tokenizer,
        )

        return model, hf_tokenizer

    def get_metadata(self) -> Optional[Dict[str, Any]]:
        """Metadata matches the non-HF ExLlamaV2 loader's."""
        return self._delegate().get_metadata()

    def get_config(self) -> Dict[str, Any]:
        """Configuration matches the non-HF ExLlamaV2 loader's."""
        return self._delegate().get_config()


================================================
FILE: Backend/src/models/loaders/hqq.py
================================================
import logging
from typing import Any, Dict, Optional, Tuple
import requests
from tqdm import tqdm

from src.models.loaders.base import BaseLoader
from src.models.exceptions import ModelLoadError, ModelDownloadError
from transformers import AutoTokenizer

logger = logging.getLogger(__name__)


class HQQLoader(BaseLoader):
    """Loader for HQQ quantized models."""

    def load(self) -> Tuple[Any, Any]:
        """Load an HQQ-quantized model and its tokenizer.

        Downloads the model from HuggingFace first when the request names a
        repo id (contains '/') and no local copy exists.

        Returns:
            Tuple of (model, tokenizer).

        Raises:
            ModelLoadError: If hqq is not installed or loading fails.
        """
        try:
            from hqq.core.quantize import HQQBackend, HQQLinear
            from hqq.models.hf.base import AutoHQQHFModel
        except ImportError:
            raise ModelLoadError(
                "hqq is not installed. Please install it from the HQQ repository")

        try:
            # Create models directory if it doesn't exist
            self.model_path.parent.mkdir(parents=True, exist_ok=True)
            logger.info(f"Using model path: {self.model_path}")

            # If it's a HuggingFace model ID and doesn't exist locally, try to download it
            if '/' in self.request.model_name and not self.model_path.exists():
                self._download_model()

            if not self.model_path.exists():
                raise ModelLoadError(
                    f"Model path does not exist: {self.model_path}")

            logger.info(f"Loading HQQ model from {self.model_path}")
            model = AutoHQQHFModel.from_quantized(str(self.model_path))
            logger.info("Model loaded successfully")

            logger.info(f"Setting HQQ backend to {self.request.hqq_backend}")
            HQQLinear.set_backend(
                getattr(HQQBackend, self.request.hqq_backend))
            logger.info("HQQ backend set successfully")

            logger.info("Loading tokenizer")
            tokenizer = AutoTokenizer.from_pretrained(
                self.request.tokenizer_path or self.model_path,
                trust_remote_code=self.request.trust_remote_code,
                use_fast=self.request.use_fast_tokenizer,
            )
            logger.info("Tokenizer loaded successfully")

            return model, tokenizer

        except Exception as e:
            raise ModelLoadError(f"Failed to load HQQ model: {str(e)}")

    def _download_model(self) -> None:
        """Download the required HQQ model files from HuggingFace.

        Raises:
            ModelDownloadError: If a required file is missing from the
                repository or any download fails.
        """
        try:
            # BUG FIX: files are written into self.model_path below, so the
            # directory itself (not just its parent) must exist first;
            # previously only the parent was created and open() failed.
            self.model_path.mkdir(parents=True, exist_ok=True)

            # Get repository contents
            api_url = f"https://huggingface.co/api/models/{self.request.model_name}/tree/main"
            headers = {"Accept": "application/json"}
            if self.request.hf_token:
                headers["Authorization"] = f"Bearer {self.request.hf_token}"

            logger.info(f"Fetching repository contents from {api_url}")
            response = requests.get(api_url, headers=headers)
            response.raise_for_status()
            files = response.json()
            logger.info(f"Found {len(files)} files in repository")

            # Required files for HQQ models
            required_files = ['qmodel.pt', 'config.json',
                              'tokenizer.model', 'tokenizer_config.json', 'tokenizer.json']
            logger.info(f"Required files: {required_files}")

            # Download each required file
            for file_name in required_files:
                file_info = next(
                    (f for f in files if f['path'] == file_name), None)
                if not file_info:
                    logger.error(
                        f"Required file {file_name} not found in repository. Available files: {[f['path'] for f in files]}")
                    raise ModelDownloadError(
                        f"Required file {file_name} not found in repository {self.request.model_name}")

                download_url = f"https://huggingface.co/{self.request.model_name}/resolve/main/{file_name}"
                file_path = self.model_path / file_name

                # Download the file with progress bar
                logger.info(
                    f"Downloading {file_name} ({file_info.get('size', 'unknown size')}) from {download_url}")
                response = requests.get(
                    download_url, stream=True, headers=headers)
                response.raise_for_status()

                total_size = int(response.headers.get('content-length', 0))
                block_size = 8192  # 8 KB

                with open(file_path, 'wb') as f, tqdm(
                    desc=file_name,
                    total=total_size,
                    unit='iB',
                    unit_scale=True,
                    unit_divisor=1024,
                ) as pbar:
                    for data in response.iter_content(block_size):
                        size = f.write(data)
                        pbar.update(size)

                logger.info(
                    f"Successfully downloaded {file_name} to {file_path}")

        except Exception as e:
            logger.error(
                f"Failed to download model: {str(e)}", exc_info=True)
            # Clean up any partially downloaded files
            if self.model_path.exists():
                import shutil
                shutil.rmtree(self.model_path)
            raise ModelDownloadError(f"Failed to download model: {str(e)}")

    def get_metadata(self) -> Optional[Dict[str, Any]]:
        """Return lightweight metadata, or None when the path is missing."""
        if not self.model_path.exists():
            return None
        return {
            "model_type": "HQQ",
            "model_path": str(self.model_path),
            "file_size": self.model_path.stat().st_size,
            "backend": self.request.hqq_backend
        }

    def get_config(self) -> Dict[str, Any]:
        """Return the effective loader configuration from the request."""
        return {
            "model_type": "HQQ",
            "model_name": self.request.model_name,
            "device": self.request.device,
            "backend": self.request.hqq_backend
        }


================================================
FILE: Backend/src/models/loaders/llamaccphf.py
================================================
from typing import Any, Tuple

from src.models.loaders.llamacpp import LlamaCppLoader


class LlamaCppHFLoader(LlamaCppLoader):
    """
    Loader for llama.cpp models with HuggingFace tokenizer.
    Inherits from LlamaCppLoader but uses a separate HF tokenizer.
    """

    def load(self) -> Tuple[Any, Any]:
        """Load the GGUF model via the parent loader, then attach an HF tokenizer."""
        from transformers import AutoTokenizer

        # The parent loader returns the model in both positions; the native
        # tokenizer slot is discarded here.
        model, _ = super().load()

        # Resolve where the tokenizer files live: explicit tokenizer path
        # first, then the model path, then the conventional models/ folder.
        if self.request.tokenizer_path:
            tokenizer_source = self.request.tokenizer_path
        elif self.request.model_path:
            tokenizer_source = self.request.model_path
        else:
            tokenizer_source = f"models/{self.request.model_name}"

        hf_tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_source,
            trust_remote_code=self.request.trust_remote_code,
            use_fast=self.request.use_fast_tokenizer,
        )

        return model, hf_tokenizer

================================================
FILE: Backend/src/models/loaders/llamacpp.py
================================================
import os
import logging
import requests
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
from tqdm import tqdm
import sys

from src.models.loaders.base import BaseLoader
from src.endpoint.models import ModelLoadRequest
from src.models.exceptions import ModelDownloadError, ModelLoadError

logger = logging.getLogger(__name__)


class LlamaCppLoader(BaseLoader):
    """
    Loader for llama.cpp models. Handles both local and remote model loading,
    with support for GGUF format and various optimizations.
    """

    def __init__(self, request: ModelLoadRequest, manager: Any):
        """Initialize the loader and reset llama.cpp handles."""
        super().__init__(request, manager)
        # Placeholders for llama.cpp state; load() does not populate these.
        self.cache = None
        self.llama = None

    def load(self) -> Tuple[Any, Any]:
        """Load a llama.cpp model and return the model and tokenizer.

        Returns:
            Tuple of (model, tokenizer). llama.cpp's `Llama` object exposes
            its own tokenize/detokenize API, so the same instance is
            intentionally returned in both positions.

        Raises:
            ModelLoadError: If the model file is missing or loading fails.
        """
        try:
            import torch
            from llama_cpp import Llama

            # Force CUDA environment variables before anything else
            if torch.cuda.is_available():
                os.environ['CUDA_VISIBLE_DEVICES'] = '0'
                os.environ['LLAMA_CUDA_FORCE'] = '1'

                # Log CUDA information
                logger.info("CUDA is available")
                logger.info(f"CUDA Device: {torch.cuda.get_device_name(0)}")
                logger.info(
                    f"Total CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**2:.0f}MB")
                torch.cuda.empty_cache()

            # Get model path and ensure it exists
            model_path = self._get_model_path()
            if not model_path.exists():
                raise ModelLoadError(f"Model file not found: {model_path}")

            logger.info(f"Loading model from path: {model_path}")

            # Simple CUDA parameters that match working Q8 configurations
            model_params = {
                "model_path": str(model_path),
                "n_ctx": int(self.request.n_ctx) if self.request.n_ctx is not None else 2048,
                "n_batch": int(self.request.n_batch) if self.request.n_batch is not None else 512,
                "n_gpu_layers": -1,
                "main_gpu": 0,
                "use_mmap": True,  # Enable memory mapping
                "use_mlock": False,
                "verbose": True
            }

            # Log parameters
            logger.info(f"Loading model with parameters: {model_params}")

            # Load model
            model = Llama(**model_params)
            logger.info("Initial model load successful")

            # Simple CUDA test
            if torch.cuda.is_available():
                try:
                    logger.info("Testing model...")
                    # Tokenization smoke test; the token ids themselves are
                    # not needed (previously bound to an unused local).
                    model.tokenize(b"test")
                    logger.info("Tokenization successful")

                    # Log memory usage
                    allocated = torch.cuda.memory_allocated() / 1024**2
                    reserved = torch.cuda.memory_reserved() / 1024**2
                    logger.info(f"CUDA Memory allocated: {allocated:.2f}MB")
                    logger.info(f"CUDA Memory reserved: {reserved:.2f}MB")

                except Exception as e:
                    logger.error(f"Model test failed: {e}")
                    raise ModelLoadError(f"Failed to initialize model: {e}")

            logger.info("Model loaded successfully")
            return model, model

        except Exception as e:
            logger.error(f"Error loading model: {str(e)}", exc_info=True)
            raise ModelLoadError(f"Failed to load llama.cpp model: {str(e)}")

    def _get_model_path(self) -> Path:
        """Get and validate the model path, downloading if necessary.

        Resolution order (first match wins):
          1. Ollama blob lookup when the path mentions `.ollama`.
          2. An existing *.gguf file in the resolved directory.
          3. Download from Hugging Face when the name looks like a repo id.

        Returns:
            Path to a usable model file (or directory for the non-Ollama case).

        Raises:
            ModelLoadError: If no usable model file can be located.
        """
        # Handle both direct file paths and model names
        if self.request.model_path:
            model_path = Path(self.request.model_path)
        else:
            # Convert HF style paths to filesystem paths
            safe_name = self.request.model_name.replace('/', os.path.sep)
            model_path = Path('models') / safe_name

        model_dir = model_path if model_path.is_dir() else model_path.parent
        model_dir.mkdir(parents=True, exist_ok=True)

        # Special handling for Ollama paths
        if '.ollama' in str(model_path):
            logger.info("Detected Ollama model path")

            # Determine Ollama directory based on OS
            if sys.platform == 'darwin':  # macOS specific path
                ollama_dir = Path(os.path.expanduser('~/.ollama'))
                logger.info(f"Using macOS Ollama directory: {ollama_dir}")
            else:  # Windows and Linux
                # On Linux %USERPROFILE% is left unexpanded, so exists() is
                # False and we fall back to ~/.ollama below.
                ollama_dir = Path(os.path.expandvars('%USERPROFILE%\\.ollama'))
                if not ollama_dir.exists():
                    ollama_dir = Path(os.path.expanduser('~/.ollama'))

            if not ollama_dir.exists():
                raise ModelLoadError(
                    f"Ollama directory not found at: {ollama_dir}")

            # Extract model name from path
            # NOTE(review): splits on 'registry.ollama.ai/library/' only when
            # the request did not name the model explicitly.
            model_name = self.request.model_name
            if not model_name and 'registry.ollama.ai/library/' in str(model_path):
                model_name = str(model_path).split(
                    'registry.ollama.ai/library/')[-1].split('/')[0]
            logger.info(f"Using model name: {model_name}")

            # First check for the model file in the models directory
            models_dir = ollama_dir / 'models'
            logger.info(f"Checking Ollama models directory: {models_dir}")

            if models_dir.exists():
                # First try to find a .gguf file
                # NOTE(review): picks the first glob match regardless of which
                # model it belongs to -- confirm this is intended.
                gguf_files = list(models_dir.glob("**/*.gguf"))
                if gguf_files:
                    logger.info(f"Found Ollama GGUF file: {gguf_files[0]}")
                    return gguf_files[0]

                # Look for manifest
                manifest_dir = models_dir / 'manifests' / \
                    'registry.ollama.ai' / 'library' / model_name
                manifest_path = manifest_dir / 'latest'
                logger.info(f"Looking for manifest at: {manifest_path}")

                if manifest_path.exists():
                    with open(manifest_path, 'r') as f:
                        manifest = f.read()
                        logger.info(f"Manifest content: {manifest}")
                        import json
                        try:
                            manifest_data = json.loads(manifest)
                            # The model weights layer carries this media type;
                            # its digest names the blob file on disk.
                            for layer in manifest_data.get('layers', []):
                                if layer.get('mediaType') == 'application/vnd.ollama.image.model':
                                    blob_hash = layer.get('digest', '').replace(
                                        'sha256:', 'sha256-')
                                    if blob_hash:
                                        # Check both blobs and models directories for the file
                                        possible_paths = [
                                            models_dir / 'blobs' / blob_hash,
                                            ollama_dir / 'blobs' / blob_hash
                                        ]

                                        for blob_path in possible_paths:
                                            logger.info(
                                                f"Checking for blob at: {blob_path}")
                                            if blob_path.exists():
                                                logger.info(
                                                    f"Found Ollama model blob: {blob_path}")
                                                return blob_path

                        except json.JSONDecodeError as e:
                            # Fall through to the "not found" error below.
                            logger.error(f"Failed to parse manifest: {e}")
                            pass

            logger.warning(f"No Ollama model files found in: {models_dir}")
            raise ModelLoadError(
                f"Could not find Ollama model files in {models_dir}")

        # Check for existing GGUF files in the directory
        if model_dir.exists():
            existing_gguf = list(model_dir.glob("*.gguf"))
            if existing_gguf:
                logger.info(f"Found existing GGUF model: {existing_gguf[0]}")
                return existing_gguf[0]

        # Only attempt to download if it looks like a HF model ID
        if '/' in self.request.model_name:
            return self._download_model(model_dir)

        raise ModelLoadError(f"No model files found in: {model_dir}")

    def _download_model(self, model_dir: Path) -> Path:
        """Fetch a GGUF file for this model from Hugging Face.

        Prefers q4_k_m quantisations (smallest file on ties) and skips the
        download entirely when a non-empty local copy already exists.

        Raises:
            ModelDownloadError: If the repo has no GGUF files or the
                download fails.
        """
        logger.info(f"Attempting to download model: {self.request.model_name}")

        try:
            # Ask the HF API for the repository file listing.
            api_url = f"https://huggingface.co/api/models/{self.request.model_name}/tree/main"
            request_headers = {"Accept": "application/json"}
            if self.request.hf_token:
                request_headers["Authorization"] = f"Bearer {self.request.hf_token}"

            listing = requests.get(api_url, headers=request_headers)
            listing.raise_for_status()
            repo_files = listing.json()

            # Keep only GGUF entries.
            candidates = [
                entry for entry in repo_files
                if entry.get('path', '').endswith('.gguf')
            ]
            if not candidates:
                raise ModelDownloadError(
                    f"No GGUF files found in repository {self.request.model_name}")

            def preference(entry):
                # q4_k_m variants first, then smallest file size.
                preferred = 0 if 'q4_k_m' in entry['path'].lower() else 1
                return (preferred, entry.get('size', float('inf')))

            candidates.sort(key=preference)

            chosen = candidates[0]
            chosen_name = chosen['path']
            download_url = f"https://huggingface.co/{self.request.model_name}/resolve/main/{chosen_name}"
            target_path = model_dir / chosen_name

            # Re-download only when missing or previously truncated to zero.
            if not target_path.exists() or target_path.stat().st_size == 0:
                self._download_file(download_url, target_path, request_headers)

            return target_path

        except Exception as e:
            raise ModelDownloadError(f"Failed to download model: {str(e)}")

    def _download_file(self, url: str, path: Path, headers: Dict[str, str]) -> None:
        """Stream a file to disk with a progress bar.

        Args:
            url: Direct download URL.
            path: Destination file path.
            headers: HTTP headers to send (e.g. HF authorization token).

        Raises:
            requests.HTTPError: If the server returns a non-2xx status.
        """
        # Use the response as a context manager so the streaming connection
        # is released even if the download fails partway through.
        with requests.get(url, stream=True, headers=headers) as response:
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            block_size = 8192

            with open(path, 'wb') as f, tqdm(
                desc=path.name,
                total=total_size,
                unit='iB',
                unit_scale=True,
                unit_divisor=1024,
            ) as pbar:
                for data in response.iter_content(block_size):
                    size = f.write(data)
                    pbar.update(size)

    def _get_model_params(self) -> Dict[str, Any]:
        """Configure model parameters based on request and system capabilities."""
        import torch

        def coerce_int(value, fallback):
            # Request fields may be absent; coerce to int when present.
            return int(value) if value is not None else fallback

        # Base parameters common to CPU and GPU runs.
        model_params = {
            "n_ctx": coerce_int(self.request.n_ctx, 2048),
            "n_batch": coerce_int(self.request.n_batch, 512),
            "n_threads": coerce_int(self.request.n_threads, os.cpu_count()),
            "verbose": True,  # Enable verbose output for debugging
        }

        if torch.cuda.is_available():
            logger.info("Configuring CUDA parameters...")

            # Force CUDA environment variables
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
            os.environ['LLAMA_CUDA_FORCE'] = '1'
            os.environ['LLAMA_FORCE_GPU'] = '1'  # Force GPU usage
            os.environ['LLAMA_CPU_DISABLE'] = '1'  # Disable CPU fallback

            # GPU-oriented overrides (all layers offloaded to device 0).
            gpu_overrides = {
                "n_gpu_layers": -1,
                "main_gpu": 0,
                "tensor_split": None,
                "use_mmap": False,
                "use_mlock": True,
                "mul_mat_q": True,
                "offload_kqv": True,
                "f16_kv": True,
                "logits_all": True,
                "embedding": True
            }

            model_params.update(gpu_overrides)
            logger.info(f"CUDA parameters configured: {gpu_overrides}")

            # Log CUDA device info
            logger.info(f"CUDA Device: {torch.cuda.get_device_name(0)}")
            logger.info(
                f"CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**2:.0f}MB")

        # Request-level overrides take precedence when explicitly set.
        request_overrides = {
            "tensor_split": self.request.tensor_split,
            "split_mode": self.request.split_mode,
            "cache_type": self.request.cache_type,
        }
        for key, value in request_overrides.items():
            if value is not None:
                model_params[key] = value

        logger.info(f"Final model parameters: {model_params}")
        return model_params

    def _configure_gpu_layers(self) -> int:
        """Configure the number of GPU layers based on hardware and request."""
        import torch

        if not torch.cuda.is_available():
            return 0

        # Force environment variables for CUDA
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        os.environ['LLAMA_CUDA_FORCE'] = '1'

        # If n_gpu_layers is specified in request, use that
        if self.request.n_gpu_layers is not None:
            return self.request.n_gpu_layers

        # Otherwise, use all layers on GPU
        return -1  # -1 means use all layers on GPU

    def _setup_cache(self, model: Any) -> None:
        """Attach a llama.cpp KV cache to the model when the API supports it.

        Cache setup is strictly best-effort: any failure (missing package,
        unsupported model, bad size) is downgraded to a warning.
        """
        try:
            from llama_cpp import LlamaCache
            if hasattr(model, 'set_cache'):
                bytes_per_gb = 1024 * 1024 * 1024
                cache_size = self.request.cache_size * bytes_per_gb
                cache_type = "fp16"  # or q8_0 or q4_0 depending on your needs
                model.set_cache(LlamaCache(capacity_bytes=cache_size))
                logger.info(
                    f"Initialized LLM cache with {self.request.cache_size}GB capacity using {cache_type}")
        except Exception as e:
            logger.warning(f"Failed to initialize cache: {e}")

    def get_metadata(self) -> Optional[Dict[str, Any]]:
        """Get model metadata without loading the full model.

        Returns:
            A dict with file-level info (type, path, size, GGUF format flag),
            enriched with vocab/context/embedding details when llama.cpp can
            open the file cheaply; None if the file is missing or on error.
        """
        try:
            model_path = self._get_model_path()
            if not model_path.exists():
                return None

            # Basic metadata derived purely from the file on disk.
            metadata = {
                "model_type": "llama.cpp",
                "model_path": str(model_path),
                "file_size": model_path.stat().st_size,
                "format": "GGUF" if model_path.suffix == '.gguf' else "Unknown"
            }

            # Best-effort enrichment via a minimal CPU-only load (n_ctx=8,
            # no GPU layers). These fields are optional, so any failure here
            # is simply skipped.
            try:
                from llama_cpp import Llama
                model = Llama(model_path=str(model_path),
                              n_ctx=8, n_gpu_layers=0)
                metadata.update({
                    "n_vocab": model.n_vocab(),
                    "n_ctx_train": model.n_ctx_train(),
                    "n_embd": model.n_embd(),
                    "desc": model.desc(),
                })
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                pass

            return metadata
        except Exception as e:
            logger.error(f"Error getting model metadata: {str(e)}")
            return None

    def get_config(self) -> Dict[str, Any]:
        """Report the llama.cpp settings taken from the current request."""
        request = self.request
        config: Dict[str, Any] = {"model_type": "llama.cpp"}
        # Copy the relevant request fields verbatim.
        for field in ("n_ctx", "n_batch", "n_gpu_layers", "device"):
            config[field] = getattr(request, field)
        return config

    @staticmethod
    def cleanup(model: Any) -> None:
        """Clean up model resources."""
        try:
            del model
        except:
            pass


================================================
FILE: Backend/src/models/loaders/tensorrt.py
================================================
import logging
from typing import Any, Dict, Optional, Tuple

from src.models.loaders.base import BaseLoader
from src.models.exceptions import ModelLoadError
from transformers import AutoTokenizer

logger = logging.getLogger(__name__)


class TensorRTLoader(BaseLoader):
    """Loader for TensorRT-LLM models.

    Creates a GenerationSession from a pre-built engine directory and pairs
    it with a Hugging Face tokenizer.
    """

    def load(self) -> Tuple[Any, Any]:
        """Load a TensorRT-LLM engine and its tokenizer.

        Returns:
            Tuple of (GenerationSession, tokenizer).

        Raises:
            ModelLoadError: If tensorrt-llm is not installed or the engine
                path does not exist.
        """
        try:
            import tensorrt_llm
            from tensorrt_llm.runtime import ModelConfig
        except ImportError:
            raise ModelLoadError(
                "tensorrt-llm is not installed. Please install it from the TensorRT-LLM repository")

        from pathlib import Path

        # request.engine_dir may be a plain string; normalize it to a Path so
        # the .exists() check below works either way (Path(Path) is a no-op).
        engine_path = Path(
            self.request.engine_dir) if self.request.engine_dir else self.model_path
        if not engine_path.exists():
            raise ModelLoadError(f"Engine path does not exist: {engine_path}")

        config = ModelConfig(
            engine_dir=str(engine_path),
            max_batch_size=self.request.max_batch_size,
            max_input_len=self.request.max_input_len,
            max_output_len=int(
                self.request.max_output_len) if self.request.max_output_len is not None else None,
        )

        model = tensorrt_llm.runtime.GenerationSession(config)

        # Fall back to the engine directory when no tokenizer path is given.
        tokenizer = AutoTokenizer.from_pretrained(
            self.request.tokenizer_path or str(engine_path),
            trust_remote_code=self.request.trust_remote_code,
            use_fast=self.request.use_fast_tokenizer,
        )

        return model, tokenizer

    def get_metadata(self) -> Optional[Dict[str, Any]]:
        """Return file-level engine metadata, or None if the file is missing."""
        if not self.model_path.exists():
            return None
        return {
            "model_type": "TensorRT-LLM",
            "model_path": str(self.model_path),
            "file_size": self.model_path.stat().st_size,
            "engine_dir": self.request.engine_dir
        }

    def get_config(self) -> Dict[str, Any]:
        """Report the TensorRT-LLM settings taken from the current request."""
        return {
            "model_type": "TensorRT-LLM",
            "model_name": self.request.model_name,
            "device": self.request.device,
            "engine_dir": self.request.engine_dir,
            "max_batch_size": self.request.max_batch_size,
            "max_input_len": self.request.max_input_len,
            "max_output_len": self.request.max_output_len
        }


================================================
FILE: Backend/src/models/loaders/transformers.py
================================================
import logging
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
import torch
from transformers import (
    BitsAndBytesConfig,
    PreTrainedModel,
)

from src.models.loaders.base import BaseLoader
from src.models.exceptions import ModelLoadError

logger = logging.getLogger(__name__)


class TransformersLoader(BaseLoader):
    """
    Loader for Hugging Face Transformers models.
    Handles both local and remote model loading with various optimizations
    (dtype selection, device mapping, quantization, flash attention).
    """

    def load(self) -> Tuple[Any, Any]:
        """Load a transformers model and return the model and tokenizer.

        Prefers a local copy at ``request.model_path``; otherwise downloads
        from the Hugging Face Hub and, when a path is configured, caches the
        tokenizer, config and weights there.

        Raises:
            ModelLoadError: If loading from the local path or downloading
                from the Hub fails.
        """
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer

            logger.info(f"Loading model: {self.request.model_name}")
            logger.info(f"Model type: {self.request.model_type}")
            logger.info(f"Model path: {self.request.model_path}")
            logger.info(f"Device: {self.request.device}")

            # Configure model loading parameters (dtype, device map, quantization).
            model_kwargs = self._get_model_kwargs()

            # If we have a local path, use it directly
            if self.request.model_path and Path(self.request.model_path).exists():
                logger.info(
                    f"Loading model from local path: {self.request.model_path}")
                try:
                    # Try to load tokenizer from local path first
                    tokenizer = AutoTokenizer.from_pretrained(
                        self.request.model_path,
                        trust_remote_code=self.request.trust_remote_code,
                        use_fast=self.request.use_fast_tokenizer,
                        padding_side=self.request.padding_side
                    )
                    logger.info("Loaded tokenizer from local path")

                    # Load model from local path
                    model = AutoModelForCausalLM.from_pretrained(
                        self.request.model_path,
                        **model_kwargs
                    )
                    logger.info("Loaded model from local path")

                    # Without a device_map, transformers leaves placement to us.
                    if model_kwargs.get("device_map") is None and hasattr(model, "to"):
                        # "auto" resolves to CUDA when present, else CPU.
                        if self.request.device == "auto":
                            device = "cuda" if torch.cuda.is_available() else "cpu"
                        else:
                            device = self.request.device

                        model = model.to(device)
                        logger.info(f"Moved model to device: {device}")

                    return model, tokenizer
                except Exception as e:
                    logger.warning(f"Failed to load from local path: {e}")
                    raise ModelLoadError(
                        f"Failed to load model from local path: {str(e)}")
            else:
                # Download from HuggingFace
                logger.info(
                    "Attempting to download from HuggingFace: " + self.request.model_name)

                try:
                    # Download the tokenizer, caching it locally when possible.
                    tokenizer = AutoTokenizer.from_pretrained(
                        self.request.model_name,
                        trust_remote_code=self.request.trust_remote_code,
                        use_fast=self.request.use_fast_tokenizer,
                        padding_side=self.request.padding_side
                    )
                    if self.request.model_path:
                        tokenizer.save_pretrained(self.request.model_path)
                        logger.info(
                            f"Tokenizer downloaded and saved to {self.request.model_path}")

                    # Download and cache the model config as well.
                    if self.request.model_path:
                        from transformers import AutoConfig
                        config = AutoConfig.from_pretrained(
                            self.request.model_name,
                            trust_remote_code=self.request.trust_remote_code
                        )
                        config.save_pretrained(self.request.model_path)
                        logger.info(
                            f"Config downloaded and saved to {self.request.model_path}")

                    # Download model weights
                    logger.info(
                        "Downloading model weights (this may take a while)...")
                    model = AutoModelForCausalLM.from_pretrained(
                        self.request.model_name,
                        **model_kwargs
                    )

                    # Cache the weights locally when a path is configured.
                    if self.request.model_path:
                        model.save_pretrained(self.request.model_path)
                        logger.info(
                            f"Model weights saved to {self.request.model_path}")

                    return model, tokenizer
                except Exception as e:
                    raise ModelLoadError(f"Failed to download model: {str(e)}")

        except Exception as e:
            raise ModelLoadError(
                f"Failed to load transformers model: {str(e)}")

    def _get_model_kwargs(self) -> Dict[str, Any]:
        """Build the ``from_pretrained`` keyword arguments for this request.

        When ``self._serializing_for_response`` is set (see ``get_config``),
        the torch dtype is stringified so the result is JSON serializable.
        """
        # Compute dtype: bfloat16 when requested, float16 otherwise.
        compute_dtype = torch.bfloat16 if self.request.compute_dtype == "bfloat16" else torch.float16

        # Determine device map; fall back to CPU when CUDA is unavailable.
        device_map = None
        if self.request.device == "cuda":
            if torch.cuda.is_available():
                device_map = "auto"
            else:
                logger.warning(
                    "CUDA requested but not available, falling back to CPU")
                self.request.device = "cpu"

        # Base parameters without gradient checkpointing.
        # NOTE(review): use_flash_attention_2 is deprecated in newer
        # transformers releases in favor of attn_implementation — verify
        # against the pinned transformers version.
        load_params = {
            "low_cpu_mem_usage": True,
            "torch_dtype": compute_dtype,
            "trust_remote_code": self.request.trust_remote_code,
            "use_flash_attention_2": self.request.use_flash_attention,
            "device_map": device_map,
            "revision": self.request.revision,
        }

        # Only add gradient checkpointing for explicitly supported models.
        model_name_lower = self.request.model_name.lower()
        if ("llama" in model_name_lower or
            "mistral" in model_name_lower or
                "mpt" in model_name_lower):
            load_params["use_gradient_checkpointing"] = True

        # Configure quantization when 8-bit or 4-bit loading was requested.
        if self.request.load_in_8bit or self.request.load_in_4bit:
            load_params["quantization_config"] = self._get_quantization_config()

        # Optional parameters, added only when meaningful.
        if self.request.max_memory is not None and self.request.device == "cuda":
            load_params["max_memory"] = self.request.max_memory

        if self.request.rope_scaling is not None:
            load_params["rope_scaling"] = self.request.rope_scaling

        if self.request.use_cache is False:
            load_params["use_cache"] = False

        # For model loading, return the original params with torch.dtype.
        if not hasattr(self, '_serializing_for_response'):
            return load_params

        # For a JSON response, convert torch.dtype to its string form.
        response_params = load_params.copy()
        response_params["torch_dtype"] = str(compute_dtype)
        return response_params

    def _get_quantization_config(self) -> BitsAndBytesConfig:
        """Build the bitsandbytes quantization configuration."""
        # getattr replaces the previous eval(f"torch.{...}"): same lookup,
        # without executing arbitrary strings from the request.
        return BitsAndBytesConfig(
            load_in_8bit=self.request.load_in_8bit,
            load_in_4bit=self.request.load_in_4bit,
            bnb_4bit_compute_dtype=getattr(torch, self.request.compute_dtype),
            llm_int8_enable_fp32_cpu_offload=True,
            bnb_4bit_use_double_quant=True
        )

    def get_metadata(self) -> Optional[Dict[str, Any]]:
        """Get model metadata without loading the full model.

        Reads the model config (remote for Hub IDs, local otherwise) and
        returns it as a JSON-serializable dict; None on failure.
        """
        try:
            # A Hub-style id ("org/name") with no local copy: fetch remotely.
            if '/' in self.request.model_name and not self.model_path.exists():
                config = self._load_config(self.request.model_name)
                metadata = self._make_json_serializable(config.to_dict())
                metadata['model_type'] = 'Transformers'
                return metadata

            if self.model_path.exists():
                config = self._load_config(self.model_path)
                metadata = self._make_json_serializable(config.to_dict())
                metadata['model_type'] = 'Transformers'
                return metadata

            return None
        except Exception as e:
            logger.error(f"Error getting model metadata: {str(e)}")
            return None

    def get_config(self) -> Dict[str, Any]:
        """Get the current model configuration as a JSON-serializable dict."""
        # Set the flag so _get_model_kwargs stringifies torch dtypes.
        self._serializing_for_response = True
        try:
            load_params = self._get_model_kwargs()
        finally:
            # Always clear the flag, even if kwargs construction raised,
            # so later load() calls are not affected.
            delattr(self, '_serializing_for_response')

        config = {
            "model_type": "Transformers",
            "model_name": self.request.model_name,
            "device": self.request.device,
            "load_params": load_params
        }

        if self.model_path.exists():
            try:
                model_config = self._load_config(self.model_path)
                config["model_config"] = model_config.to_dict()
            except Exception as e:
                logger.warning(f"Could not load model config: {str(e)}")

        return self._make_json_serializable(config)

    def _make_json_serializable(self, obj: Any) -> Any:
        """Recursively convert torch dtypes inside dicts/lists to strings."""
        if isinstance(obj, dict):
            return {k: self._make_json_serializable(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [self._make_json_serializable(v) for v in obj]
        elif hasattr(obj, 'dtype'):  # torch dtypes (and tensors) stringify
            return str(obj)
        return obj

    @staticmethod
    def cleanup(model: PreTrainedModel) -> None:
        """Move the model to CPU (freeing GPU memory) and drop the reference."""
        try:
            if hasattr(model, 'cpu'):
                model.cpu()
            del model
        except Exception as e:
            logger.warning(f"Error during model cleanup: {str(e)}")


================================================
FILE: Backend/src/models/manager.py
================================================
import logging
from pathlib import Path
from typing import Optional, Tuple, Any, Dict, Union

from src.endpoint.models import ModelLoadRequest
from src.models.utils.device import get_device
from src.models.utils.platform import check_platform_compatibility
from src.models.utils.detect_type import detect_model_type
from src.models.exceptions import ModelLoadError, ModelNotFoundError
from src.models.loaders import (
    TransformersLoader,
    LlamaCppLoader,
    LlamaCppHFLoader,
    ExLlamaV2Loader,
    ExLlamaV2HFLoader,
    HQQLoader,
    TensorRTLoader
)

logger = logging.getLogger(__name__)

class ModelManager:
    """
    Manages the loading, unloading, and switching of different AI models.
    Supports multiple model types and handles resource management.
    """

    def __init__(self):
        """Initialize the model manager with empty state."""
        self.current_model: Optional[Any] = None
        self.current_tokenizer: Optional[Any] = None
        self.model_type: Optional[str] = None
        self.device: Optional[str] = None
        self.model_name: Optional[str] = None
        self._is_loading: bool = False  # guards against concurrent load_model calls
        self.model_config: Optional[Dict[str, Any]] = None

        # Map model types to their respective loaders
        self.loader_mapping = {
            'Transformers': TransformersLoader,
            'llama.cpp': LlamaCppLoader,
            'llamacpp_HF': LlamaCppHFLoader,
            'ExLlamav2': ExLlamaV2Loader,
            'ExLlamav2_HF': ExLlamaV2HFLoader,
            'HQQ': HQQLoader,
            'TensorRT-LLM': TensorRTLoader
        }

    def check_platform_compatibility(self, model_type: str) -> Tuple[bool, str]:
        """Check if the current platform is compatible with the specified model type."""
        return check_platform_compatibility(model_type)

    def get_model_metadata(self, request: ModelLoadRequest) -> Optional[Dict[str, Any]]:
        """
        Get model metadata without loading the full model.

        Args:
            request: Model load request containing model information

        Returns:
            Dictionary containing model metadata or None if not found
        """
        try:
            # Delegate to the loader registered for this model type.
            loader_class = self.loader_mapping.get(request.model_type)
            if loader_class:
                loader = loader_class(request, self)
                return loader.get_metadata()

            return None
        except Exception as e:
            logger.error(f"Error getting model metadata: {str(e)}")
            return None

    def is_model_loaded(self) -> bool:
        """Check if a model is currently loaded."""
        return self.current_model is not None

    def get_model_info(self) -> Dict[str, Any]:
        """
        Get information about the currently loaded model.

        Returns:
            JSON-serializable dictionary containing model information
        """
        info = {
            "model_name": self.model_name,
            "model_type": self.model_type,
            "device": self.device,
            "is_loaded": self.is_model_loaded(),
            "is_loading": self._is_loading,
        }
        if self.model_config:
            info["config"] = self._make_json_serializable(self.model_config)
        return self._make_json_serializable(info)

    def clear_model(self) -> None:
        """Unload the current model, reset all state, and clear the CUDA cache."""
        try:
            if self.current_model is not None:
                # Let the specific loader handle cleanup if method exists
                loader_class = self.loader_mapping.get(self.model_type)
                if loader_class and hasattr(loader_class, 'cleanup'):
                    loader_class.cleanup(self.current_model)
                else:
                    # Default cleanup: move off the GPU, then drop the reference.
                    if hasattr(self.current_model, 'cpu'):
                        self.current_model.cpu()
                    del self.current_model

            if self.current_tokenizer is not None:
                del self.current_tokenizer

            # Reset all attributes
            self.current_model = None
            self.current_tokenizer = None
            self.model_type = None
            self.device = None
            self.model_name = None
            self.model_config = None

            # Force a GC pass, then release cached CUDA memory if available.
            import torch
            import gc
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        except Exception as e:
            logger.error(f"Error clearing model: {str(e)}")
            raise

    def _make_json_serializable(self, obj: Any) -> Any:
        """Recursively convert torch dtypes inside dicts/lists to strings."""
        if isinstance(obj, dict):
            return {k: self._make_json_serializable(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [self._make_json_serializable(v) for v in obj]
        elif hasattr(obj, 'dtype'):  # torch dtypes (and tensors) stringify
            return str(obj)
        return obj

    def load_model(self, request: ModelLoadRequest) -> Tuple[Any, Any]:
        """
        Load a model based on the request configuration.

        Args:
            request: Model load request containing all necessary parameters

        Returns:
            Tuple of (model, tokenizer)

        Raises:
            ModelLoadError: If there's an error during model loading
            ModelNotFoundError: If the requested model is not found
        """
        if self._is_loading:
            raise ModelLoadError("A model is already being loaded")

        try:
            self._is_loading = True
            self.clear_model()  # Clear any existing model

            # Set device using imported get_device function
            self.device = get_device(request)
            self.model_name = request.model_name

            # Handle Ollama models first - resolve the manifest to the GGUF
            # blob and treat the result as a llama.cpp model.
            if request.model_type == 'ollama':
                try:
                    # Read the manifest to get the blob SHA
                    manifest_path = Path(request.model_path) / 'latest'
                    logger.info(f"Looking for manifest at: {manifest_path}")
                    if not manifest_path.exists():
                        raise ModelLoadError(f"Manifest file not found at: {manifest_path}")

                    import json
                    with open(manifest_path) as f:
                        manifest = json.load(f)
                    logger.info(f"Manifest content: {json.dumps(manifest, indent=2)}")

                    # Get the model layer (first layer with mediaType 'application/vnd.ollama.image.model')
                    try:
                        model_layer = next(layer for layer in manifest['layers']
                                        if layer['mediaType'] == 'application/vnd.ollama.image.model')
                    except StopIteration:
                        raise ModelLoadError("No model layer found in manifest")

                    # Extract SHA and construct blob path
                    sha = model_layer['digest'].split(':')[1]
                    # Ollama stores the files directly in the blobs directory with a sha256- prefix
                    blob_path = Path(request.model_path).parent.parent.parent.parent / 'blobs' / f'sha256-{sha}'
                    logger.info(f"Looking for blob at: {blob_path}")

                    if not blob_path.exists():
                        raise ModelLoadError(f"Model file not found at: {blob_path}")

                    # Update the request to use the actual model file
                    request.model_path = str(blob_path)
                    request.model_type = "llama.cpp"
                    logger.info(f"Converting Ollama model to llama.cpp with path: {request.model_path}")

                except Exception as e:
                    logger.error(f"Error processing Ollama model: {str(e)}")
                    raise ModelLoadError(f"Failed to process Ollama model: {str(e)}")

            # Check if model exists locally first
            model_path = Path(request.model_path) if request.model_path else Path(f"models/{request.model_name}")
            if model_path.exists():
                logger.info(f"Found local model at: {model_path}")
                # Auto-detect model type if not specified
                if not request.model_type or request.model_type == "auto":
                    request.model_type = self._detect_model_type(request)
                    logger.info(f"Detected model type: {request.model_type}")
            else:
                # Only attempt to download if it looks like a HF model ID
                if '/' in request.model_name:
                    logger.info("Model not found locally, will attempt to download from HuggingFace")
                else:
                    raise ModelNotFoundError(f"Model not found at: {model_path}")

            # Check platform compatibility
            is_compatible, message = check_platform_compatibility(request.model_type)
            if not is_compatible:
                raise ModelLoadError(message)
            logger.info(message)

            # Get the appropriate loader
            loader_class = self.loader_mapping.get(request.model_type)
            logger.info(f"Model type: {request.model_type}")
            logger.info(f"Available loaders: {list(self.loader_mapping.keys())}")
            if not loader_class:
                raise ModelLoadError(f"Unsupported model type: {request.model_type}")

            # Initialize and use the loader
            loader = loader_class(request, self)
            model, tokenizer = loader.load()

            # Store the results
            self.current_model = model
            self.current_tokenizer = tokenizer
            self.model_type = request.model_type
            # Make config JSON serializable before storing
            self.model_config = self._make_json_serializable(loader.get_config())

            return model, tokenizer

        except Exception as e:
            logger.error(f"Error loading model: {str(e)}", exc_info=True)
            self.clear_model()  # Cleanup on failure
            if isinstance(e, (ModelLoadError, ModelNotFoundError)):
                raise
            raise ModelLoadError(str(e))

        finally:
            self._is_loading = False

    def _detect_model_type(self, request: ModelLoadRequest) -> str:
        """
        Detect the type of model based on the model path and name.

        Args:
            request: Model load request

        Returns:
            String indicating the detected model type

        Raises:
            ModelNotFoundError: If no local file exists and the name is not
                a HuggingFace-style "org/name" identifier.
        """
        model_path = Path(request.model_path) if request.model_path else Path(
            f"models/{request.model_name}")

        if model_path.exists():
            return detect_model_type(model_path)

        # Default to Transformers for HF models
        if '/' in request.model_name:
            return "Transformers"

        raise ModelNotFoundError(
            f"Could not detect model type: {request.model_name}")


# Global model manager instance (module-level singleton shared by importers).
model_manager = ModelManager()

================================================
FILE: Backend/src/models/streamer.py
================================================
import traceback
from queue import Queue
from threading import Thread
from typing import Optional, Callable, Any, List, Union, AsyncIterator, Iterator, Dict
import torch
import time
import asyncio
import json
import logging

logger = logging.getLogger(__name__)


class StopNowException(Exception):
    """Raised inside the generation thread (see StreamIterator._queue_callback)
    to abort streaming immediately when a stop has been requested."""
    pass


class StreamingStoppingCriteria:
    """Base class for stopping criteria during text generation.

    Subclasses override ``__call__`` to signal that generation should halt;
    this base implementation never requests a stop.
    """

    def __init__(self):
        pass

    def __call__(self, input_ids, scores) -> bool:
        # Default behavior: keep generating.
        return False


class StopOnInterrupt(StreamingStoppingCriteria):
    """Stopping criteria driven by an external interruption signal.

    The signal is any zero-argument callable returning a truthy value when
    generation should stop; when omitted, a never-firing default is used.
    """

    def __init__(self, stop_signal=None):
        super().__init__()
        # Fall back to a signal that never fires.
        never = lambda: False
        self.stop_signal = stop_signal or never

    def __call__(self, input_ids, scores) -> bool:
        # Delegate the decision entirely to the signal.
        return self.stop_signal()


class StreamIterator(AsyncIterator[str], Iterator[str]):
    """Iterator that streams tokens as they are generated.

    Wraps a callback-driven generation function and exposes its output as
    both a synchronous and an asynchronous iterator of server-sent-event
    formatted strings. Generation runs on a background thread that is
    started lazily on first iteration.
    """

    def __init__(self, func: Callable, callback: Optional[Callable] = None):
        """func is the generation routine (called with a data callback);
        callback is an optional observer invoked with each raw payload."""
        self.func = func
        self.callback = callback
        self.queue = Queue()  # feeds the synchronous iterator
        self.async_queue = asyncio.Queue()  # feeds the async iterator
        self.sentinel = object()  # end-of-stream marker for the sync queue
        self.stop_now = False  # set by __exit__ to abort generation
        self.thread = None  # lazily-started background generation thread

    def _queue_callback(self, data):
        """Callback that puts data into both queues"""
        # Raising here unwinds the generation thread when a stop was requested.
        if self.stop_now:
            raise StopNowException

        # None marks end of generation: sentinel for sync, None for async.
        if data is None:
            self.queue.put(self.sentinel)
            self.async_queue.put_nowait(None)
            return

        if self.callback:
            self.callback(data)

        # Wrap each payload as a server-sent-event data line.
        # NOTE(review): put_nowait is invoked from the worker thread, but
        # asyncio.Queue is not thread-safe — confirm the consuming event
        # loop tolerates this.
        formatted_data = f"data: {json.dumps(data)}\n\n"
        self.queue.put(formatted_data)
        self.async_queue.put_nowait(formatted_data)

    def _start_generation(self):
        # Start the background generation thread exactly once.
        if not self.thread:
            def task():
                try:
                    self.func(self._queue_callback)
                except StopNowException:
                    pass  # deliberate early stop requested via __exit__
                except Exception:
                    traceback.print_exc()
                finally:
                    # Signal end-of-stream so consumers terminate.
                    # NOTE(review): if stop_now is already set, this call
                    # re-raises StopNowException and the sentinel is never
                    # queued, so a blocked sync consumer could hang — verify.
                    self._queue_callback(None)

            self.thread = Thread(target=task)
            self.thread.start()

    def __iter__(self) -> Iterator[str]:
        self._start_generation()
        return self

    def __next__(self) -> str:
        # Defensive: allow next() without a prior iter().
        if not self.thread:
            self._start_generation()

        # Blocks until a chunk (or the sentinel) arrives.
        item = self.queue.get()
        if item is self.sentinel:
            raise StopIteration
        return item

    def __aiter__(self):
        self._start_generation()
        return self

    async def __anext__(self) -> str:
        if not self.thread:
            self._start_generation()

        try:
            item = await self.async_queue.get()
            if item is None:
                raise StopAsyncIteration
            return item
        except Exception as e:
            # NOTE(review): any error here becomes end-of-stream, hiding the
            # underlying exception from the consumer — confirm intended.
            if isinstance(e, StopAsyncIteration):
                raise
            raise StopAsyncIteration from e

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Request the generation thread to stop at its next callback.
        self.stop_now = True


class TextGenerator:
    """A text generator that streams tokens as they are generated.

    Wraps either a llama.cpp-style model (detected by the presence of a
    `create_completion` method) or a transformers-style model, exposing a
    single `generate()` entry point that returns either the full generated
    text or a `StreamIterator` producing OpenAI-style SSE chunks.
    """

    def __init__(self, model, tokenizer, device: str = "cpu"):
        # `model` drives dispatch in generate(): treated as llama.cpp when it
        # has create_completion, otherwise as a transformers model.
        self.model = model
        # Tokenizer is only used on the transformers path.
        self.tokenizer = tokenizer
        self.device = device
        # NOTE(review): never set anywhere in this class -- presumably
        # toggled externally to request a stop; confirm before relying on it.
        self.stop_signal = False
        self._log_cuda_status()

    def _log_cuda_status(self):
        """Log CUDA status if available"""
        # hasattr guard protects torch builds compiled without CUDA support.
        if hasattr(torch.cuda, 'is_available') and torch.cuda.is_available():
            logger.info("CUDA is available in TextGenerator")
            logger.info(
                f"Model GPU layers: {getattr(self.model, 'n_gpu_layers', 'unknown')}")
            logger.info(
                f"CUDA Memory allocated: {torch.cuda.memory_allocated() / 1024**2:.2f}MB")
            logger.info(
                f"CUDA Memory reserved: {torch.cuda.memory_reserved() / 1024**2:.2f}MB")

    def _create_stream_response(self, text: str, generated_text: str, is_final: bool = False) -> Dict:
        """Create a standardized streaming response.

        Mirrors the OpenAI chat.completion.chunk shape so downstream SSE
        consumers can reuse OpenAI-style parsing. `text` is the new delta;
        `generated_text` is the accumulated output (only used to derive the
        chunk id). The final chunk has an empty delta and finish_reason "stop".
        """
        response = {
            # hash() may be negative, so the id slice can include a '-' sign.
            "id": "chatcmpl-" + str(hash(generated_text))[-12:],
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": "local-model",
            "choices": [{
                "index": 0,
                "delta": {} if is_final else {"content": text},
                "finish_reason": "stop" if is_final else None
            }]
        }
        return response

    def _stream_tokens(self, callback: Callable, generator, decode_func: Callable) -> str:
        """Generic token streaming implementation.

        Drains `generator`, decoding each item with `decode_func`, calling
        `callback` with one chunk per token, then a final "stop" chunk, then
        None (StreamIterator's end-of-stream marker). Returns the full
        accumulated text.
        """
        generated_text = ""
        for output in generator:
            text = decode_func(output)
            generated_text += text
            callback(self._create_stream_response(text, generated_text))

        # Send final message
        callback(self._create_stream_response(
            "", generated_text, is_final=True))
        # None tells StreamIterator._queue_callback to close both queues.
        callback(None)
        return generated_text

    def generate(self,
                 prompt: str,
                 max_new_tokens: int = 100,
                 temperature: float = 0.7,
                 top_p: float = 0.95,
                 top_k: int = 50,
                 repetition_penalty: float = 1.1,
                 stopping_criteria: Optional[List[StreamingStoppingCriteria]] = None,
                 callback: Optional[Callable[[dict], Any]] = None,
                 stream: bool = True) -> Union[str, Any]:
        """Generate text from a prompt, optionally streaming the output.

        Returns a StreamIterator of SSE-formatted chunks when `stream` is
        True, otherwise the generated text as a plain string. Sampling
        parameters map onto the backend's equivalents (repetition_penalty
        becomes repeat_penalty for llama.cpp); `stopping_criteria` and
        `callback` are only used on their respective paths.
        """

        if hasattr(self.model, 'create_completion'):
            # llama.cpp model
            completion_args = {
                "prompt": prompt,
                "max_tokens": max_new_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k,
                "repeat_penalty": repetition_penalty,
                "stream": stream
            }

            if stream:
                # The actual generation runs inside StreamIterator's worker
                # thread; `callback` here is the queue-feeding callback.
                def _stream(callback):
                    completion = self.model.create_completion(
                        **completion_args)
                    return self._stream_tokens(
                        callback,
                        completion,
                        lambda x: x["choices"][0]["text"]
                    )
                return StreamIterator(_stream, callback=callback)
            else:
                completion = self.model.create_completion(**completion_args)
                return completion["choices"][0]["text"]
        else:
            # Other models (transformers)
            inputs = self.tokenizer(
                prompt, return_tensors="pt", padding=True).to(self.device)
            # Clamp temperature/top_p away from degenerate values before sampling.
            gen_config = {
                "max_new_tokens": max_new_tokens,
                "temperature": max(temperature, 1e-2),
                "top_p": min(max(top_p, 0.1), 0.95),
                "top_k": top_k,
                "repetition_penalty": repetition_penalty,
                "do_sample": True,
                "pad_token_id": self.tokenizer.pad_token_id,
                "eos_token_id": self.tokenizer.eos_token_id,
                "use_cache": True
            }

            if stream:
                def _stream(callback):
                    # NOTE(review): model.generate() with
                    # return_dict_in_generate=True returns a GenerateOutput,
                    # not a per-token iterator; iterating `generator` below
                    # may not stream token-by-token as intended -- verify
                    # against the concrete model class in use.
                    with torch.no_grad():
                        generator = self.model.generate(
                            **inputs,
                            **gen_config,
                            stopping_criteria=stopping_criteria,
                            return_dict_in_generate=True,
                            output_scores=True
                        )
                        return self._stream_tokens(
                            callback,
                            generator,
                            lambda x: self.tokenizer.decode(
                                [x.sequences[0, -1].item() if not isinstance(x,
                                                                             torch.Tensor) else x.item()],
                                skip_special_tokens=True
                            )
                        )
                return StreamIterator(_stream, callback=callback)
            else:
                # Non-streaming path: decode the whole generated sequence at once.
                with torch.no_grad():
                    output = self.model.generate(
                        **inputs,
                        **gen_config,
                        stopping_criteria=stopping_criteria,
                        return_dict_in_generate=True,
                        output_scores=True
                    )
                    return self.tokenizer.decode(output.sequences[0], skip_special_tokens=True)

# End of TextGenerator class - everything after this line should be removed


================================================
FILE: Backend/src/models/utils/__init__.py
================================================


================================================
FILE: Backend/src/models/utils/detect_type.py
================================================
import json
from pathlib import Path
from typing import Union
import logging

logger = logging.getLogger(__name__)


def detect_model_type(model_path: Union[str, Path]) -> str:
    """
    Detect the model type from the model files and metadata.

    Args:
        model_path: Directory containing the model files.

    Returns:
        One of: 'ollama', 'Transformers', 'llama.cpp', 'llamacpp_HF',
        'ExLlamav2', 'ExLlamav2_HF', 'HQQ', 'TensorRT-LLM'.

    Raises:
        ValueError: If the path does not exist or the type cannot be
            determined from the files present.
    """
    model_path = Path(model_path)
    if not model_path.exists():
        raise ValueError(f"Model path does not exist: {model_path}")

    # Explicit metadata wins over file-pattern heuristics.
    metadata_path = model_path / "metadata.json"
    if metadata_path.exists():
        try:
            with open(metadata_path, 'r') as f:
                metadata = json.load(f)
                if "model_type" in metadata:
                    return metadata["model_type"]
        except Exception:
            # Narrowed from bare `except:` so Ctrl-C / SystemExit propagate.
            logger.warning(f"Could not read metadata from {metadata_path}")

    # Check for specific file patterns
    files = list(model_path.glob("*"))
    file_names = [f.name for f in files]

    # TensorRT-LLM ships compiled engine/plan files.
    if any(f.endswith('.engine') for f in file_names) or any(f.endswith('.plan') for f in file_names):
        return 'TensorRT-LLM'

    # llama.cpp check -- GGUF is llama.cpp's weight format.
    # Fix: previously this branch returned 'ExLlamav2'/'ExLlamav2_HF', which
    # misrouted GGUF models to the wrong loader.
    if any(f.endswith('.gguf') for f in file_names):
        # An HF tokenizer config means the llamacpp_HF wrapper is usable.
        if any(f == 'tokenizer_config.json' for f in file_names):
            return 'llamacpp_HF'
        return 'llama.cpp'

    # HQQ check
    if any(f.endswith('.hqq') for f in file_names):
        return 'HQQ'

    # Default to Transformers for standard HF models
    if any(f in file_names for f in ['config.json', 'pytorch_model.bin', 'model.safetensors']):
        # A SentencePiece tokenizer plus a llama/mistral config may indicate
        # an ExLlamav2-compatible model.
        if (model_path / 'tokenizer.model').exists():
            config_path = model_path / 'config.json'
            try:
                with open(config_path, 'r') as f:
                    config = json.load(f)
                    if config.get('model_type', '').lower() in ['llama', 'mistral']:
                        return 'ExLlamav2'
            except Exception:
                # Unreadable config: fall through to the Transformers default.
                pass
        return 'Transformers'

    raise ValueError(
        f"Could not determine model type from files in {model_path}")


================================================
FILE: Backend/src/models/utils/device.py
================================================
import torch
from src.endpoint.models import ModelLoadRequest


def get_device(request: ModelLoadRequest) -> str:
    """Resolve the torch device string for a model-load request.

    An explicit ``request.device`` always wins; for "auto" the accelerators
    are probed in order of preference: CUDA, then Apple MPS, then CPU.
    """
    # Explicit device choice always wins.
    if request.device != "auto":
        return request.device

    # Probe accelerators in order of preference.
    if torch.cuda.is_available():
        print("CUDA is available")
        return "cuda"
    if torch.backends.mps.is_available():
        print("MPS is available")
        return "mps"
    print("No GPU available")
    return "cpu"


================================================
FILE: Backend/src/models/utils/download.py
================================================
import os
import logging
import requests
from tqdm import tqdm
from pathlib import Path
from typing import List, Dict, Optional

logger = logging.getLogger(__name__)

def download_file_with_progress(url: str, file_path: Path, headers: Optional[Dict[str, str]] = None) -> None:
    """Download a file with progress bar"""
    try:
        resp = requests.get(url, stream=True, headers=headers or {})
        resp.raise_for_status()

        total = int(resp.headers.get('content-length', 0))
        chunk_bytes = 8192  # stream in 8 KB chunks

        progress = tqdm(
            desc=file_path.name,
            total=total,
            unit='iB',
            unit_scale=True,
            unit_divisor=1024,
        )
        with open(file_path, 'wb') as out, progress as pbar:
            for piece in resp.iter_content(chunk_bytes):
                pbar.update(out.write(piece))

        logger.info(f"Successfully downloaded {file_path.name}")
    except Exception as e:
        # Drop a zero-byte partial download so retries start clean.
        if file_path.exists() and file_path.stat().st_size == 0:
            file_path.unlink()
        raise ValueError(f"Failed to download file {file_path.name}: {str(e)}")

def get_hf_repo_files(repo_id: str, hf_token: Optional[str] = None) -> List[Dict]:
    """Get list of files in a HuggingFace repository"""
    url = f"https://huggingface.co/api/models/{repo_id}/tree/main"
    request_headers = {"Accept": "application/json"}
    if hf_token:
        # Gated/private repos need a bearer token.
        request_headers["Authorization"] = f"Bearer {hf_token}"
        logger.info("Using provided HuggingFace token")

    logger.info(f"Fetching repository contents from {url}")
    resp = requests.get(url, headers=request_headers)
    resp.raise_for_status()
    return resp.json()

def download_hf_model_files(repo_id: str, model_path: Path, required_files: List[str], hf_token: Optional[str] = None) -> None:
    """Download required files from a HuggingFace repository"""
    try:
        repo_files = get_hf_repo_files(repo_id, hf_token)
        logger.info(f"Found {len(repo_files)} files in repository")
        logger.info(f"Required files: {required_files}")

        auth_headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}

        for file_name in required_files:
            # First repo entry whose path matches the required file, if any.
            entry = next((item for item in repo_files if item['path'] == file_name), None)
            if entry is None:
                logger.error(f"Required file {file_name} not found in repository. Available files: {[item['path'] for item in repo_files]}")
                raise ValueError(f"Required file {file_name} not found in repository {repo_id}")

            url = f"https://huggingface.co/{repo_id}/resolve/main/{file_name}"
            target = model_path / file_name

            logger.info(f"Downloading {file_name} ({entry.get('size', 'unknown size')}) from {url}")
            download_file_with_progress(url, target, auth_headers)

    except Exception as e:
        logger.error(f"Failed to download model: {str(e)}", exc_info=True)
        # Clean up any partially downloaded files
        if model_path.exists():
            import shutil
            shutil.rmtree(model_path)
        raise ValueError(f"Failed to download model: {str(e)}")

def find_best_gguf_file(files: List[Dict]) -> Optional[Dict]:
    """Find the best GGUF file from a list of files, preferring q4_k_m files and sorting by size"""
    candidates = [entry for entry in files if entry.get('path', '').endswith('.gguf')]
    if not candidates:
        return None

    # Rank: q4_k_m quantizations first, then smallest known size
    # (entries without a size sort last within their group).
    def rank(entry):
        preferred = 0 if 'q4_k_m' in entry['path'].lower() else 1
        return (preferred, entry.get('size', float('inf')))

    return min(candidates, key=rank)

def download_gguf_model(repo_id: str, model_path: Path, hf_token: Optional[str] = None) -> Path:
    """Download a GGUF model from HuggingFace.

    Args:
        repo_id: HuggingFace repository id (e.g. "user/repo").
        model_path: Directory in which to store the downloaded file.
        hf_token: Optional HuggingFace access token for gated repos.

    Returns:
        Path to the downloaded (or already-present) .gguf file.

    Raises:
        ValueError: If the repo has no GGUF files or the download fails.
    """
    # Fix: keep the target file in its own variable instead of rebinding the
    # `model_path` parameter. Previously, a failure occurring before the file
    # path was computed made the except-branch stat/unlink the *directory*.
    file_path: Optional[Path] = None
    try:
        files = get_hf_repo_files(repo_id, hf_token)
        file_info = find_best_gguf_file(files)
        if not file_info:
            raise ValueError(f"No GGUF files found in repository {repo_id}")

        file_name = file_info['path']
        download_url = f"https://huggingface.co/{repo_id}/resolve/main/{file_name}"
        file_path = model_path / file_name

        # Only download if file doesn't exist or is empty
        if not file_path.exists() or file_path.stat().st_size == 0:
            headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}
            download_file_with_progress(download_url, file_path, headers)

        return file_path
    except Exception as e:
        # Remove a zero-byte partial file; never touch the model directory.
        if file_path is not None and file_path.exists() and file_path.stat().st_size == 0:
            file_path.unlink()
        raise ValueError(f"Failed to download GGUF model: {str(e)}")


================================================
FILE: Backend/src/models/utils/platform.py
================================================
import platform
from typing import Tuple


def check_platform_compatibility(model_type: str) -> Tuple[bool, str]:
    """
    Check if the model type is compatible with the current platform
    Returns (is_compatible, message)
    """
    current_platform = platform.system().lower()

    # Supported OSes per backend; keys match detect_model_type() results.
    platform_compatibility = {
        'TensorRT-LLM': ['linux'],  # TensorRT only works on Linux
        'ExLlamav2': ['windows', 'linux'],  # ExLlama works on Windows and Linux
        'ExLlamav2_HF': ['windows', 'linux'],
        'HQQ': ['linux', 'windows', 'darwin'],  # HQQ works on all platforms
        'llama.cpp': ['linux', 'windows', 'darwin'],  # llama.cpp works on all platforms
        'llamacpp_HF': ['linux', 'windows', 'darwin'],
        'Transformers': ['linux', 'windows', 'darwin'],  # Transformers works on all platforms
        'ollama': ['linux', 'windows', 'darwin']
    }

    # Unknown model types get an empty list and are reported incompatible.
    compatible_platforms = platform_compatibility.get(model_type, [])
    is_compatible = current_platform in compatible_platforms

    if is_compatible:
        message = f"Model type '{model_type}' is compatible with {platform.system()}"
    else:
        message = f"Model type '{model_type}' is not compatible with {platform.system()}. Compatible platforms: {', '.join(compatible_platforms)}"

    return is_compatible, message


================================================
FILE: Backend/src/vectorstorage/embeddings.py
================================================
import time


def chunk_list(lst, n):
    """Yield successive n-sized chunks from lst (last chunk may be shorter)."""
    idx = 0
    while idx < len(lst):
        yield lst[idx:idx + n]
        idx += n


def embed_chunk(args):
    """Embed a chunk of documents and report progress statistics.

    Args:
        args: Tuple of (vectordb, chunk, chunk_num, total_chunks, start_time,
            time_history). `time_history` must support append() and
            popleft() -- presumably a collections.deque; TODO confirm at the
            call site. `vectordb` is a LangChain-style store exposing
            add_documents().

    Returns:
        dict with per-chunk progress stats; after chunk 20 (with >= 3 timing
        samples) it also carries finish-time estimates, otherwise the
        estimate fields hold "calculating..." placeholders.

    Raises:
        Exception: wrapping any error raised while adding documents.
    """
    vectordb, chunk, chunk_num, total_chunks, start_time, time_history = args
    try:
        vectordb.add_documents(chunk)

        # Calculate time taken for this chunk
        # NOTE(review): this is time since `start_time`, which looks like the
        # overall job start -- so appended values grow cumulatively rather
        # than being per-chunk durations; verify against the caller.
        current_time = time.time()
        chunk_time = current_time - start_time
        time_history.append(chunk_time)

        # Keep only last 5 times
        if len(time_history) > 5:
            time_history.popleft()

        # Basic stats to return for all chunks
        result = {
            "chunk": chunk_num,
            "total_chunks": total_chunks,
            "docs_in_chunk": len(chunk),
            "percent_complete": round((chunk_num / total_chunks * 100), 2),
            "elapsed_time": current_time - start_time,
        }

        # Only add time estimates after 20 chunks and if we have enough data points
        if chunk_num >= 20 and len(time_history) >= 3:
            current_avg_time = sum(time_history) / len(time_history)

            # Store the lowest average time seen so far
            # NOTE(review): stored as a function attribute, so it persists
            # across separate embedding jobs within the same process and is
            # never reset -- confirm this is intended.
            if not hasattr(embed_chunk, 'lowest_avg_time') or current_avg_time < embed_chunk.lowest_avg_time:
                embed_chunk.lowest_avg_time = current_avg_time

            remaining_chunks = total_chunks - chunk_num
            est_remaining_time = remaining_chunks * embed_chunk.lowest_avg_time
            # Wall-clock finish estimate in local time; remaining duration
            # formatted via gmtime so it reads as HH:MM:SS elapsed.
            est_finish_time = time.strftime(
                '%H:%M:%S', time.localtime(current_time + est_remaining_time))
            est_remaining_time_formatted = time.strftime(
                '%H:%M:%S', time.gmtime(est_remaining_time))

            result.update({
                "est_finish_time": est_finish_time,
                "time_per_chunk": embed_chunk.lowest_avg_time,
                "remaining_chunks": remaining_chunks,
                "est_remaining_time": est_remaining_time_formatted
            })
        else:
            # Not enough samples yet: emit placeholder strings instead.
            result.update({
                "est_finish_time": "calculating...",
                "time_per_chunk": "calculating...",
                "remaining_chunks": total_chunks - chunk_num,
                "est_remaining_time": "calculating..."
            })

        return result
    except Exception as e:
        raise Exception(
            f"Error embedding chunk {chunk_num}/{total_chunks}: {str(e)}")


================================================
FILE: Backend/src/vectorstorage/helpers/sanitizeCollectionName.py
================================================
import re


def sanitize_collection_name(name):
    """Normalize `name` into a vector-store-safe collection name.

    Replaces disallowed characters with underscores, trims a single
    non-word character from each end, and pads/truncates to 3-63 chars.
    Returns None if sanitization fails (e.g. non-string input).
    """
    try:
        # Replace anything that is not a word character or hyphen.
        cleaned = re.sub(r'[^\w\-]', '_', name)
        # Strip one non-word character from each end, if present.
        cleaned = re.sub(r'^[^\w]|[^\w]$', '', cleaned)
        # Collapse runs of dots (dots were already replaced above, so this
        # can no longer match -- kept as a safeguard).
        cleaned = re.sub(r'\.{2,}', '_', cleaned)

        # Enforce the 3..63 length window.
        if len(cleaned) < 3:
            return cleaned.ljust(3, "_")
        return cleaned[:63]
    except Exception as e:
        print(f"Error sanitizing collection name: {str(e)}")
        return None


================================================
FILE: Backend/src/vectorstorage/init_store.py
================================================
from langchain_huggingface import HuggingFaceEmbeddings
import logging
import torch
import os
from pathlib import Path

logger = logging.getLogger(__name__)

def get_models_dir():
    if os.name == 'posix':
        # For Linux, use ~/.local/share/Notate/models
        if os.uname().sysname == 'Linux':
            base_dir = os.path.expanduser('~/.local/share/Notate')
        # For macOS, use ~/Library/Application Support/Notate/models
        else:
            base_dir = os.path.expanduser('~/Library/Application Support/Notate')
    else:
        # For Windows, u
Download .txt
gitextract_jmk8mdb7/

├── .gitignore
├── Backend/
│   ├── .gitignore
│   ├── ensure_dependencies.py
│   ├── main.py
│   ├── requirements.txt
│   ├── src/
│   │   ├── authentication/
│   │   │   ├── api_key_authorization.py
│   │   │   └── token.py
│   │   ├── data/
│   │   │   ├── dataFetch/
│   │   │   │   ├── webcrawler.py
│   │   │   │   └── youtube.py
│   │   │   ├── dataIntake/
│   │   │   │   ├── csvFallbackSplitting.py
│   │   │   │   ├── fileTypes/
│   │   │   │   │   └── loadX.py
│   │   │   │   ├── getHtmlFiles.py
│   │   │   │   ├── loadFile.py
│   │   │   │   └── textSplitting.py
│   │   │   └── database/
│   │   │       ├── checkAPIKey.py
│   │   │       ├── db.py
│   │   │       ├── getCollectionInfo.py
│   │   │       └── getLLMApiKey.py
│   │   ├── endpoint/
│   │   │   ├── api.py
│   │   │   ├── deleteStore.py
│   │   │   ├── devApiCall.py
│   │   │   ├── embed.py
│   │   │   ├── models.py
│   │   │   ├── ragQuery.py
│   │   │   ├── transcribe.py
│   │   │   ├── vectorQuery.py
│   │   │   └── webcrawl.py
│   │   ├── llms/
│   │   │   ├── llmQuery.py
│   │   │   ├── messages/
│   │   │   │   └── formMessages.py
│   │   │   └── providers/
│   │   │       ├── local.py
│   │   │       ├── ollama.py
│   │   │       ├── ooba.py
│   │   │       └── openai.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── exceptions.py
│   │   │   ├── loaders/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base.py
│   │   │   │   ├── exllama.py
│   │   │   │   ├── hqq.py
│   │   │   │   ├── llamaccphf.py
│   │   │   │   ├── llamacpp.py
│   │   │   │   ├── tensorrt.py
│   │   │   │   └── transformers.py
│   │   │   ├── manager.py
│   │   │   ├── streamer.py
│   │   │   └── utils/
│   │   │       ├── __init__.py
│   │   │       ├── detect_type.py
│   │   │       ├── device.py
│   │   │       ├── download.py
│   │   │       └── platform.py
│   │   ├── vectorstorage/
│   │   │   ├── embeddings.py
│   │   │   ├── helpers/
│   │   │   │   └── sanitizeCollectionName.py
│   │   │   ├── init_store.py
│   │   │   └── vectorstore.py
│   │   └── voice/
│   │       └── voice_to_text.py
│   └── tests/
│       ├── testApi.py
│       └── test_voice.py
├── Frontend/
│   ├── .gitignore
│   ├── build/
│   │   └── icons/
│   │       └── icon.icns
│   ├── components.json
│   ├── e2e/
│   │   └── app.spec.ts
│   ├── electron-builder.json
│   ├── eslint.config.js
│   ├── index.html
│   ├── package.json
│   ├── playwright.config.ts
│   ├── postcss.config.js
│   ├── src/
│   │   ├── app/
│   │   │   ├── App.tsx
│   │   │   ├── index.css
│   │   │   ├── main.tsx
│   │   │   └── vite-env.d.ts
│   │   ├── components/
│   │   │   ├── AppAlert/
│   │   │   │   └── SettingsAlert.tsx
│   │   │   ├── Authentication/
│   │   │   │   ├── CreateAccount.tsx
│   │   │   │   └── SelectAccount.tsx
│   │   │   ├── Chat/
│   │   │   │   ├── Chat.tsx
│   │   │   │   └── ChatComponents/
│   │   │   │       ├── ChatHeader.tsx
│   │   │   │       ├── ChatInput.tsx
│   │   │   │       ├── ChatMessage.tsx
│   │   │   │       ├── ChatMessagesArea.tsx
│   │   │   │       ├── LoadingIndicator.tsx
│   │   │   │       ├── NewConvoWelcome.tsx
│   │   │   │       ├── ReasoningMessage.tsx
│   │   │   │       ├── StreamingMessage.tsx
│   │   │   │       ├── StreamingReasoningMessage.tsx
│   │   │   │       ├── SyntaxHightlightedCode.tsx
│   │   │   │       └── suggestions.tsx
│   │   │   ├── CollectionModals/
│   │   │   │   ├── CollectionComponents/
│   │   │   │   │   ├── AddLibrary.tsx
│   │   │   │   │   ├── DataStoreSelect.tsx
│   │   │   │   │   ├── FIlesInCollection.tsx
│   │   │   │   │   ├── Ingest.tsx
│   │   │   │   │   ├── IngestProgress.tsx
│   │   │   │   │   ├── IngestTabs/
│   │   │   │   │   │   ├── FileIngestTab.tsx
│   │   │   │   │   │   └── LinkIngestTab.tsx
│   │   │   │   │   └── ingestTypes.tsx
│   │   │   │   └── LibraryModal.tsx
│   │   │   ├── FileExplorer/
│   │   │   │   └── FileExplorer.tsx
│   │   │   ├── Header/
│   │   │   │   ├── Header.tsx
│   │   │   │   └── HeaderComponents/
│   │   │   │       ├── MainWindowControl.tsx
│   │   │   │       ├── Search.tsx
│   │   │   │       ├── SettingsDialog.tsx
│   │   │   │       ├── ToolsDialog.tsx
│   │   │   │       └── WinLinuxControls.tsx
│   │   │   ├── History/
│   │   │   │   └── History.tsx
│   │   │   ├── SettingsModal/
│   │   │   │   ├── SettingsComponents/
│   │   │   │   │   ├── ChatSettings.tsx
│   │   │   │   │   ├── DevIntegration.tsx
│   │   │   │   │   ├── LLMModels/
│   │   │   │   │   │   ├── AddLocalModel.tsx
│   │   │   │   │   │   ├── AddOllamaModel.tsx
│   │   │   │   │   │   ├── AzureOpenAI.tsx
│   │   │   │   │   │   ├── CustomLLM.tsx
│   │   │   │   │   │   ├── External.tsx
│   │   │   │   │   │   ├── ExternalOllama.tsx
│   │   │   │   │   │   ├── LocalLLM.tsx
│   │   │   │   │   │   ├── Ollama.tsx
│   │   │   │   │   │   └── Openrouter.tsx
│   │   │   │   │   ├── LLMPanel.tsx
│   │   │   │   │   └── providers/
│   │   │   │   │       ├── SvgIcon.tsx
│   │   │   │   │       ├── defaultsProviderModels.tsx
│   │   │   │   │       └── providerIcons.tsx
│   │   │   │   └── SettingsModal.tsx
│   │   │   ├── Tools/
│   │   │   │   ├── ToolComponents/
│   │   │   │   │   ├── AddTools.tsx
│   │   │   │   │   └── EnableTools.tsx
│   │   │   │   └── Tools.tsx
│   │   │   └── ui/
│   │   │       ├── alert.tsx
│   │   │       ├── avatar.tsx
│   │   │       ├── badge.tsx
│   │   │       ├── button.tsx
│   │   │       ├── buttonVariants.tsx
│   │   │       ├── card.tsx
│   │   │       ├── command.tsx
│   │   │       ├── dialog.tsx
│   │   │       ├── form.tsx
│   │   │       ├── icons.tsx
│   │   │       ├── input.tsx
│   │   │       ├── label.tsx
│   │   │       ├── menubar.tsx
│   │   │       ├── popover.tsx
│   │   │       ├── progress.tsx
│   │   │       ├── radio-group.tsx
│   │   │       ├── scroll-area.tsx
│   │   │       ├── select.tsx
│   │   │       ├── separator.tsx
│   │   │       ├── sheet.tsx
│   │   │       ├── slider.tsx
│   │   │       ├── switch.tsx
│   │   │       ├── tabs.tsx
│   │   │       ├── textarea.tsx
│   │   │       ├── toast.tsx
│   │   │       ├── toaster.tsx
│   │   │       └── tooltip.tsx
│   │   ├── context/
│   │   │   ├── ChatInputContext.tsx
│   │   │   ├── LibraryContext.tsx
│   │   │   ├── SysSettingsContext.tsx
│   │   │   ├── UserClientProviders.tsx
│   │   │   ├── UserContext.tsx
│   │   │   ├── ViewContext.tsx
│   │   │   ├── useChatInput.tsx
│   │   │   ├── useLibrary.tsx
│   │   │   ├── useSysSettings.tsx
│   │   │   ├── useUser.tsx
│   │   │   └── useView.tsx
│   │   ├── data/
│   │   │   ├── models.ts
│   │   │   └── sysSpecs.ts
│   │   ├── electron/
│   │   │   ├── authentication/
│   │   │   │   ├── devApi.ts
│   │   │   │   ├── secret.ts
│   │   │   │   └── token.ts
│   │   │   ├── crawl/
│   │   │   │   ├── cancelWebcrawl.ts
│   │   │   │   └── webcrawl.ts
│   │   │   ├── db.ts
│   │   │   ├── embedding/
│   │   │   │   ├── cancelEmbed.ts
│   │   │   │   └── vectorstoreQuery.ts
│   │   │   ├── handlers/
│   │   │   │   ├── azureHandlers.ts
│   │   │   │   ├── chatHandlers.ts
│   │   │   │   ├── closeEventHandler.ts
│   │   │   │   ├── collectionHandlers.ts
│   │   │   │   ├── customApiHandlers.ts
│   │   │   │   ├── dbHandlers.ts
│   │   │   │   ├── fileHandlers.ts
│   │   │   │   ├── handlers.test.ts
│   │   │   │   ├── ipcHandlers.ts
│   │   │   │   ├── localModelHandlers.ts
│   │   │   │   ├── menuHandlers.ts
│   │   │   │   ├── ollamaHandlers.ts
│   │   │   │   ├── openRouterHandlers.ts
│   │   │   │   └── voiceHandlers.ts
│   │   │   ├── helpers/
│   │   │   │   └── spawnAsync.ts
│   │   │   ├── llms/
│   │   │   │   ├── agentLayer/
│   │   │   │   │   ├── anthropicAgent.ts
│   │   │   │   │   ├── geminiAgent.ts
│   │   │   │   │   ├── ollamaAgent.ts
│   │   │   │   │   ├── openAiAgent.ts
│   │   │   │   │   └── tools/
│   │   │   │   │       └── websearch.ts
│   │   │   │   ├── apiCheckProviders/
│   │   │   │   │   ├── anthropic.ts
│   │   │   │   │   ├── deepseek.ts
│   │   │   │   │   ├── gemini.ts
│   │   │   │   │   ├── openai.ts
│   │   │   │   │   ├── openrouter.ts
│   │   │   │   │   └── xai.ts
│   │   │   │   ├── chatCompletion.ts
│   │   │   │   ├── generateTitle.ts
│   │   │   │   ├── keyValidation.ts
│   │   │   │   ├── llmHelpers/
│   │   │   │   │   ├── addAssistantMessage.ts
│   │   │   │   │   ├── addUserMessage.ts
│   │   │   │   │   ├── collectionData.ts
│   │   │   │   │   ├── countMessageTokens.ts
│   │   │   │   │   ├── getUserPrompt.ts
│   │   │   │   │   ├── ifNewConvo.ts
│   │   │   │   │   ├── prepMessages.ts
│   │   │   │   │   ├── providerInit.ts
│   │   │   │   │   ├── providersMap.ts
│   │   │   │   │   ├── returnReasoningPrompt.ts
│   │   │   │   │   ├── returnSystemPrompt.ts
│   │   │   │   │   ├── sendMessageChunk.ts
│   │   │   │   │   └── truncateMessages.ts
│   │   │   │   ├── llms.ts
│   │   │   │   ├── providers/
│   │   │   │   │   ├── anthropic.ts
│   │   │   │   │   ├── azureOpenAI.ts
│   │   │   │   │   ├── customEndpoint.ts
│   │   │   │   │   ├── deepseek.ts
│   │   │   │   │   ├── externalOllama.ts
│   │   │   │   │   ├── gemini.ts
│   │   │   │   │   ├── localModel.ts
│   │   │   │   │   ├── ollama.ts
│   │   │   │   │   ├── openai.ts
│   │   │   │   │   ├── openrouter.ts
│   │   │   │   │   └── xai.ts
│   │   │   │   └── reasoningLayer/
│   │   │   │       └── openAiChainOfThought.ts
│   │   │   ├── loadingWindow.ts
│   │   │   ├── localLLMs/
│   │   │   │   ├── getDirModels.ts
│   │   │   │   ├── loadModel.ts
│   │   │   │   ├── modelInfo.ts
│   │   │   │   └── unloadModel.ts
│   │   │   ├── main.ts
│   │   │   ├── mainWindow.test.ts
│   │   │   ├── mainWindow.ts
│   │   │   ├── menu.ts
│   │   │   ├── ollama/
│   │   │   │   ├── checkOllama.ts
│   │   │   │   ├── fetchLocalModels.ts
│   │   │   │   ├── getRunningModels.ts
│   │   │   │   ├── isOllamaRunning.ts
│   │   │   │   ├── ollamaPath.ts
│   │   │   │   ├── pullModel.ts
│   │   │   │   ├── runOllama.ts
│   │   │   │   ├── unloadAllModels.ts
│   │   │   │   └── unloadModel.ts
│   │   │   ├── pathResolver.ts
│   │   │   ├── preload.cts
│   │   │   ├── python/
│   │   │   │   ├── ensurePythonAndVenv.ts
│   │   │   │   ├── extractFromAsar.ts
│   │   │   │   ├── getLinuxPackageManager.ts
│   │   │   │   ├── ifFedora.ts
│   │   │   │   ├── installDependencies.ts
│   │   │   │   ├── installLlamaCpp.ts
│   │   │   │   ├── killProcessOnPort.ts
│   │   │   │   ├── python.test.ts
│   │   │   │   ├── runWithPrivileges.ts
│   │   │   │   └── startAndStopPython.ts
│   │   │   ├── resourceManager.ts
│   │   │   ├── specs/
│   │   │   │   └── systemSpecs.ts
│   │   │   ├── storage/
│   │   │   │   ├── deleteCollection.ts
│   │   │   │   ├── getFiles.ts
│   │   │   │   ├── getUserFiles.ts
│   │   │   │   ├── newFile.ts
│   │   │   │   ├── openCollectionFolder.ts
│   │   │   │   ├── removeFileorFolder.ts
│   │   │   │   ├── renameFile.ts
│   │   │   │   └── websiteFetch.ts
│   │   │   ├── tray.test.ts
│   │   │   ├── tray.ts
│   │   │   ├── tsconfig.json
│   │   │   ├── util.ts
│   │   │   ├── voice/
│   │   │   │   └── audioTranscription.ts
│   │   │   └── youtube/
│   │   │       └── youtubeIngest.ts
│   │   ├── hooks/
│   │   │   ├── use-toast.ts
│   │   │   ├── useAppInitialization.tsx
│   │   │   ├── useChatLogic.ts
│   │   │   ├── useChatManagement.ts
│   │   │   ├── useConversationManagement.ts
│   │   │   ├── useModelManagement.ts
│   │   │   ├── useStatistics.tsx
│   │   │   └── useUIState.ts
│   │   ├── lib/
│   │   │   ├── shikiHightlight.ts
│   │   │   └── utils.ts
│   │   ├── loading.html
│   │   ├── types/
│   │   │   └── contextTypes/
│   │   │       ├── LibraryContextTypes.ts
│   │   │       ├── SystemSettingsTypes.ts
│   │   │       ├── UserContextType.ts
│   │   │       └── UserViewTypes.ts
│   │   └── utils/
│   │       ├── chatUtilts.ts
│   │       └── webAudioRecorder.ts
│   ├── tailwind.config.js
│   ├── tsconfig.app.json
│   ├── tsconfig.json
│   ├── tsconfig.node.json
│   ├── types.d.ts
│   ├── vite.config.d.ts
│   ├── vite.config.js
│   └── vite.config.ts
├── LICENSE
└── README.md
Download .txt
SYMBOL INDEX (573 symbols across 217 files)

FILE: Backend/ensure_dependencies.py
  function find_python310 (line 18) | def find_python310():
  function create_venv (line 37) | def create_venv(venv_path=None):
  function get_venv_python (line 56) | def get_venv_python(venv_path):
  function install_package (line 62) | def install_package(python_path, package):
  function get_installed_packages (line 75) | def get_installed_packages(python_path):
  function async_init_store (line 85) | async def async_init_store():
  function get_package_version (line 119) | def get_package_version(python_path, package_name):
  function install_requirements (line 139) | def install_requirements(custom_venv_path=None):

FILE: Backend/main.py
  function timeout_middleware (line 49) | async def timeout_middleware(request: Request, call_next):
  function chat_completion (line 65) | async def chat_completion(request: ChatCompletionRequest, user_id: str =...
  function get_model_info (line 88) | async def get_model_info(user_id: str = Depends(verify_token_or_api_key)):
  function load_model_endpoint (line 96) | async def load_model_endpoint(request: ModelLoadRequest, user_id: str = ...
  function unload_model_endpoint (line 135) | async def unload_model_endpoint(user_id: str = Depends(verify_token_or_a...
  function webcrawl_endpoint (line 159) | async def webcrawl_endpoint(data: WebCrawlRequest, user_id: str = Depend...
  function transcribe_audio_endpoint (line 197) | async def transcribe_audio_endpoint(audio_file: UploadFile = File(...), ...
  function add_embedding (line 204) | async def add_embedding(data: EmbeddingRequest, user_id: str = Depends(v...
  function youtube_ingest (line 253) | async def youtube_ingest(data: YoutubeTranscriptRequest, user_id: str = ...
  function cancel_embedding (line 273) | async def cancel_embedding(user_id: str = Depends(verify_token)):
  function restart_server (line 284) | async def restart_server(user_id: str = Depends(verify_token)):
  function vector_query (line 305) | async def vector_query(data: VectorStoreQueryRequest, user_id: str = Dep...
  function delete_collection (line 317) | async def delete_collection(data: DeleteCollectionRequest, user_id: str ...
  function api_vector (line 325) | async def api_vector(query_request: QueryRequest, user_id: str = Depends...
  function api_llm (line 340) | async def api_llm(query_request: ChatCompletionRequest, user_id: str = D...
  function api_rag (line 355) | async def api_rag(query_request: QueryRequest, user_id: str = Depends(ap...
  function cancel_crawl (line 373) | async def cancel_crawl(user_id: str = Depends(verify_token)):

FILE: Backend/src/authentication/api_key_authorization.py
  function get_optional_token (line 19) | async def get_optional_token(token: Optional[str] = Depends(oauth2_schem...
  function api_key_auth (line 23) | async def api_key_auth(token: Optional[str] = Depends(get_optional_token)):

FILE: Backend/src/authentication/token.py
  function get_optional_token (line 19) | async def get_optional_token(token: Optional[str] = Depends(oauth2_schem...
  function verify_token (line 23) | async def verify_token(token: Optional[str] = Depends(get_optional_token)):
  function optional_auth (line 40) | async def optional_auth(request: Request):
  function verify_token_or_api_key (line 51) | async def verify_token_or_api_key(token: Optional[str] = Depends(get_opt...

FILE: Backend/src/data/dataFetch/webcrawler.py
  class WebCrawler (line 15) | class WebCrawler:
    method __init__ (line 16) | def __init__(self, base_url, user_id, user_name, collection_id, collec...
    method _get_collection_path (line 41) | def _get_collection_path(self, user_id, user_name, collection_id, coll...
    method _print_progress (line 54) | def _print_progress(self):
    method is_valid_url (line 71) | def is_valid_url(self, url):
    method save_page (line 101) | def save_page(self, url, html_content):
    method get_links (line 133) | def get_links(self, soup, current_url):
    method scrape_page (line 156) | def scrape_page(self, url):
    method scrape (line 188) | def scrape(self):
    method _update_progress (line 253) | def _update_progress(self, future):
    method _process_url (line 271) | def _process_url(self, url):
    method save_progress (line 304) | def save_progress(self):

FILE: Backend/src/data/dataFetch/youtube.py
  function _get_collection_path (line 22) | def _get_collection_path(user_id, user_name, collection_id, collection_n...
  function youtube_transcript (line 36) | def youtube_transcript(request: YoutubeTranscriptRequest) -> Generator[d...
  function _time_to_seconds (line 331) | def _time_to_seconds(time_str):

FILE: Backend/src/data/dataIntake/csvFallbackSplitting.py
  function split_csv_text (line 8) | def split_csv_text(text: str, file_path: str, metadata: dict = None) -> ...

FILE: Backend/src/data/dataIntake/fileTypes/loadX.py
  function load_pdf (line 15) | async def load_pdf(file_path):
  function load_py (line 57) | async def load_py(file):
  function load_docx (line 67) | async def load_docx(file):
  function load_txt (line 78) | async def load_txt(file):
  function load_md (line 87) | async def load_md(file):
  function load_html (line 99) | async def load_html(file_path: str) -> str:
  function load_csv (line 131) | async def load_csv(file):
  function load_json (line 141) | async def load_json(file):
  function load_pptx (line 151) | def load_pptx(file):
  function load_xlsx (line 165) | def load_xlsx(file):
  function load_docx (line 174) | async def load_docx(file):

FILE: Backend/src/data/dataIntake/getHtmlFiles.py
  function get_html_files (line 4) | def get_html_files(directory):

FILE: Backend/src/data/dataIntake/loadFile.py
  function load_document (line 32) | async def load_document(file: str):

FILE: Backend/src/data/dataIntake/textSplitting.py
  function split_text (line 6) | def split_text(text: str, file_path: str, metadata: dict = None) -> list:

FILE: Backend/src/data/database/checkAPIKey.py
  function check_api_key (line 4) | def check_api_key(user_id: int):

FILE: Backend/src/data/database/db.py
  function get_user_data_path (line 9) | def get_user_data_path():
  function db (line 27) | def db():

FILE: Backend/src/data/database/getCollectionInfo.py
  class CollectionSettings (line 7) | class CollectionSettings:
  function get_collection_settings (line 19) | def get_collection_settings(user_id: str, collection_name: str) -> Optio...

FILE: Backend/src/data/database/getLLMApiKey.py
  function get_llm_api_key (line 4) | def get_llm_api_key(user_id, provider):

FILE: Backend/src/endpoint/api.py
  function chat_completion_stream (line 18) | async def chat_completion_stream(request: ChatCompletionRequest) -> Asyn...

FILE: Backend/src/endpoint/deleteStore.py
  function delete_vectorstore_collection (line 8) | def delete_vectorstore_collection(data: DeleteCollectionRequest):

FILE: Backend/src/endpoint/devApiCall.py
  function vector_call (line 10) | def vector_call(query_request: VectorStoreQueryRequest, user_id: str):
  function rag_call (line 37) | async def rag_call(query_request: VectorStoreQueryRequest, user_id: str):
  function llm_call (line 69) | async def llm_call(query_request: ChatCompletionRequest, user_id: str):

FILE: Backend/src/endpoint/embed.py
  function embed (line 19) | async def embed(data: EmbeddingRequest) -> AsyncGenerator[dict, None]:

FILE: Backend/src/endpoint/models.py
  class EmbeddingRequest (line 5) | class EmbeddingRequest(BaseModel):
  class ModelLoadRequest (line 16) | class ModelLoadRequest(BaseModel):
    class Config (line 74) | class Config:
  class VectorStoreQueryRequest (line 77) | class VectorStoreQueryRequest(BaseModel):
  class YoutubeTranscriptRequest (line 99) | class YoutubeTranscriptRequest(BaseModel):
  class DeleteCollectionRequest (line 110) | class DeleteCollectionRequest(BaseModel):
  class WebCrawlRequest (line 117) | class WebCrawlRequest(BaseModel):
  class QueryRequest (line 129) | class QueryRequest(BaseModel):
  class Message (line 148) | class Message(BaseModel):
  class ChatCompletionRequest (line 155) | class ChatCompletionRequest(BaseModel):
  class GenerateRequest (line 173) | class GenerateRequest(BaseModel):

FILE: Backend/src/endpoint/ragQuery.py
  function rag_query (line 6) | async def rag_query(data: VectorStoreQueryRequest, collectionInfo):

FILE: Backend/src/endpoint/transcribe.py
  function transcribe_audio (line 12) | async def transcribe_audio(audio_file: UploadFile = File(...), model_nam...

FILE: Backend/src/endpoint/vectorQuery.py
  function query_vectorstore (line 6) | def query_vectorstore(data: VectorStoreQueryRequest, is_local: bool):

FILE: Backend/src/endpoint/webcrawl.py
  function webcrawl (line 15) | def webcrawl(data: WebCrawlRequest, cancel_event=None) -> Generator[dict...

FILE: Backend/src/llms/llmQuery.py
  function llm_query (line 9) | async def llm_query(data: ChatCompletionRequest, api_key: Optional[str] ...

FILE: Backend/src/llms/messages/formMessages.py
  function form_messages (line 4) | def form_messages(data: QueryRequest):

FILE: Backend/src/llms/providers/local.py
  function local_query (line 13) | async def local_query(data: ChatCompletionRequest):

FILE: Backend/src/llms/providers/ollama.py
  function ollama_query (line 8) | def ollama_query(data: QueryRequest, messages: list = None):

FILE: Backend/src/llms/providers/ooba.py
  function ooba_query (line 5) | def ooba_query(data: QueryRequest, messages: list = None):

FILE: Backend/src/llms/providers/openai.py
  function openai_query (line 6) | def openai_query(data: QueryRequest, api_key: Optional[str] = None, mess...

FILE: Backend/src/models/exceptions.py
  class ModelLoadError (line 1) | class ModelLoadError(Exception):
  class ModelNotFoundError (line 5) | class ModelNotFoundError(Exception):
  class ModelDownloadError (line 9) | class ModelDownloadError(Exception):

FILE: Backend/src/models/loaders/base.py
  class BaseLoader (line 13) | class BaseLoader(ABC):
    method __init__ (line 26) | def __init__(self, request: ModelLoadRequest, manager: Any):
    method load (line 39) | def load(self) -> Tuple[Any, Any]:
    method get_metadata (line 52) | def get_metadata(self) -> Optional[Dict[str, Any]]:
    method get_config (line 62) | def get_config(self) -> Dict[str, Any]:
    method _resolve_model_path (line 71) | def _resolve_model_path(self) -> Path:
    method get_request_dict (line 94) | def get_request_dict(self) -> Dict[str, Any]:
    method log_loading_info (line 103) | def log_loading_info(self) -> None:
    method cleanup (line 111) | def cleanup(model: Any) -> None:
    method validate_model_path (line 125) | def validate_model_path(self) -> None:
    method get_common_metadata (line 136) | def get_common_metadata(self) -> Dict[str, Any]:
    method validate_request (line 151) | def validate_request(self) -> None:
    method check_dependencies (line 164) | def check_dependencies(self) -> None:
    method prepare_loading (line 173) | def prepare_loading(self) -> None:
    method get_device_config (line 192) | def get_device_config(self) -> Dict[str, Any]:
    method get_memory_info (line 208) | def get_memory_info(self) -> Dict[str, Any]:
    method get_system_info (line 226) | def get_system_info(self) -> Dict[str, Any]:
    method log_error (line 243) | def log_error(self, error: Exception, context: str = "") -> None:
    method __repr__ (line 254) | def __repr__(self) -> str:

FILE: Backend/src/models/loaders/exllama.py
  class ExLlamaV2Loader (line 11) | class ExLlamaV2Loader(BaseLoader):
    method load (line 14) | def load(self) -> Tuple[Any, Any]:
    method get_metadata (line 66) | def get_metadata(self) -> Optional[Dict[str, Any]]:
    method get_config (line 76) | def get_config(self) -> Dict[str, Any]:
  class ExLlamaV2HFLoader (line 88) | class ExLlamaV2HFLoader(BaseLoader):
    method load (line 91) | def load(self) -> Tuple[Any, Any]:
    method get_metadata (line 104) | def get_metadata(self) -> Optional[Dict[str, Any]]:
    method get_config (line 108) | def get_config(self) -> Dict[str, Any]:

FILE: Backend/src/models/loaders/hqq.py
  class HQQLoader (line 13) | class HQQLoader(BaseLoader):
    method load (line 16) | def load(self) -> Tuple[Any, Any]:
    method _download_model (line 60) | def _download_model(self) -> None:
    method get_metadata (line 126) | def get_metadata(self) -> Optional[Dict[str, Any]]:
    method get_config (line 137) | def get_config(self) -> Dict[str, Any]:

FILE: Backend/src/models/loaders/llamaccphf.py
  class LlamaCppHFLoader (line 6) | class LlamaCppHFLoader(LlamaCppLoader):
    method load (line 12) | def load(self) -> Tuple[Any, Any]:

FILE: Backend/src/models/loaders/llamacpp.py
  class LlamaCppLoader (line 16) | class LlamaCppLoader(BaseLoader):
    method __init__ (line 22) | def __init__(self, request: ModelLoadRequest, manager: Any):
    method load (line 27) | def load(self) -> Tuple[Any, Any]:
    method _get_model_path (line 96) | def _get_model_path(self) -> Path:
    method _download_model (line 197) | def _download_model(self, model_dir: Path) -> Path:
    method _download_file (line 240) | def _download_file(self, url: str, path: Path, headers: Dict[str, str]...
    method _get_model_params (line 259) | def _get_model_params(self) -> Dict[str, Any]:
    method _configure_gpu_layers (line 317) | def _configure_gpu_layers(self) -> int:
    method _setup_cache (line 335) | def _setup_cache(self, model: Any) -> None:
    method get_metadata (line 349) | def get_metadata(self) -> Optional[Dict[str, Any]]:
    method get_config (line 383) | def get_config(self) -> Dict[str, Any]:
    method cleanup (line 394) | def cleanup(model: Any) -> None:

FILE: Backend/src/models/loaders/tensorrt.py
  class TensorRTLoader (line 11) | class TensorRTLoader(BaseLoader):
    method load (line 14) | def load(self) -> Tuple[Any, Any]:
    method get_metadata (line 45) | def get_metadata(self) -> Optional[Dict[str, Any]]:
    method get_config (line 56) | def get_config(self) -> Dict[str, Any]:

FILE: Backend/src/models/loaders/transformers.py
  class TransformersLoader (line 16) | class TransformersLoader(BaseLoader):
    method load (line 22) | def load(self) -> Tuple[Any, Any]:
    method _get_model_kwargs (line 123) | def _get_model_kwargs(self) -> Dict[str, Any]:
    method _get_quantization_config (line 178) | def _get_quantization_config(self) -> BitsAndBytesConfig:
    method get_metadata (line 188) | def get_metadata(self) -> Optional[Dict[str, Any]]:
    method get_config (line 208) | def get_config(self) -> Dict[str, Any]:
    method _make_json_serializable (line 231) | def _make_json_serializable(self, obj: Any) -> Any:
    method cleanup (line 242) | def cleanup(model: PreTrainedModel) -> None:

FILE: Backend/src/models/manager.py
  class ModelManager (line 22) | class ModelManager:
    method __init__ (line 28) | def __init__(self):
    method check_platform_compatibility (line 49) | def check_platform_compatibility(self, model_type: str) -> Tuple[bool,...
    method get_model_metadata (line 53) | def get_model_metadata(self, request: ModelLoadRequest) -> Optional[Di...
    method is_model_loaded (line 78) | def is_model_loaded(self) -> bool:
    method get_model_info (line 82) | def get_model_info(self) -> Dict[str, Any]:
    method clear_model (line 100) | def clear_model(self) -> None:
    method _make_json_serializable (line 136) | def _make_json_serializable(self, obj: Any) -> Any:
    method load_model (line 146) | def load_model(self, request: ModelLoadRequest) -> Tuple[Any, Any]:
    method _detect_model_type (line 261) | def _detect_model_type(self, request: ModelLoadRequest) -> str:

FILE: Backend/src/models/streamer.py
  class StopNowException (line 14) | class StopNowException(Exception):
  class StreamingStoppingCriteria (line 18) | class StreamingStoppingCriteria:
    method __init__ (line 21) | def __init__(self):
    method __call__ (line 24) | def __call__(self, input_ids, scores) -> bool:
  class StopOnInterrupt (line 28) | class StopOnInterrupt(StreamingStoppingCriteria):
    method __init__ (line 31) | def __init__(self, stop_signal=None):
    method __call__ (line 35) | def __call__(self, input_ids, scores) -> bool:
  class StreamIterator (line 39) | class StreamIterator(AsyncIterator[str], Iterator[str]):
    method __init__ (line 42) | def __init__(self, func: Callable, callback: Optional[Callable] = None):
    method _queue_callback (line 51) | def _queue_callback(self, data):
    method _start_generation (line 68) | def _start_generation(self):
    method __iter__ (line 83) | def __iter__(self) -> Iterator[str]:
    method __next__ (line 87) | def __next__(self) -> str:
    method __aiter__ (line 96) | def __aiter__(self):
    method __anext__ (line 100) | async def __anext__(self) -> str:
    method __enter__ (line 114) | def __enter__(self):
    method __exit__ (line 117) | def __exit__(self, exc_type, exc_val, exc_tb):
  class TextGenerator (line 121) | class TextGenerator:
    method __init__ (line 124) | def __init__(self, model, tokenizer, device: str = "cpu"):
    method _log_cuda_status (line 131) | def _log_cuda_status(self):
    method _create_stream_response (line 142) | def _create_stream_response(self, text: str, generated_text: str, is_f...
    method _stream_tokens (line 157) | def _stream_tokens(self, callback: Callable, generator, decode_func: C...
    method generate (line 171) | def generate(self,

FILE: Backend/src/models/utils/detect_type.py
  function detect_model_type (line 9) | def detect_model_type(model_path: Union[str, Path]) -> str:

FILE: Backend/src/models/utils/device.py
  function get_device (line 5) | def get_device(request: ModelLoadRequest) -> str:

FILE: Backend/src/models/utils/download.py
  function download_file_with_progress (line 10) | def download_file_with_progress(url: str, file_path: Path, headers: Opti...
  function get_hf_repo_files (line 36) | def get_hf_repo_files(repo_id: str, hf_token: Optional[str] = None) -> L...
  function download_hf_model_files (line 49) | def download_hf_model_files(repo_id: str, model_path: Path, required_fil...
  function find_best_gguf_file (line 80) | def find_best_gguf_file(files: List[Dict]) -> Optional[Dict]:
  function download_gguf_model (line 93) | def download_gguf_model(repo_id: str, model_path: Path, hf_token: Option...

FILE: Backend/src/models/utils/platform.py
  function check_platform_compatibility (line 5) | def check_platform_compatibility(model_type: str) -> Tuple[bool, str]:

FILE: Backend/src/vectorstorage/embeddings.py
  function chunk_list (line 4) | def chunk_list(lst, n):
  function embed_chunk (line 10) | def embed_chunk(args):

FILE: Backend/src/vectorstorage/helpers/sanitizeCollectionName.py
  function sanitize_collection_name (line 4) | def sanitize_collection_name(name):

FILE: Backend/src/vectorstorage/init_store.py
  function get_models_dir (line 9) | def get_models_dir():
  function init_store (line 25) | async def init_store(model_name: str = "HIT-TMG/KaLM-embedding-multiling...

FILE: Backend/src/vectorstorage/vectorstore.py
  function get_app_data_dir (line 12) | def get_app_data_dir():
  function get_vectorstore (line 27) | def get_vectorstore(api_key: str, collection_name: str, use_local_embedd...

FILE: Backend/src/voice/voice_to_text.py
  function initialize_model (line 21) | def initialize_model(model_name: str = "base"):

FILE: Backend/tests/testApi.py
  function test_embed_endpoint (line 8) | def test_embed_endpoint():
  function test_concurrent_embedding (line 22) | def test_concurrent_embedding():
  function test_youtube_ingest (line 43) | def test_youtube_ingest():
  function test_cancel_embedding (line 56) | def test_cancel_embedding():
  function test_query (line 83) | def test_query():

FILE: Backend/tests/test_voice.py
  function create_test_wav (line 13) | def create_test_wav(duration=3.0, frequency=440.0, sample_rate=16000):
  function test_voice_to_text_basic (line 37) | def test_voice_to_text_basic():
  function test_voice_to_text_models (line 58) | def test_voice_to_text_models():
  function test_voice_to_text_invalid_audio (line 79) | def test_voice_to_text_invalid_audio():
  function test_voice_to_text_missing_file (line 98) | def test_voice_to_text_missing_file():
  function test_voice_to_text_long_audio (line 103) | def test_voice_to_text_long_audio():
  function test_voice_to_text_different_frequencies (line 122) | def test_voice_to_text_different_frequencies():
  function record_audio (line 141) | def record_audio(duration=5, sample_rate=16000):
  function test_live_voice_to_text (line 151) | def test_live_voice_to_text(capsys):

FILE: Frontend/e2e/app.spec.ts
  function waitForMainWindow (line 16) | async function waitForMainWindow(timeout = 45000): Promise<Page> {
  function waitForPreloadScript (line 30) | async function waitForPreloadScript(page: Page): Promise<unknown> {
  type MenuItem (line 123) | interface MenuItem {

FILE: Frontend/src/app/App.tsx
  function App (line 14) | function App() {

FILE: Frontend/src/components/AppAlert/SettingsAlert.tsx
  function SettingsAlert (line 10) | function SettingsAlert() {

FILE: Frontend/src/components/Authentication/CreateAccount.tsx
  function CreateAccount (line 18) | function CreateAccount() {

FILE: Frontend/src/components/Authentication/SelectAccount.tsx
  function SelectAccount (line 23) | function SelectAccount({ users }: { users: User[] }) {

FILE: Frontend/src/components/Chat/Chat.tsx
  function Chat (line 12) | function Chat() {

FILE: Frontend/src/components/Chat/ChatComponents/ChatHeader.tsx
  function ChatHeader (line 9) | function ChatHeader() {

FILE: Frontend/src/components/Chat/ChatComponents/ChatMessagesArea.tsx
  function ChatMessagesArea (line 8) | function ChatMessagesArea({

FILE: Frontend/src/components/Chat/ChatComponents/LoadingIndicator.tsx
  function LoadingIndicator (line 4) | function LoadingIndicator() {

FILE: Frontend/src/components/Chat/ChatComponents/NewConvoWelcome.tsx
  function NewConvoWelcome (line 15) | function NewConvoWelcome() {

FILE: Frontend/src/components/Chat/ChatComponents/ReasoningMessage.tsx
  type ReasoningMessageProps (line 20) | interface ReasoningMessageProps {

FILE: Frontend/src/components/Chat/ChatComponents/StreamingMessage.tsx
  function StreamingMessage (line 17) | function StreamingMessage({ content }: { content: string }) {

FILE: Frontend/src/components/Chat/ChatComponents/SyntaxHightlightedCode.tsx
  type SyntaxHighlightedCodeProps (line 6) | interface SyntaxHighlightedCodeProps {
  function SyntaxHighlightedCode (line 11) | function SyntaxHighlightedCode({
  function normalizeLanguage (line 61) | function normalizeLanguage(language: string, code: string): string {

FILE: Frontend/src/components/CollectionModals/CollectionComponents/AddLibrary.tsx
  function AddLibrary (line 28) | function AddLibrary() {

FILE: Frontend/src/components/CollectionModals/CollectionComponents/DataStoreSelect.tsx
  function DataStoreSelect (line 23) | function DataStoreSelect() {

FILE: Frontend/src/components/CollectionModals/CollectionComponents/FIlesInCollection.tsx
  function FilesInCollection (line 22) | function FilesInCollection() {

FILE: Frontend/src/components/CollectionModals/CollectionComponents/Ingest.tsx
  function IngestModal (line 11) | function IngestModal({

FILE: Frontend/src/components/CollectionModals/CollectionComponents/IngestProgress.tsx
  function IngestProgress (line 7) | function IngestProgress({ truncate }: { truncate?: boolean }) {

FILE: Frontend/src/components/CollectionModals/CollectionComponents/IngestTabs/FileIngestTab.tsx
  function FileTab (line 8) | function FileTab() {

FILE: Frontend/src/components/CollectionModals/CollectionComponents/IngestTabs/LinkIngestTab.tsx
  function LinkIngestTab (line 10) | function LinkIngestTab() {

FILE: Frontend/src/components/CollectionModals/LibraryModal.tsx
  function LibraryModal (line 6) | function LibraryModal() {

FILE: Frontend/src/components/FileExplorer/FileExplorer.tsx
  type FileNode (line 24) | interface FileNode {
  type FileItemProps (line 31) | interface FileItemProps {
  function buildFileTree (line 249) | function buildFileTree(files: string[]): FileNode[] {
  function FileExplorer (line 281) | function FileExplorer() {

FILE: Frontend/src/components/Header/Header.tsx
  function Header (line 13) | function Header() {

FILE: Frontend/src/components/Header/HeaderComponents/Search.tsx
  type ConversationWithTimestamp (line 6) | type ConversationWithTimestamp = Conversation & {
  function SearchComponent (line 10) | function SearchComponent() {

FILE: Frontend/src/components/Header/HeaderComponents/SettingsDialog.tsx
  function SettingsDialog (line 15) | function SettingsDialog() {

FILE: Frontend/src/components/Header/HeaderComponents/ToolsDialog.tsx
  function ToolsDialog (line 13) | function ToolsDialog() {

FILE: Frontend/src/components/Header/HeaderComponents/WinLinuxControls.tsx
  function WinLinuxControls (line 12) | function WinLinuxControls() {

FILE: Frontend/src/components/History/History.tsx
  function History (line 16) | function History() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/ChatSettings.tsx
  function ChatSettings (line 38) | function ChatSettings() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/DevIntegration.tsx
  type APIKey (line 22) | interface APIKey {
  function DevIntegration (line 29) | function DevIntegration() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AddLocalModel.tsx
  function AddLocalModel (line 16) | function AddLocalModel() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AddOllamaModel.tsx
  function AddOllamaModel (line 16) | function AddOllamaModel() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AzureOpenAI.tsx
  function AzureOpenAI (line 14) | function AzureOpenAI() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/CustomLLM.tsx
  function CustomLLM (line 7) | function CustomLLM() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/External.tsx
  type ExternalProps (line 6) | interface ExternalProps {
  function External (line 11) | function External({

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/ExternalOllama.tsx
  function ExternalOllama (line 23) | function ExternalOllama() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/LocalLLM.tsx
  function LocalLLM (line 34) | function LocalLLM() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/Ollama.tsx
  function Ollama (line 15) | function Ollama() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/Openrouter.tsx
  function Openrouter (line 7) | function Openrouter() {

FILE: Frontend/src/components/SettingsModal/SettingsComponents/LLMPanel.tsx
  function LLMPanel (line 34) | function LLMPanel() {

FILE: Frontend/src/components/SettingsModal/SettingsModal.tsx
  function SettingsModal (line 8) | function SettingsModal() {

FILE: Frontend/src/components/Tools/ToolComponents/AddTools.tsx
  function AddTools (line 1) | function AddTools() {

FILE: Frontend/src/components/Tools/ToolComponents/EnableTools.tsx
  function EnableTools (line 13) | function EnableTools() {

FILE: Frontend/src/components/Tools/Tools.tsx
  function Tools (line 8) | function Tools() {

FILE: Frontend/src/components/ui/badge.tsx
  type BadgeProps (line 26) | interface BadgeProps
  function Badge (line 30) | function Badge({ className, variant, ...props }: BadgeProps) {

FILE: Frontend/src/components/ui/button.tsx
  type ButtonProps (line 8) | interface ButtonProps

FILE: Frontend/src/components/ui/form.tsx
  type FormFieldContextValue (line 18) | type FormFieldContextValue<
  type FormItemContextValue (line 65) | type FormItemContextValue = {

FILE: Frontend/src/components/ui/icons.tsx
  type Icon (line 9) | type Icon = LucideIcon;

FILE: Frontend/src/components/ui/sheet.tsx
  type SheetContentProps (line 50) | interface SheetContentProps

FILE: Frontend/src/components/ui/toast.tsx
  type ToastProps (line 113) | type ToastProps = React.ComponentPropsWithoutRef<typeof Toast>;
  type ToastActionElement (line 115) | type ToastActionElement = React.ReactElement<typeof ToastAction>;

FILE: Frontend/src/components/ui/toaster.tsx
  function Toaster (line 11) | function Toaster() {

FILE: Frontend/src/context/ChatInputContext.tsx
  type ChatInputContextType (line 3) | interface ChatInputContextType {

FILE: Frontend/src/context/UserClientProviders.tsx
  function UserClientProviders (line 6) | function UserClientProviders({

FILE: Frontend/src/data/sysSpecs.ts
  type SystemSpecs (line 1) | type SystemSpecs = {

FILE: Frontend/src/electron/authentication/devApi.ts
  function getDevSecretPath (line 8) | function getDevSecretPath(): string {
  function getSecret (line 15) | function getSecret(): string {
  function getDevApiKey (line 34) | async function getDevApiKey({

FILE: Frontend/src/electron/authentication/secret.ts
  function generateSecret (line 5) | function generateSecret(): string {
  function getSecret (line 12) | function getSecret(): string {

FILE: Frontend/src/electron/authentication/token.ts
  function getToken (line 4) | async function getToken({ userId }: { userId: string }) {

FILE: Frontend/src/electron/crawl/cancelWebcrawl.ts
  function cancelWebcrawl (line 3) | async function cancelWebcrawl(payload: {

FILE: Frontend/src/electron/crawl/webcrawl.ts
  type ProgressData (line 5) | interface ProgressData {
  function webcrawl (line 15) | async function webcrawl(payload: {

FILE: Frontend/src/electron/db.ts
  class DatabaseService (line 15) | class DatabaseService {
    method constructor (line 18) | constructor() {
    type TableInfo (line 382) | interface TableInfo {
    type TableName (line 390) | type TableName = keyof typeof expectedColumns;
    type ColumnInfo (line 487) | interface ColumnInfo {
    method init (line 546) | init() {
    method addInitialTools (line 552) | addInitialTools() {
    method getUsers (line 558) | getUsers() {
    method getUserSettings (line 565) | getUserSettings(userId: string | number): Promise<UserSettings> {
    method updateUserSettings (line 573) | updateUserSettings(settings: UserSettings) {
    method getUserPrompts (line 635) | getUserPrompts(userId: number) {
    method addUserPrompt (line 642) | addUserPrompt(
    method addAPIKey (line 663) | addAPIKey(userId: number, key: string, provider: string) {
    method updateUserPrompt (line 680) | updateUserPrompt(userId: number, id: number, name: string, prompt: str...
    method isCollectionLocal (line 688) | isCollectionLocal(collectionId: number): boolean {
    method getCollectionLocalEmbeddingModel (line 695) | getCollectionLocalEmbeddingModel(collectionId: number): string {
    method createCollection (line 701) | createCollection(
    method addFileToCollection (line 730) | addFileToCollection(userId: number, id: number, file: string) {
    method deleteCollection (line 746) | deleteCollection(userId: number, id: number) {
    method getCollection (line 751) | getCollection(collectionId: number) {
    method getCollectionName (line 756) | getCollectionName(collectionId: number) {
    method getFilesInCollection (line 761) | getFilesInCollection(userId: number, collectionId: number) {
    method getUserCollections (line 766) | getUserCollections(userId: number) {
    method addUser (line 772) | addUser(name: string): { id: number; name: string; error?: string } {
    method getUserApiKeys (line 798) | getUserApiKeys(userId: number): Promise<ApiKey[]> {
    method getApiKey (line 805) | getApiKey(userId: number, provider: string): string {
    method getUserConversations (line 812) | getUserConversations(userId: number) {
    method getUserConversationTitle (line 826) | getUserConversationTitle(userId: number, conversationId: number) {
    method addUserConversation (line 832) | addUserConversation(userId: number, title: string) {
    method deleteUserConversation (line 844) | deleteUserConversation(userId: number, id: number) {
    method getConversationMessages (line 850) | getConversationMessages(userId: number, conversationId: number) {
    method addUserMessage (line 857) | addUserMessage(
    method addReasoningContent (line 883) | addReasoningContent(messageId: number, reasoningContent: string) {
    method deleteUserMessage (line 889) | deleteUserMessage(userId: number, id: number) {
    method getUserPrompt (line 895) | getUserPrompt(userId: number, promptId: number) {
    method updateMessageDataId (line 900) | updateMessageDataId(messageId: number, dataId: number) {
    method addRetrievedData (line 905) | addRetrievedData(messageId: number, data: string): number {
    method getConversationMessagesWithData (line 915) | getConversationMessagesWithData(userId: number, conversationId: number) {
    method addDevAPIKey (line 929) | addDevAPIKey(
    method getDevAPIKeys (line 941) | getDevAPIKeys(userId: number) {
    method deleteDevAPIKey (line 946) | deleteDevAPIKey(userId: number, id: number) {
    method getOpenRouterModel (line 952) | getOpenRouterModel(userId: number) {
    method addOpenRouterModel (line 957) | addOpenRouterModel(userId: number, model: string) {
    method deleteOpenRouterModel (line 972) | deleteOpenRouterModel(userId: number, id: number) {
    method getOpenRouterModels (line 977) | getOpenRouterModels(userId: number) {
    method getAzureOpenAIModels (line 984) | getAzureOpenAIModels(userId: number) {
    method addAzureOpenAIModel (line 996) | addAzureOpenAIModel(
    method deleteAzureOpenAIModel (line 1010) | deleteAzureOpenAIModel(userId: number, id: number) {
    method getAzureOpenAIModel (line 1015) | getAzureOpenAIModel(userId: number, id: number) {
    method getCustomAPI (line 1025) | getCustomAPI(userId: number) {
    method getCustomAPIs (line 1037) | getCustomAPIs(userId: number) {
    method deleteCustomAPI (line 1049) | deleteCustomAPI(userId: number, id: number) {
    method addCustomAPI (line 1054) | addCustomAPI(
    method addTool (line 1071) | addTool(name: string, description: string) {
    method getUserTools (line 1083) | getUserTools(userId: number) {
    method addUserTool (line 1095) | addUserTool(userId: number, toolId: number, enabled: number, docked: n...
    method removeUserTool (line 1110) | removeUserTool(userId: number, toolId: number) {
    method updateUserTool (line 1116) | updateUserTool(
    method getTools (line 1136) | getTools() {
    method addExternalOllama (line 1140) | addExternalOllama(
    method getExternalOllama (line 1162) | getExternalOllama(userId: number) {

FILE: Frontend/src/electron/embedding/cancelEmbed.ts
  function cancelEmbed (line 3) | async function cancelEmbed(payload: {

FILE: Frontend/src/electron/embedding/vectorstoreQuery.ts
  function vectorstoreQuery (line 4) | async function vectorstoreQuery(payload: {

FILE: Frontend/src/electron/handlers/azureHandlers.ts
  function setupAzureOpenAI (line 4) | async function setupAzureOpenAI() {

FILE: Frontend/src/electron/handlers/chatHandlers.ts
  function setupChatHandlers (line 7) | function setupChatHandlers(mainWindow: BrowserWindow) {

FILE: Frontend/src/electron/handlers/closeEventHandler.ts
  function handleCloseEvents (line 3) | function handleCloseEvents(mainWindow: BrowserWindow) {

FILE: Frontend/src/electron/handlers/collectionHandlers.ts
  function setupCollectionHandlers (line 14) | function setupCollectionHandlers() {

FILE: Frontend/src/electron/handlers/customApiHandlers.ts
  function setupCustomApiHandlers (line 4) | async function setupCustomApiHandlers() {

FILE: Frontend/src/electron/handlers/dbHandlers.ts
  function setupDbHandlers (line 4) | function setupDbHandlers() {

FILE: Frontend/src/electron/handlers/fileHandlers.ts
  function setupFileHandlers (line 7) | function setupFileHandlers() {

FILE: Frontend/src/electron/handlers/ipcHandlers.ts
  function setupIpcHandlers (line 5) | function setupIpcHandlers(mainWindow: BrowserWindow) {

FILE: Frontend/src/electron/handlers/localModelHandlers.ts
  function getModelsPath (line 14) | function getModelsPath() {
  function downloadModel (line 28) | async function downloadModel(payload: {
  function setupLocalModelHandlers (line 313) | function setupLocalModelHandlers() {

FILE: Frontend/src/electron/handlers/menuHandlers.ts
  function setupMenuHandlers (line 3) | function setupMenuHandlers(mainWindow: BrowserWindow) {

FILE: Frontend/src/electron/handlers/ollamaHandlers.ts
  function setupOllamaHandlers (line 10) | async function setupOllamaHandlers() {

FILE: Frontend/src/electron/handlers/openRouterHandlers.ts
  function setupOpenRouterHandlers (line 5) | async function setupOpenRouterHandlers() {

FILE: Frontend/src/electron/handlers/voiceHandlers.ts
  function setupVttHandlers (line 14) | function setupVttHandlers() {

FILE: Frontend/src/electron/helpers/spawnAsync.ts
  function spawnAsync (line 5) | function spawnAsync(

FILE: Frontend/src/electron/llms/agentLayer/anthropicAgent.ts
  function anthropicAgent (line 8) | async function anthropicAgent(

FILE: Frontend/src/electron/llms/agentLayer/geminiAgent.ts
  function geminiAgent (line 8) | async function geminiAgent(

FILE: Frontend/src/electron/llms/agentLayer/ollamaAgent.ts
  function ollamaAgent (line 9) | async function ollamaAgent(

FILE: Frontend/src/electron/llms/agentLayer/openAiAgent.ts
  function openAiAgent (line 7) | async function openAiAgent(

FILE: Frontend/src/electron/llms/agentLayer/tools/websearch.ts
  function webSearch (line 3) | async function webSearch(payload: { url: string }) {

FILE: Frontend/src/electron/llms/apiCheckProviders/anthropic.ts
  function AnthropicProviderAPIKeyCheck (line 3) | async function AnthropicProviderAPIKeyCheck(apiKey: string): Promise<{

FILE: Frontend/src/electron/llms/apiCheckProviders/deepseek.ts
  function DeepSeekProviderAPIKeyCheck (line 3) | async function DeepSeekProviderAPIKeyCheck(

FILE: Frontend/src/electron/llms/apiCheckProviders/gemini.ts
  function initializeGemini (line 4) | async function initializeGemini(apiKey: string) {
  function GeminiProviderAPIKeyCheck (line 8) | async function GeminiProviderAPIKeyCheck(apiKey: string): Promise<{

FILE: Frontend/src/electron/llms/apiCheckProviders/openai.ts
  function OpenAIProviderAPIKeyCheck (line 3) | async function OpenAIProviderAPIKeyCheck(apiKey: string): Promise<{

FILE: Frontend/src/electron/llms/apiCheckProviders/openrouter.ts
  function OpenRouterProviderAPIKeyCheck (line 3) | async function OpenRouterProviderAPIKeyCheck(

FILE: Frontend/src/electron/llms/apiCheckProviders/xai.ts
  function initializeXAI (line 5) | async function initializeXAI(apiKey: string) {
  function XAIProviderAPIKeyCheck (line 9) | async function XAIProviderAPIKeyCheck(apiKey: string): Promise<{

FILE: Frontend/src/electron/llms/chatCompletion.ts
  function chatCompletion (line 10) | async function chatCompletion(

FILE: Frontend/src/electron/llms/generateTitle.ts
  function chatCompletionTitle (line 8) | async function chatCompletionTitle(
  function generateTitleOpenRouter (line 41) | async function generateTitleOpenRouter(input: string, user: User) {
  function generateTitleDeepSeek (line 45) | async function generateTitleDeepSeek(input: string, user: User) {
  function generateTitleCustom (line 49) | async function generateTitleCustom(
  function generateTitleAnthropic (line 76) | async function generateTitleAnthropic(input: string, user: User) {
  function generateTitleGemini (line 106) | async function generateTitleGemini(input: string, user: User) {
  function generateTitleXAI (line 128) | async function generateTitleXAI(input: string, user: User) {
  function generateOllamaTitle (line 132) | async function generateOllamaTitle(input: string, model: string) {
  function generateTitleOpenAI (line 170) | async function generateTitleOpenAI(input: string, user: User) {
  function generateTitleAzureOpenAI (line 174) | async function generateTitleAzureOpenAI(input: string, user: User) {
  function generateTitleLocalOpenAI (line 178) | async function generateTitleLocalOpenAI(
  function generateTitle (line 202) | async function generateTitle(input: string, user: User) {

FILE: Frontend/src/electron/llms/keyValidation.ts
  function keyValidation (line 8) | async function keyValidation({

FILE: Frontend/src/electron/llms/llmHelpers/addAssistantMessage.ts
  function addAssistantMessage (line 3) | async function addAssistantMessage(

FILE: Frontend/src/electron/llms/llmHelpers/addUserMessage.ts
  function addUserMessage (line 3) | async function addUserMessage(

FILE: Frontend/src/electron/llms/llmHelpers/collectionData.ts
  function ifCollection (line 6) | async function ifCollection(

FILE: Frontend/src/electron/llms/llmHelpers/countMessageTokens.ts
  function countMessageTokens (line 5) | function countMessageTokens(

FILE: Frontend/src/electron/llms/llmHelpers/getUserPrompt.ts
  function getUserPrompt (line 3) | async function getUserPrompt(

FILE: Frontend/src/electron/llms/llmHelpers/ifNewConvo.ts
  function ifNewConversation (line 5) | async function ifNewConversation(messages: Message[], activeUser: User) {

FILE: Frontend/src/electron/llms/llmHelpers/prepMessages.ts
  function prepMessages (line 3) | async function prepMessages(messages: Message[]) {

FILE: Frontend/src/electron/llms/llmHelpers/providerInit.ts
  function providerInitialize (line 5) | async function providerInitialize(
  function initializeExternalOllama (line 48) | async function initializeExternalOllama(activeUser: User) {
  function initializeOpenRouter (line 70) | async function initializeOpenRouter(activeUser: User) {
  function initializeAzureOpenAI (line 83) | async function initializeAzureOpenAI(activeUser: User) {
  function initializeDeepSeek (line 107) | async function initializeDeepSeek(activeUser: User) {
  function initializeCustom (line 120) | async function initializeCustom(activeUser: User) {
  function initializeLocalOpenAI (line 143) | async function initializeLocalOpenAI(activeUser: User) {
  function initializeXAI (line 152) | async function initializeXAI(activeUser: User) {

FILE: Frontend/src/electron/llms/llmHelpers/returnReasoningPrompt.ts
  function returnReasoningPrompt (line 1) | async function returnReasoningPrompt(

FILE: Frontend/src/electron/llms/llmHelpers/returnSystemPrompt.ts
  function returnSystemPrompt (line 3) | async function returnSystemPrompt(

FILE: Frontend/src/electron/llms/llmHelpers/sendMessageChunk.ts
  function sendMessageChunk (line 3) | function sendMessageChunk(

FILE: Frontend/src/electron/llms/llmHelpers/truncateMessages.ts
  function truncateMessages (line 5) | function truncateMessages(

FILE: Frontend/src/electron/llms/llms.ts
  function chatRequest (line 12) | async function chatRequest(

FILE: Frontend/src/electron/llms/providers/anthropic.ts
  function chainOfThought (line 11) | async function chainOfThought(
  function AnthropicProvider (line 72) | async function AnthropicProvider(

FILE: Frontend/src/electron/llms/providers/azureOpenAI.ts
  function AzureOpenAIProvider (line 3) | async function AzureOpenAIProvider(

FILE: Frontend/src/electron/llms/providers/customEndpoint.ts
  function CustomProvider (line 4) | async function CustomProvider(

FILE: Frontend/src/electron/llms/providers/deepseek.ts
  type DeepSeekDelta (line 11) | interface DeepSeekDelta
  function DeepSeekProvider (line 16) | async function DeepSeekProvider(

FILE: Frontend/src/electron/llms/providers/externalOllama.ts
  function ExternalOllamaProvider (line 4) | async function ExternalOllamaProvider(

FILE: Frontend/src/electron/llms/providers/gemini.ts
  function initializeGemini (line 17) | async function initializeGemini(apiKey: string) {
  function chainOfThought (line 21) | async function chainOfThought(
  function GeminiProvider (line 108) | async function GeminiProvider(

FILE: Frontend/src/electron/llms/providers/localModel.ts
  function LocalModelProvider (line 4) | async function LocalModelProvider(

FILE: Frontend/src/electron/llms/providers/ollama.ts
  function OllamaProvider (line 11) | async function OllamaProvider(
  function chainOfThought (line 158) | async function chainOfThought(

FILE: Frontend/src/electron/llms/providers/openai.ts
  function OpenAIProvider (line 4) | async function OpenAIProvider(

FILE: Frontend/src/electron/llms/providers/openrouter.ts
  function OpenRouterProvider (line 4) | async function OpenRouterProvider(

FILE: Frontend/src/electron/llms/providers/xai.ts
  function XAIProvider (line 4) | async function XAIProvider(

FILE: Frontend/src/electron/llms/reasoningLayer/openAiChainOfThought.ts
  function openAiChainOfThought (line 7) | async function openAiChainOfThought(

FILE: Frontend/src/electron/loadingWindow.ts
  function createLoadingWindow (line 17) | function createLoadingWindow(icon?: Electron.NativeImage) {
  function updateLoadingText (line 81) | function updateLoadingText(text: string) {
  function updateLoadingStatus (line 87) | function updateLoadingStatus(
  function closeLoadingWindow (line 97) | function closeLoadingWindow() {

FILE: Frontend/src/electron/localLLMs/getDirModels.ts
  function getDirModels (line 4) | async function getDirModels(payload: {

FILE: Frontend/src/electron/localLLMs/loadModel.ts
  function loadModel (line 3) | async function loadModel(payload: {

FILE: Frontend/src/electron/localLLMs/modelInfo.ts
  function modelInfo (line 3) | async function modelInfo(payload: {

FILE: Frontend/src/electron/localLLMs/unloadModel.ts
  function unloadModel (line 3) | async function unloadModel(payload: {

FILE: Frontend/src/electron/mainWindow.ts
  function createMainWindow (line 7) | function createMainWindow(icon?: Electron.NativeImage) {

FILE: Frontend/src/electron/menu.ts
  function createMenu (line 5) | function createMenu(mainWindow: BrowserWindow) {

FILE: Frontend/src/electron/ollama/checkOllama.ts
  function checkOllama (line 6) | async function checkOllama(): Promise<boolean> {

FILE: Frontend/src/electron/ollama/fetchLocalModels.ts
  function fetchOllamaModels (line 7) | async function fetchOllamaModels(): Promise<string[]> {

FILE: Frontend/src/electron/ollama/getRunningModels.ts
  function getRunningModels (line 4) | async function getRunningModels(): Promise<string[]> {

FILE: Frontend/src/electron/ollama/isOllamaRunning.ts
  function isOllamaServerRunning (line 4) | async function isOllamaServerRunning(): Promise<boolean> {

FILE: Frontend/src/electron/ollama/pullModel.ts
  function pullModel (line 4) | async function pullModel(model: string): Promise<void> {

FILE: Frontend/src/electron/ollama/runOllama.ts
  function startOllamaServer (line 15) | async function startOllamaServer(): Promise<void> {
  function createOllamaProcess (line 53) | async function createOllamaProcess(
  function runOllama (line 124) | async function runOllama({

FILE: Frontend/src/electron/ollama/unloadAllModels.ts
  function unloadAllModels (line 4) | async function unloadAllModels(): Promise<void> {

FILE: Frontend/src/electron/ollama/unloadModel.ts
  function unloadModel (line 3) | async function unloadModel(model: string): Promise<void> {

FILE: Frontend/src/electron/pathResolver.ts
  function getPreloadPath (line 5) | function getPreloadPath() {
  function getUIPath (line 13) | function getUIPath() {
  function getAssetsPath (line 17) | function getAssetsPath() {

FILE: Frontend/src/electron/python/ensurePythonAndVenv.ts
  function ensurePythonAndVenv (line 11) | async function ensurePythonAndVenv(backendPath: string) {

FILE: Frontend/src/electron/python/extractFromAsar.ts
  function extractFromAsar (line 6) | function extractFromAsar(sourcePath: string, destPath: string) {

FILE: Frontend/src/electron/python/getLinuxPackageManager.ts
  function getLinuxPackageManager (line 5) | function getLinuxPackageManager(): {

FILE: Frontend/src/electron/python/ifFedora.ts
  function ifFedora (line 5) | async function ifFedora() {

FILE: Frontend/src/electron/python/installDependencies.ts
  function installDependencies (line 6) | async function installDependencies(

FILE: Frontend/src/electron/python/installLlamaCpp.ts
  function getNextMessage (line 53) | function getNextMessage(): string {
  function startRotatingMessages (line 75) | function startRotatingMessages(baseProgress: number) {
  function stopRotatingMessages (line 87) | function stopRotatingMessages() {
  function installLlamaCpp (line 95) | async function installLlamaCpp(

FILE: Frontend/src/electron/python/killProcessOnPort.ts
  function killProcessOnPort (line 4) | async function killProcessOnPort(port: number): Promise<void> {

FILE: Frontend/src/electron/python/runWithPrivileges.ts
  function runWithPrivileges (line 5) | async function runWithPrivileges(

FILE: Frontend/src/electron/python/startAndStopPython.ts
  function startPythonServer (line 21) | async function startPythonServer() {
  function stopPythonServer (line 288) | function stopPythonServer() {

FILE: Frontend/src/electron/resourceManager.ts
  constant POLLING_INTERVAL (line 7) | const POLLING_INTERVAL = 500;
  function pollResource (line 9) | function pollResource(mainWindow: BrowserWindow) {
  function getCpuUsage (line 22) | function getCpuUsage(): Promise<number> {
  function getMemoryUsage (line 30) | function getMemoryUsage() {
  function getDiskUsage (line 34) | function getDiskUsage() {
  function getStaticData (line 44) | async function getStaticData(): Promise<StaticData> {

FILE: Frontend/src/electron/specs/systemSpecs.ts
  function systemSpecs (line 4) | async function systemSpecs(): Promise<{

FILE: Frontend/src/electron/storage/deleteCollection.ts
  function deleteCollection (line 5) | async function deleteCollection(

FILE: Frontend/src/electron/storage/getFiles.ts
  function getFilesInCollection (line 2) | function getFilesInCollection(userId: number, collectionId: number) {

FILE: Frontend/src/electron/storage/getUserFiles.ts
  function getUserCollectionFiles (line 5) | function getUserCollectionFiles(payload: {

FILE: Frontend/src/electron/storage/newFile.ts
  type PythonProgressData (line 7) | interface PythonProgressData {
  type ProgressData (line 15) | interface ProgressData {
  function addFileToCollection (line 25) | async function addFileToCollection(

FILE: Frontend/src/electron/storage/removeFileorFolder.ts
  function removeFileorFolder (line 5) | function removeFileorFolder(payload: {

FILE: Frontend/src/electron/storage/renameFile.ts
  function renameFile (line 5) | function renameFile(payload: {

FILE: Frontend/src/electron/storage/websiteFetch.ts
  type PythonProgressData (line 9) | interface PythonProgressData {
  type ProgressData (line 17) | interface ProgressData {
  function websiteFetch (line 27) | async function websiteFetch(payload: {

FILE: Frontend/src/electron/tray.ts
  function createTray (line 4) | function createTray(mainWindow: BrowserWindow) {

FILE: Frontend/src/electron/util.ts
  function isDev (line 10) | function isDev(): boolean {
  function ipcMainHandle (line 14) | function ipcMainHandle<
  function ipcMainDatabaseHandle (line 25) | function ipcMainDatabaseHandle<Key extends keyof EventPayloadMapping>(
  function ipcWebContentsSend (line 37) | function ipcWebContentsSend<Key extends keyof EventPayloadMapping>(
  function validateEventFrame (line 45) | function validateEventFrame(frame: WebFrameMain | null) {
  function ipcMainOn (line 57) | function ipcMainOn<Key extends keyof EventPayloadMapping>(

FILE: Frontend/src/electron/voice/audioTranscription.ts
  function audioTranscription (line 6) | async function audioTranscription(audioData: Buffer, userId: number) {

FILE: Frontend/src/electron/youtube/youtubeIngest.ts
  type PythonProgressData (line 5) | interface PythonProgressData {
  type ProgressData (line 13) | interface ProgressData {
  function youtubeIngest (line 23) | async function youtubeIngest(payload: {

FILE: Frontend/src/hooks/use-toast.ts
  constant TOAST_LIMIT (line 8) | const TOAST_LIMIT = 1;
  constant TOAST_REMOVE_DELAY (line 9) | const TOAST_REMOVE_DELAY = 1000000;
  type ToasterToast (line 11) | type ToasterToast = ToastProps & {
  function genId (line 27) | function genId() {
  type ActionType (line 32) | type ActionType = typeof actionTypes;
  type Action (line 34) | type Action =
  type State (line 52) | interface State {
  function dispatch (line 133) | function dispatch(action: Action) {
  type Toast (line 140) | type Toast = Omit<ToasterToast, "id">;
  function toast (line 142) | function toast({ ...props }: Toast) {
  function useToast (line 171) | function useToast() {

FILE: Frontend/src/hooks/useAppInitialization.tsx
  function useAppInitialization (line 10) | function useAppInitialization() {

FILE: Frontend/src/hooks/useChatLogic.ts
  function useChatLogic (line 10) | function useChatLogic() {

FILE: Frontend/src/hooks/useStatistics.tsx
  function useStatistics (line 3) | function useStatistics(dataPointCount: number): Statistics[] {

FILE: Frontend/src/hooks/useUIState.ts
  function handleClickOutside (line 9) | function handleClickOutside(event: MouseEvent) {

FILE: Frontend/src/lib/shikiHightlight.ts
  function initializeShiki (line 4) | async function initializeShiki() {
  function highlightCode (line 48) | function highlightCode(code: string, language: string): string {

FILE: Frontend/src/lib/utils.ts
  function cn (line 4) | function cn(...inputs: ClassValue[]) {
  function formatDate (line 8) | function formatDate(input: Date | string): string {
  function sanitizeStoreName (line 22) | async function sanitizeStoreName(name: string) {

FILE: Frontend/src/types/contextTypes/LibraryContextTypes.ts
  type LibraryContextType (line 1) | interface LibraryContextType {

FILE: Frontend/src/types/contextTypes/SystemSettingsTypes.ts
  type SysSettingsContextType (line 3) | interface SysSettingsContextType {

FILE: Frontend/src/types/contextTypes/UserContextType.ts
  type UserContextType (line 1) | interface UserContextType {

FILE: Frontend/src/types/contextTypes/UserViewTypes.ts
  type UserViewContextType (line 3) | interface UserViewContextType {

FILE: Frontend/src/utils/webAudioRecorder.ts
  class WebAudioRecorder (line 1) | class WebAudioRecorder {
    method getSupportedMimeType (line 6) | private static getSupportedMimeType(): string {
    method startRecording (line 23) | async startRecording(): Promise<void> {
    method stopRecording (line 83) | async stopRecording(): Promise<ArrayBuffer> {
    method cleanup (line 128) | private cleanup(): void {
    method isRecording (line 144) | isRecording(): boolean {
    method cancelRecording (line 148) | cancelRecording(): void {

FILE: Frontend/types.d.ts
  type Statistics (line 1) | type Statistics = {
  type StaticData (line 7) | type StaticData = {
  type UnsubscribeFunction (line 13) | type UnsubscribeFunction = () => void;
  type View (line 15) | type View =
  type User (line 24) | type User = {
  type AzureModel (line 29) | type AzureModel = {
  type Message (line 37) | type Message = {
  type ReasoningEffort (line 49) | type ReasoningEffort = "low" | "medium" | "high";
  type UserSettings (line 51) | interface UserSettings {
  type Collection (line 78) | type Collection = {
  type ApiKey (line 87) | type ApiKey = {
  type Conversation (line 93) | type Conversation = {
  type UserPrompts (line 100) | interface UserPrompts {
  type FrameWindowAction (line 106) | type FrameWindowAction = "close" | "minimize" | "maximize" | "unmaximize";
  type TranscribeAudioInput (line 108) | interface TranscribeAudioInput {
  type TranscribeAudioOutput (line 113) | interface TranscribeAudioOutput {
  type Model (line 120) | interface Model {
  type CustomModel (line 129) | interface CustomModel {
  type DownloadModelProgress (line 138) | interface DownloadModelProgress {
  type EventPayloadMapping (line 150) | interface EventPayloadMapping {
  type Window (line 414) | interface Window {
  type Keys (line 874) | type Keys = {
  type DataContent (line 881) | interface DataContent {
  type OpenRouterModel (line 894) | type OpenRouterModel = string;
  type ProgressData (line 896) | interface ProgressData extends CustomProgressData, OllamaProgressEvent {}
  type LLMProvider (line 898) | type LLMProvider =
  type OllamaProgressEvent (line 910) | interface OllamaProgressEvent {
  type Electron (line 915) | interface Electron {
  type APIKey (line 932) | interface APIKey {
  type CustomProgressData (line 939) | type CustomProgressData = {
  type DownloadProgress (line 971) | interface DownloadProgress {
  type OllamaModel (line 975) | interface OllamaModel {
  type ExternalOllama (line 979) | interface ExternalOllama {
  type DownloadProgressData (line 987) | interface DownloadProgressData {
  type ProviderResponse (line 1002) | interface ProviderResponse {
  type ChatRequestResult (line 1011) | interface ChatRequestResult {
  type ProviderInputParams (line 1020) | interface ProviderInputParams {
  type Tool (line 1039) | interface Tool {
  type UserTool (line 1045) | interface UserTool {
  type WebSearchResult (line 1051) | type WebSearchResult = {
Condensed preview — 298 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,097K chars).
[
  {
    "path": ".gitignore",
    "chars": 360,
    "preview": "# Python cache files\n__pycache__/\n*.py[cod]\n*$py.class\n\n.venv\n\n/Frontend/node_modules\n/Frontend/dist\n\n.env.local\n\ndataba"
  },
  {
    "path": "Backend/.gitignore",
    "chars": 14,
    "preview": "venv\ntestData\n"
  },
  {
    "path": "Backend/ensure_dependencies.py",
    "chars": 7375,
    "preview": "import sys\nimport os\nimport subprocess\nimport asyncio\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nim"
  },
  {
    "path": "Backend/main.py",
    "chars": 14905,
    "preview": "import logging\nfrom src.authentication.api_key_authorization import api_key_auth\nfrom src.authentication.token import ve"
  },
  {
    "path": "Backend/requirements.txt",
    "chars": 3265,
    "preview": "annotated-types==0.7.0\nanyio==4.6.2.post1\nasgiref==3.8.1\nbackoff==2.2.1\nbcrypt==4.2.1\nbuild==1.2.2.post1\ncachetools==5.5"
  },
  {
    "path": "Backend/src/authentication/api_key_authorization.py",
    "chars": 977,
    "preview": "from fastapi import Depends\nfrom fastapi.security import OAuth2PasswordBearer\nfrom typing import Optional\nimport jwt\n\nim"
  },
  {
    "path": "Backend/src/authentication/token.py",
    "chars": 1879,
    "preview": "from fastapi import Depends, Request\nfrom fastapi.security import OAuth2PasswordBearer\nfrom typing import Optional\nimpor"
  },
  {
    "path": "Backend/src/data/dataFetch/webcrawler.py",
    "chars": 11366,
    "preview": "import os\nimport json\nimport logging\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin, url"
  },
  {
    "path": "Backend/src/data/dataFetch/youtube.py",
    "chars": 14996,
    "preview": "import os\nfrom src.endpoint.models import YoutubeTranscriptRequest\nfrom src.vectorstorage.vectorstore import get_vectors"
  },
  {
    "path": "Backend/src/data/dataIntake/csvFallbackSplitting.py",
    "chars": 2983,
    "preview": "from langchain_core.documents import Document\nimport pandas as pd\nimport io\nimport time\nfrom typing import Generator\n\n\nd"
  },
  {
    "path": "Backend/src/data/dataIntake/fileTypes/loadX.py",
    "chars": 5492,
    "preview": "import pandas as pd\nimport json\nimport markdown\nfrom bs4 import BeautifulSoup\nfrom pptx import Presentation\nfrom langcha"
  },
  {
    "path": "Backend/src/data/dataIntake/getHtmlFiles.py",
    "chars": 374,
    "preview": "import os\n\n\ndef get_html_files(directory):\n    \"\"\"Recursively get all HTML files in a directory and its subdirectories\"\""
  },
  {
    "path": "Backend/src/data/dataIntake/loadFile.py",
    "chars": 1391,
    "preview": "import os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nfrom src.data.dataIntake.fileTypes.loadX import (\n    lo"
  },
  {
    "path": "Backend/src/data/dataIntake/textSplitting.py",
    "chars": 1568,
    "preview": "from langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain_core.documents import Document\nimport "
  },
  {
    "path": "Backend/src/data/database/checkAPIKey.py",
    "chars": 760,
    "preview": "from src.data.database.db import db\n\n\ndef check_api_key(user_id: int):\n    \"\"\" check to see if the userId has API key in"
  },
  {
    "path": "Backend/src/data/database/db.py",
    "chars": 1647,
    "preview": "import sqlite3\nimport os\nimport pathlib\nimport platform\n\nIS_DEV = os.environ.get(\"IS_DEV\") == \"1\"\n\n\ndef get_user_data_pa"
  },
  {
    "path": "Backend/src/data/database/getCollectionInfo.py",
    "chars": 1621,
    "preview": "from src.data.database.db import db\nfrom dataclasses import dataclass\nfrom typing import Optional\n\n\n@dataclass\nclass Col"
  },
  {
    "path": "Backend/src/data/database/getLLMApiKey.py",
    "chars": 461,
    "preview": "from src.data.database.db import db\n\n\ndef get_llm_api_key(user_id, provider):\n    try:\n        conn = db()\n        curso"
  },
  {
    "path": "Backend/src/endpoint/api.py",
    "chars": 9147,
    "preview": "from typing import AsyncGenerator\nimport json\nfrom src.endpoint.models import ChatCompletionRequest\nfrom transformers im"
  },
  {
    "path": "Backend/src/endpoint/deleteStore.py",
    "chars": 660,
    "preview": "from src.endpoint.models import DeleteCollectionRequest\nfrom src.vectorstorage.vectorstore import get_vectorstore\nimport"
  },
  {
    "path": "Backend/src/endpoint/devApiCall.py",
    "chars": 3365,
    "preview": "from src.data.database.getCollectionInfo import get_collection_settings\nfrom src.data.database.getLLMApiKey import get_l"
  },
  {
    "path": "Backend/src/endpoint/embed.py",
    "chars": 4612,
    "preview": "from src.data.dataIntake.textSplitting import split_text\nfrom src.data.dataIntake.loadFile import load_document\nfrom src"
  },
  {
    "path": "Backend/src/endpoint/models.py",
    "chars": 6082,
    "preview": "from pydantic import BaseModel\nfrom typing import Optional, Dict, Any, List, Literal\n\n\nclass EmbeddingRequest(BaseModel)"
  },
  {
    "path": "Backend/src/endpoint/ragQuery.py",
    "chars": 1521,
    "preview": "from src.endpoint.models import VectorStoreQueryRequest, ChatCompletionRequest\nfrom src.endpoint.vectorQuery import quer"
  },
  {
    "path": "Backend/src/endpoint/transcribe.py",
    "chars": 1597,
    "preview": "from src.voice.voice_to_text import initialize_model\n\nimport os\nimport tempfile\nfrom fastapi import UploadFile, File, HT"
  },
  {
    "path": "Backend/src/endpoint/vectorQuery.py",
    "chars": 844,
    "preview": "from src.endpoint.models import VectorStoreQueryRequest\nfrom src.vectorstorage.helpers.sanitizeCollectionName import san"
  },
  {
    "path": "Backend/src/endpoint/webcrawl.py",
    "chars": 3342,
    "preview": "from src.data.dataIntake.fileTypes.loadX import load_html\nfrom src.data.dataIntake.textSplitting import split_text\nfrom "
  },
  {
    "path": "Backend/src/llms/llmQuery.py",
    "chars": 776,
    "preview": "from src.endpoint.models import ChatCompletionRequest\nfrom src.llms.providers.ooba import ooba_query\nfrom src.llms.provi"
  },
  {
    "path": "Backend/src/llms/messages/formMessages.py",
    "chars": 631,
    "preview": "from src.endpoint.models import QueryRequest\n\n\ndef form_messages(data: QueryRequest):\n    try:\n        if not data.promp"
  },
  {
    "path": "Backend/src/llms/providers/local.py",
    "chars": 3214,
    "preview": "import asyncio\nimport json\nimport time\nimport logging\nfrom src.endpoint.api import chat_completion_stream\nfrom src.endpo"
  },
  {
    "path": "Backend/src/llms/providers/ollama.py",
    "chars": 2192,
    "preview": "from src.endpoint.models import QueryRequest\nimport requests\nimport json\nimport time\n\n\n\ndef ollama_query(data: QueryRequ"
  },
  {
    "path": "Backend/src/llms/providers/ooba.py",
    "chars": 529,
    "preview": "from src.endpoint.models import QueryRequest\nimport requests\n\n\ndef ooba_query(data: QueryRequest, messages: list = None)"
  },
  {
    "path": "Backend/src/llms/providers/openai.py",
    "chars": 912,
    "preview": "from src.endpoint.models import QueryRequest\nfrom openai import OpenAI\nfrom typing import Optional\n\n\ndef openai_query(da"
  },
  {
    "path": "Backend/src/models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "Backend/src/models/exceptions.py",
    "chars": 341,
    "preview": "class ModelLoadError(Exception):\n    \"\"\"Exception raised when there is an error loading a model.\"\"\"\n    pass\n\nclass Mode"
  },
  {
    "path": "Backend/src/models/loaders/__init__.py",
    "chars": 417,
    "preview": "from .transformers import TransformersLoader\nfrom .llamacpp import LlamaCppLoader\nfrom .llamaccphf import LlamaCppHFLoad"
  },
  {
    "path": "Backend/src/models/loaders/base.py",
    "chars": 8058,
    "preview": "from abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple\nimport loggin"
  },
  {
    "path": "Backend/src/models/loaders/exllama.py",
    "chars": 3940,
    "preview": "import logging\nfrom typing import Any, Dict, Optional, Tuple\n\nfrom src.models.loaders.base import BaseLoader\nfrom src.mo"
  },
  {
    "path": "Backend/src/models/loaders/hqq.py",
    "chars": 5872,
    "preview": "import logging\nfrom typing import Any, Dict, Optional, Tuple\nimport requests\nfrom tqdm import tqdm\n\nfrom src.models.load"
  },
  {
    "path": "Backend/src/models/loaders/llamaccphf.py",
    "chars": 915,
    "preview": "from typing import Any, Tuple\n\nfrom src.models.loaders.llamacpp import LlamaCppLoader\n\n\nclass LlamaCppHFLoader(LlamaCppL"
  },
  {
    "path": "Backend/src/models/loaders/llamacpp.py",
    "chars": 16582,
    "preview": "import os\nimport logging\nimport requests\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple\nfrom tqd"
  },
  {
    "path": "Backend/src/models/loaders/tensorrt.py",
    "chars": 2381,
    "preview": "import logging\nfrom typing import Any, Dict, Optional, Tuple\n\nfrom src.models.loaders.base import BaseLoader\nfrom src.mo"
  },
  {
    "path": "Backend/src/models/loaders/transformers.py",
    "chars": 10450,
    "preview": "import logging\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple\nimport torch\nfrom transformers imp"
  },
  {
    "path": "Backend/src/models/manager.py",
    "chars": 11439,
    "preview": "import logging\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Any, Dict, Union\n\nfrom src.endpoint.models i"
  },
  {
    "path": "Backend/src/models/streamer.py",
    "chars": 8853,
    "preview": "import traceback\nfrom queue import Queue\nfrom threading import Thread\nfrom typing import Optional, Callable, Any, List, "
  },
  {
    "path": "Backend/src/models/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "Backend/src/models/utils/detect_type.py",
    "chars": 2256,
    "preview": "import json\nfrom pathlib import Path\nfrom typing import Union\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef"
  },
  {
    "path": "Backend/src/models/utils/device.py",
    "chars": 433,
    "preview": "import torch\nfrom src.endpoint.models import ModelLoadRequest\n\n\ndef get_device(request: ModelLoadRequest) -> str:\n    if"
  },
  {
    "path": "Backend/src/models/utils/download.py",
    "chars": 4772,
    "preview": "import os\nimport logging\nimport requests\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom typing import List, Dict, O"
  },
  {
    "path": "Backend/src/models/utils/platform.py",
    "chars": 1346,
    "preview": "import platform\nfrom typing import Tuple\n\n\ndef check_platform_compatibility(model_type: str) -> Tuple[bool, str]:\n    \"\""
  },
  {
    "path": "Backend/src/vectorstorage/embeddings.py",
    "chars": 2464,
    "preview": "import time\n\n\ndef chunk_list(lst, n):\n    \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n    for i in range(0, len(lst)"
  },
  {
    "path": "Backend/src/vectorstorage/helpers/sanitizeCollectionName.py",
    "chars": 507,
    "preview": "import re\n\n\ndef sanitize_collection_name(name):\n    try:\n        sanitized = re.sub(r'[^\\w\\-]', '_', name)\n        sanit"
  },
  {
    "path": "Backend/src/vectorstorage/init_store.py",
    "chars": 2271,
    "preview": "from langchain_huggingface import HuggingFaceEmbeddings\nimport logging\nimport torch\nimport os\nfrom pathlib import Path\n\n"
  },
  {
    "path": "Backend/src/vectorstorage/vectorstore.py",
    "chars": 5169,
    "preview": "from src.vectorstorage.init_store import get_models_dir\nfrom langchain_huggingface import HuggingFaceEmbeddings\nfrom lan"
  },
  {
    "path": "Backend/src/voice/voice_to_text.py",
    "chars": 2314,
    "preview": "import whisper\nimport os\nimport warnings\nimport torch\nimport shutil\nimport subprocess\n\n# Suppress specific warnings\nwarn"
  },
  {
    "path": "Backend/tests/testApi.py",
    "chars": 3714,
    "preview": "import pytest\nfrom fastapi.testclient import TestClient\nfrom main import app\nfrom src.endpoint.models import EmbeddingRe"
  },
  {
    "path": "Backend/tests/test_voice.py",
    "chars": 6515,
    "preview": "import pytest\nfrom fastapi.testclient import TestClient\nfrom main import app\nimport os\nimport tempfile\nimport wave\nimpor"
  },
  {
    "path": "Frontend/.gitignore",
    "chars": 347,
    "preview": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\npnpm-debug.log*\nlerna-debug.log*\n\nnode_modules\ndist\ndis"
  },
  {
    "path": "Frontend/components.json",
    "chars": 447,
    "preview": "{\n  \"$schema\": \"https://ui.shadcn.com/schema.json\",\n  \"style\": \"new-york\",\n  \"rsc\": false,\n  \"tsx\": true,\n  \"tailwind\": "
  },
  {
    "path": "Frontend/e2e/app.spec.ts",
    "chars": 16520,
    "preview": "import {\n  test,\n  expect,\n  _electron,\n  Page,\n  ElectronApplication,\n} from \"@playwright/test\";\n\nlet electronApp: Elec"
  },
  {
    "path": "Frontend/electron-builder.json",
    "chars": 1912,
    "preview": "{\n    \"appId\": \"com.electron.notate\",\n    \"productName\": \"Notate\",\n    \"extraResources\": [\n        \"dist-electron/preloa"
  },
  {
    "path": "Frontend/eslint.config.js",
    "chars": 734,
    "preview": "import js from '@eslint/js'\nimport globals from 'globals'\nimport reactHooks from 'eslint-plugin-react-hooks'\nimport reac"
  },
  {
    "path": "Frontend/index.html",
    "chars": 556,
    "preview": "<!doctype html>\n<html lang=\"en\">\n  <head>\n    <meta charset=\"UTF-8\" />\n    <link rel=\"icon\" type=\"image/svg+xml\" href=\"/"
  },
  {
    "path": "Frontend/package.json",
    "chars": 4420,
    "preview": "{\n  \"name\": \"notate\",\n  \"description\": \"Notate is a cross-platform chatbot that can help assist in your research\",\n  \"au"
  },
  {
    "path": "Frontend/playwright.config.ts",
    "chars": 2176,
    "preview": "import { defineConfig, devices } from \"@playwright/test\";\n\n/**\n * Read environment variables from file.\n * https://githu"
  },
  {
    "path": "Frontend/postcss.config.js",
    "chars": 80,
    "preview": "export default {\n  plugins: {\n    tailwindcss: {},\n    autoprefixer: {},\n  },\n}\n"
  },
  {
    "path": "Frontend/src/app/App.tsx",
    "chars": 1472,
    "preview": "import { useMemo } from \"react\";\nimport Chat from \"@/components/Chat/Chat\";\nimport { Toaster } from \"@/components/ui/toa"
  },
  {
    "path": "Frontend/src/app/index.css",
    "chars": 6946,
    "preview": "@tailwind base;\n@tailwind components;\n@tailwind utilities;\n\n@layer base {\n  :root {\n    --gradient: #4ecdc4;\n\n    --back"
  },
  {
    "path": "Frontend/src/app/main.tsx",
    "chars": 350,
    "preview": "import { StrictMode } from \"react\";\nimport { createRoot } from \"react-dom/client\";\nimport \"./index.css\";\nimport App from"
  },
  {
    "path": "Frontend/src/app/vite-env.d.ts",
    "chars": 407,
    "preview": "import { defineConfig } from \"vite\";\nimport react from \"@vitejs/plugin-react\";\nimport path from \"path\";\n\n// https://vite"
  },
  {
    "path": "Frontend/src/components/AppAlert/SettingsAlert.tsx",
    "chars": 755,
    "preview": "import {\n  Dialog,\n  DialogContent,\n  DialogTitle,\n  DialogDescription,\n} from \"@/components/ui/dialog\";\nimport { useUse"
  },
  {
    "path": "Frontend/src/components/Authentication/CreateAccount.tsx",
    "chars": 4516,
    "preview": "import { Label } from \"@/components/ui/label\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n  Card,\n  CardC"
  },
  {
    "path": "Frontend/src/components/Authentication/SelectAccount.tsx",
    "chars": 5349,
    "preview": "import { Avatar, AvatarFallback } from \"@/components/ui/avatar\";\nimport { Card, CardContent } from \"@/components/ui/card"
  },
  {
    "path": "Frontend/src/components/Chat/Chat.tsx",
    "chars": 2314,
    "preview": "import { ArrowDown, Loader2 } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport { useUser } f"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatHeader.tsx",
    "chars": 1380,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport { PlusCircle } from \"lucide-react\";\nimport { Loader2 } from \"luc"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatInput.tsx",
    "chars": 8909,
    "preview": "import { LibraryModal } from \"@/components/CollectionModals/LibraryModal\";\nimport { Button } from \"@/components/ui/butto"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatMessage.tsx",
    "chars": 15909,
    "preview": "import { Avatar, AvatarImage } from \"@/components/ui/avatar\";\nimport {\n  lazy,\n  Suspense,\n  useRef,\n  useEffect,\n  useS"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatMessagesArea.tsx",
    "chars": 1905,
    "preview": "import { ScrollArea } from \"@/components/ui/scroll-area\";\nimport { NewConvoWelcome } from \"./NewConvoWelcome\";\nimport { "
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/LoadingIndicator.tsx",
    "chars": 872,
    "preview": "import { useUser } from \"@/context/useUser\";\nimport { Loader2 } from \"lucide-react\";\n\nexport function LoadingIndicator()"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/NewConvoWelcome.tsx",
    "chars": 4518,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport { MessageSquare, X } from \"lucide-react\";\nimport notateLogo from"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ReasoningMessage.tsx",
    "chars": 7077,
    "preview": "import { BrainCircuit, ChevronDown, ChevronUp, Sparkles } from \"lucide-react\";\nimport { useState, CSSProperties, useEffe"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/StreamingMessage.tsx",
    "chars": 5353,
    "preview": "import { Avatar, AvatarImage } from \"@/components/ui/avatar\";\nimport { SyntaxHighlightedCode } from \"@/components/Chat/C"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/StreamingReasoningMessage.tsx",
    "chars": 7500,
    "preview": "import { BrainCircuit, ChevronDown, ChevronUp, Sparkles } from \"lucide-react\";\nimport { useState, CSSProperties, useEffe"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/SyntaxHightlightedCode.tsx",
    "chars": 2245,
    "preview": "import { highlightCode } from \"@/lib/shikiHightlight\";\nimport { useClipboard } from \"use-clipboard-copy\";\nimport { useSt"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/suggestions.tsx",
    "chars": 1534,
    "preview": "export const docSuggestions = [\n  \"What documents have I uploaded?\",\n  \"Summarize the documents in this collection\",\n  \""
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/AddLibrary.tsx",
    "chars": 17399,
    "preview": "import { Label } from \"@/components/ui/label\";\nimport { Textarea } from \"@/components/ui/textarea\";\nimport { Button } fr"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/DataStoreSelect.tsx",
    "chars": 4733,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport {\n  Popover,\n  PopoverContent,\n  PopoverTrigger,\n} from \"@/compo"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/FIlesInCollection.tsx",
    "chars": 4124,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport {\n  File,\n  Library,\n  Globe,\n  Youtube,\n  FileText,\n  ChevronDo"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/Ingest.tsx",
    "chars": 4240,
    "preview": "import { Tabs, TabsList, TabsTrigger, TabsContent } from \"@/components/ui/tabs\";\nimport { Button } from \"@/components/ui"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/IngestProgress.tsx",
    "chars": 2006,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport { Progress } from \"@/components/ui/progress\";\nimport { useUser }"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/IngestTabs/FileIngestTab.tsx",
    "chars": 2551,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport { Upload, Loader2 } from \"lucide-react\";\nimport { useCallback } "
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/IngestTabs/LinkIngestTab.tsx",
    "chars": 8780,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/components/ui/input\";\nimport { Upload, Loader2"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/ingestTypes.tsx",
    "chars": 666,
    "preview": "export const implementedFileTypes = [\n  \".md\",\n  \".html\",\n  \".json\",\n  \".py\",\n  \".txt\",\n  \".csv\",\n  \".pdf\",\n  \".docx\",\n]"
  },
  {
    "path": "Frontend/src/components/CollectionModals/LibraryModal.tsx",
    "chars": 802,
    "preview": "import { useLibrary } from \"@/context/useLibrary\";\nimport IngestModal from \"./CollectionComponents/Ingest\";\nimport AddLi"
  },
  {
    "path": "Frontend/src/components/FileExplorer/FileExplorer.tsx",
    "chars": 10777,
    "preview": "import {\n  FileIcon,\n  Trash2,\n  Edit2,\n  FolderOpen,\n  File,\n  Folder,\n  ChevronLeftCircle,\n} from \"lucide-react\";\nimpo"
  },
  {
    "path": "Frontend/src/components/Header/Header.tsx",
    "chars": 2774,
    "preview": "import { useUser } from \"@/context/useUser\";\nimport { useEffect } from \"react\";\nimport { useSysSettings } from \"@/contex"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/MainWindowControl.tsx",
    "chars": 3606,
    "preview": "import { Minimize, Minus, X, Maximize2Icon } from \"lucide-react\";\nimport { useEffect, useState } from \"react\";\n\nconst Ma"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/Search.tsx",
    "chars": 5064,
    "preview": "import { SearchIcon, Search } from \"lucide-react\";\nimport { useUser } from \"@/context/useUser\";\nimport { useEffect, useS"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/SettingsDialog.tsx",
    "chars": 1772,
    "preview": "import {\n  Dialog,\n  DialogTrigger,\n  DialogContent,\n  DialogHeader,\n  DialogTitle,\n  DialogDescription,\n} from \"@/compo"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/ToolsDialog.tsx",
    "chars": 835,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport ToolboxIcon from \"@/assets/toolbox/toolbox.svg\";\nimport {\n  Dial"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/WinLinuxControls.tsx",
    "chars": 4328,
    "preview": "import {\n  Menubar,\n  MenubarMenu,\n  MenubarTrigger,\n  MenubarContent,\n  MenubarItem,\n  MenubarSeparator,\n} from \"@/comp"
  },
  {
    "path": "Frontend/src/components/History/History.tsx",
    "chars": 6019,
    "preview": "import { useEffect, useState } from \"react\";\nimport { format } from \"date-fns\";\nimport {\n  Scroll,\n  Search,\n  Calendar,"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/ChatSettings.tsx",
    "chars": 25767,
    "preview": "import { Label } from \"@/components/ui/label\";\nimport {\n  Popover,\n  PopoverTrigger,\n  PopoverContent,\n} from \"@/compone"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/DevIntegration.tsx",
    "chars": 7522,
    "preview": "import { Cpu, Trash, Copy, Check, Eye } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport { I"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AddLocalModel.tsx",
    "chars": 7915,
    "preview": "import { useState, useEffect } from \"react\";\nimport { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/c"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AddOllamaModel.tsx",
    "chars": 8705,
    "preview": "import { useState, useEffect } from \"react\";\nimport { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/c"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AzureOpenAI.tsx",
    "chars": 5338,
    "preview": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useState } from"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/CustomLLM.tsx",
    "chars": 2633,
    "preview": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useState } from"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/External.tsx",
    "chars": 1400,
    "preview": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useSysSettings "
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/ExternalOllama.tsx",
    "chars": 9021,
    "preview": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useState } from"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/LocalLLM.tsx",
    "chars": 4372,
    "preview": "import {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\nimport { Bu"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/Ollama.tsx",
    "chars": 3280,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport AddOllamaModel from \"./AddOllamaModel\";\nimport { useSysSettings "
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/Openrouter.tsx",
    "chars": 3263,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/components/ui/input\";\nimport { useUser } from "
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMPanel.tsx",
    "chars": 10710,
    "preview": "\"use client\";\n\nimport { useState } from \"react\";\nimport { Button } from \"@/components/ui/button\";\nimport { toast } from "
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/providers/SvgIcon.tsx",
    "chars": 250,
    "preview": "export const SvgIcon = ({ src, alt }: { src: string; alt: string }) => (\n  <div className=\"h-3 w-3 relative\">\n    <img\n "
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/providers/defaultsProviderModels.tsx",
    "chars": 246,
    "preview": "export const defaultProviderModel = {\n  OpenAI: \"gpt-3.5-turbo\",\n  Anthropic: \"claude-3-5-sonnet-20241022\",\n  Gemini: \"g"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/providers/providerIcons.tsx",
    "chars": 1225,
    "preview": "import openai from \"@/assets/providers/openai.svg\";\nimport anthropic from \"@/assets/providers/anthropic.svg\";\nimport gem"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsModal.tsx",
    "chars": 3250,
    "preview": "import { Tabs, TabsContent, TabsList, TabsTrigger } from \"@/components/ui/tabs\";\nimport { MessageSquare, Cpu, Settings2 "
  },
  {
    "path": "Frontend/src/components/Tools/ToolComponents/AddTools.tsx",
    "chars": 147,
    "preview": "export default function AddTools() {\n  return (\n    <div className=\"flex flex-col gap-4 text-center\">\n      <h1>Coming S"
  },
  {
    "path": "Frontend/src/components/Tools/ToolComponents/EnableTools.tsx",
    "chars": 3064,
    "preview": "import { Button } from \"@/components/ui/button\";\nimport { useUser } from \"@/context/useUser\";\nimport { Globe } from \"luc"
  },
  {
    "path": "Frontend/src/components/Tools/Tools.tsx",
    "chars": 2475,
    "preview": "import { Tabs, TabsList, TabsTrigger, TabsContent } from \"@/components/ui/tabs\";\nimport { Plus } from \"lucide-react\";\nim"
  },
  {
    "path": "Frontend/src/components/ui/alert.tsx",
    "chars": 1601,
    "preview": "import * as React from \"react\"\nimport { cva, type VariantProps } from \"class-variance-authority\"\n\nimport { cn } from \"@/"
  },
  {
    "path": "Frontend/src/components/ui/avatar.tsx",
    "chars": 1405,
    "preview": "import * as React from \"react\"\nimport * as AvatarPrimitive from \"@radix-ui/react-avatar\"\n\nimport { cn } from \"@/lib/util"
  },
  {
    "path": "Frontend/src/components/ui/badge.tsx",
    "chars": 1143,
    "preview": "import * as React from \"react\"\nimport { cva, type VariantProps } from \"class-variance-authority\"\n\nimport { cn } from \"@/"
  },
  {
    "path": "Frontend/src/components/ui/button.tsx",
    "chars": 762,
    "preview": "import * as React from \"react\";\nimport { Slot } from \"@radix-ui/react-slot\";\nimport { type VariantProps } from \"class-va"
  },
  {
    "path": "Frontend/src/components/ui/buttonVariants.tsx",
    "chars": 1260,
    "preview": "import { cva } from \"class-variance-authority\";\n\nexport const buttonVariants = cva(\n  \"inline-flex items-center justify-"
  },
  {
    "path": "Frontend/src/components/ui/card.tsx",
    "chars": 1859,
    "preview": "import * as React from \"react\";\n\nimport { cn } from \"@/lib/utils\"\n\nconst Card = React.forwardRef<\n  HTMLDivElement,\n  Re"
  },
  {
    "path": "Frontend/src/components/ui/command.tsx",
    "chars": 5126,
    "preview": "import * as React from \"react\";\nimport { DialogTitle, type DialogProps } from \"@radix-ui/react-dialog\";\nimport { Command"
  },
  {
    "path": "Frontend/src/components/ui/dialog.tsx",
    "chars": 4093,
    "preview": "import * as React from \"react\";\nimport * as DialogPrimitive from \"@radix-ui/react-dialog\";\nimport { X } from \"lucide-rea"
  },
  {
    "path": "Frontend/src/components/ui/form.tsx",
    "chars": 4097,
    "preview": "import * as React from \"react\"\nimport * as LabelPrimitive from \"@radix-ui/react-label\"\nimport { Slot } from \"@radix-ui/r"
  },
  {
    "path": "Frontend/src/components/ui/icons.tsx",
    "chars": 1097,
    "preview": "import {\n  Github,\n  Moon,\n  SunMedium,\n  Twitter,\n  type LucideIcon,\n} from \"lucide-react\";\n\nexport type Icon = LucideI"
  },
  {
    "path": "Frontend/src/components/ui/input.tsx",
    "chars": 772,
    "preview": "import * as React from \"react\"\n\nimport { cn } from \"@/lib/utils\";\n\nconst Input = React.forwardRef<HTMLInputElement, Reac"
  },
  {
    "path": "Frontend/src/components/ui/label.tsx",
    "chars": 710,
    "preview": "import * as React from \"react\"\nimport * as LabelPrimitive from \"@radix-ui/react-label\"\nimport { cva, type VariantProps }"
  },
  {
    "path": "Frontend/src/components/ui/menubar.tsx",
    "chars": 8015,
    "preview": "import * as React from \"react\"\nimport * as MenubarPrimitive from \"@radix-ui/react-menubar\"\nimport { Check, ChevronRight,"
  },
  {
    "path": "Frontend/src/components/ui/popover.tsx",
    "chars": 1295,
    "preview": "import * as React from \"react\"\nimport * as PopoverPrimitive from \"@radix-ui/react-popover\"\n\nimport { cn } from \"@/lib/ut"
  },
  {
    "path": "Frontend/src/components/ui/progress.tsx",
    "chars": 778,
    "preview": "import * as React from \"react\"\nimport * as ProgressPrimitive from \"@radix-ui/react-progress\"\n\nimport { cn } from \"@/lib/"
  },
  {
    "path": "Frontend/src/components/ui/radio-group.tsx",
    "chars": 1410,
    "preview": "import * as React from \"react\"\nimport * as RadioGroupPrimitive from \"@radix-ui/react-radio-group\"\nimport { Circle } from"
  },
  {
    "path": "Frontend/src/components/ui/scroll-area.tsx",
    "chars": 1651,
    "preview": "import * as React from \"react\";\nimport * as ScrollAreaPrimitive from \"@radix-ui/react-scroll-area\";\n\nimport { cn } from "
  },
  {
    "path": "Frontend/src/components/ui/select.tsx",
    "chars": 5627,
    "preview": "import * as React from \"react\"\nimport * as SelectPrimitive from \"@radix-ui/react-select\"\nimport { Check, ChevronDown, Ch"
  },
  {
    "path": "Frontend/src/components/ui/separator.tsx",
    "chars": 756,
    "preview": "import * as React from \"react\"\nimport * as SeparatorPrimitive from \"@radix-ui/react-separator\"\n\nimport { cn } from \"@/li"
  },
  {
    "path": "Frontend/src/components/ui/sheet.tsx",
    "chars": 4254,
    "preview": "import * as React from \"react\"\nimport * as SheetPrimitive from \"@radix-ui/react-dialog\"\nimport { cva, type VariantProps "
  },
  {
    "path": "Frontend/src/components/ui/slider.tsx",
    "chars": 1037,
    "preview": "import * as React from \"react\"\nimport * as SliderPrimitive from \"@radix-ui/react-slider\"\n\nimport { cn } from \"@/lib/util"
  },
  {
    "path": "Frontend/src/components/ui/switch.tsx",
    "chars": 1148,
    "preview": "import * as React from \"react\"\nimport * as SwitchPrimitives from \"@radix-ui/react-switch\"\n\nimport { cn } from \"@/lib/uti"
  },
  {
    "path": "Frontend/src/components/ui/tabs.tsx",
    "chars": 1891,
    "preview": "import * as React from \"react\";\nimport * as TabsPrimitive from \"@radix-ui/react-tabs\";\n\nimport { cn } from \"@/lib/utils\""
  },
  {
    "path": "Frontend/src/components/ui/textarea.tsx",
    "chars": 652,
    "preview": "import * as React from \"react\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Textarea = React.forwardRef<\n  HTMLTextAreaEleme"
  },
  {
    "path": "Frontend/src/components/ui/toast.tsx",
    "chars": 4850,
    "preview": "import * as React from \"react\";\nimport * as ToastPrimitives from \"@radix-ui/react-toast\";\nimport { cva, type VariantProp"
  },
  {
    "path": "Frontend/src/components/ui/toaster.tsx",
    "chars": 772,
    "preview": "import { useToast } from \"@/hooks/use-toast\"\nimport {\n  Toast,\n  ToastClose,\n  ToastDescription,\n  ToastProvider,\n  Toas"
  },
  {
    "path": "Frontend/src/components/ui/tooltip.tsx",
    "chars": 1206,
    "preview": "import * as React from \"react\"\nimport * as TooltipPrimitive from \"@radix-ui/react-tooltip\"\n\nimport { cn } from \"@/lib/ut"
  },
  {
    "path": "Frontend/src/context/ChatInputContext.tsx",
    "chars": 513,
    "preview": "import { createContext } from \"react\";\n\nexport interface ChatInputContextType {\n  input: string;\n  setInput: React.Dispa"
  },
  {
    "path": "Frontend/src/context/LibraryContext.tsx",
    "chars": 9935,
    "preview": "import React, { createContext, useCallback, useEffect, useState } from \"react\";\nimport { toast } from \"@/hooks/use-toast"
  },
  {
    "path": "Frontend/src/context/SysSettingsContext.tsx",
    "chars": 10026,
    "preview": "import React, { createContext, useRef, useState } from \"react\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { SysS"
  },
  {
    "path": "Frontend/src/context/UserClientProviders.tsx",
    "chars": 519,
    "preview": "import { UserProvider } from \"./UserContext\";\nimport { SysSettingsProvider } from \"./SysSettingsContext\";\nimport { ViewP"
  },
  {
    "path": "Frontend/src/context/UserContext.tsx",
    "chars": 7785,
    "preview": "import React, { createContext, useMemo } from \"react\";\nimport { ChatInputContext, ChatInputContextType } from \"./ChatInp"
  },
  {
    "path": "Frontend/src/context/ViewContext.tsx",
    "chars": 527,
    "preview": "import React, { createContext, useState } from \"react\";\nimport { UserViewContextType } from \"@/types/contextTypes/UserVi"
  },
  {
    "path": "Frontend/src/context/useChatInput.tsx",
    "chars": 304,
    "preview": "import { useContext } from \"react\";\nimport { ChatInputContext } from \"./ChatInputContext\";\n\nexport const useChatInput = "
  },
  {
    "path": "Frontend/src/context/useLibrary.tsx",
    "chars": 297,
    "preview": "import { useContext } from \"react\";\nimport { LibraryContext } from \"./LibraryContext\";\n\nexport const useLibrary = () => "
  },
  {
    "path": "Frontend/src/context/useSysSettings.tsx",
    "chars": 321,
    "preview": "import { useContext } from \"react\";\nimport { SysSettingsContext } from \"./SysSettingsContext\";\n\nexport const useSysSetti"
  },
  {
    "path": "Frontend/src/context/useUser.tsx",
    "chars": 279,
    "preview": "import { useContext } from \"react\";\nimport { UserContext } from \"./UserContext\";\n\nexport const useUser = () => {\n  const"
  },
  {
    "path": "Frontend/src/context/useView.tsx",
    "chars": 279,
    "preview": "import { useContext } from \"react\";\nimport { ViewContext } from \"./ViewContext\";\n\nexport const useView = () => {\n  const"
  },
  {
    "path": "Frontend/src/data/models.ts",
    "chars": 418,
    "preview": "import React from \"react\";\n\nexport const fetchEmbeddingModels = async (\n  setEmbeddingModels: React.Dispatch<React.SetSt"
  },
  {
    "path": "Frontend/src/data/sysSpecs.ts",
    "chars": 659,
    "preview": "export type SystemSpecs = {\n  cpu: string;\n  vram: string;\n  GPU_Manufacturer?: string;\n};\n\nexport const fetchSystemSpec"
  },
  {
    "path": "Frontend/src/electron/authentication/devApi.ts",
    "chars": 1188,
    "preview": "import jwt from \"jsonwebtoken\";\nimport fs from \"fs\";\nimport path from \"path\";\nimport { app } from \"electron\";\nimport cry"
  },
  {
    "path": "Frontend/src/electron/authentication/secret.ts",
    "chars": 450,
    "preview": "import crypto from 'crypto';\n\nlet jwtSecret: string | null = null;\n\nexport function generateSecret(): string {\n    // Ge"
  },
  {
    "path": "Frontend/src/electron/authentication/token.ts",
    "chars": 216,
    "preview": "import jwt from \"jsonwebtoken\";\nimport { getSecret } from \"./secret.js\";\n\nexport async function getToken({ userId }: { u"
  },
  {
    "path": "Frontend/src/electron/crawl/cancelWebcrawl.ts",
    "chars": 487,
    "preview": "import { getToken } from \"../authentication/token.js\";\n\nexport async function cancelWebcrawl(payload: {\n  userId: number"
  },
  {
    "path": "Frontend/src/electron/crawl/webcrawl.ts",
    "chars": 4087,
    "preview": "import { getToken } from \"../authentication/token.js\";\nimport db from \"../db.js\";\nimport { BrowserWindow } from \"electro"
  },
  {
    "path": "Frontend/src/electron/db.ts",
    "chars": 36814,
    "preview": "import { fileURLToPath } from \"url\";\nimport path from \"path\";\nimport fs from \"fs\";\nimport { app } from \"electron\";\nimpor"
  },
  {
    "path": "Frontend/src/electron/embedding/cancelEmbed.ts",
    "chars": 481,
    "preview": "import { getToken } from \"../authentication/token.js\";\n\nexport async function cancelEmbed(payload: {\n  userId: number;\n}"
  },
  {
    "path": "Frontend/src/electron/embedding/vectorstoreQuery.ts",
    "chars": 1388,
    "preview": "import { getToken } from \"../authentication/token.js\";\nimport db from \"../db.js\";\n\nexport async function vectorstoreQuer"
  },
  {
    "path": "Frontend/src/electron/handlers/azureHandlers.ts",
    "chars": 1142,
    "preview": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport db from \"../db.js\";\n\nexport async function setupAzureOpenAI()"
  },
  {
    "path": "Frontend/src/electron/handlers/chatHandlers.ts",
    "chars": 1557,
    "preview": "import { ipcMain } from \"electron\";\nimport { chatRequest } from \"../llms/llms.js\";\nimport { keyValidation } from \"../llm"
  },
  {
    "path": "Frontend/src/electron/handlers/closeEventHandler.ts",
    "chars": 390,
    "preview": "import { app, BrowserWindow } from \"electron\";\n\nexport function handleCloseEvents(mainWindow: BrowserWindow) {\n  let wil"
  },
  {
    "path": "Frontend/src/electron/handlers/collectionHandlers.ts",
    "chars": 4858,
    "preview": "import { vectorstoreQuery } from \"../embedding/vectorstoreQuery.js\";\nimport { websiteFetch } from \"../storage/websiteFet"
  },
  {
    "path": "Frontend/src/electron/handlers/customApiHandlers.ts",
    "chars": 1038,
    "preview": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport db from \"../db.js\";\n\nexport async function setupCustomApiHand"
  },
  {
    "path": "Frontend/src/electron/handlers/dbHandlers.ts",
    "chars": 10615,
    "preview": "import db from \"../db.js\";\nimport { ipcMainHandle, ipcMainDatabaseHandle } from \"../util.js\";\nimport { getDevApiKey } fr"
  },
  {
    "path": "Frontend/src/electron/handlers/fileHandlers.ts",
    "chars": 1037,
    "preview": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport { openCollectionFolderFromFileExplorer } from \"../storage/ope"
  },
  {
    "path": "Frontend/src/electron/handlers/handlers.test.ts",
    "chars": 1623,
    "preview": "import { test, expect, vi, Mock } from \"vitest\";\nimport { ipcMain } from \"electron\";\n\n// Mock electron IPC\nvi.mock(\"elec"
  },
  {
    "path": "Frontend/src/electron/handlers/ipcHandlers.ts",
    "chars": 1190,
    "preview": "import { BrowserWindow, ipcMain, dialog } from \"electron\";\nimport { ipcMainHandle, ipcMainOn, isDev } from \"../util.js\";"
  },
  {
    "path": "Frontend/src/electron/handlers/localModelHandlers.ts",
    "chars": 11533,
    "preview": "import { ipcMainDatabaseHandle } from \"../util.js\";\n\nimport { getDirModels } from \"../localLLMs/getDirModels.js\";\nimport"
  },
  {
    "path": "Frontend/src/electron/handlers/menuHandlers.ts",
    "chars": 1396,
    "preview": "import { ipcMain, app, BrowserWindow } from \"electron\";\n\nexport function setupMenuHandlers(mainWindow: BrowserWindow) {\n"
  },
  {
    "path": "Frontend/src/electron/handlers/ollamaHandlers.ts",
    "chars": 2192,
    "preview": "import { ipcMainHandle } from \"../util.js\";\nimport { fetchOllamaModels } from \"../ollama/fetchLocalModels.js\";\nimport { "
  },
  {
    "path": "Frontend/src/electron/handlers/openRouterHandlers.ts",
    "chars": 1306,
    "preview": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport db from \"../db.js\";\nimport { OpenRouterProviderAPIKeyCheck } "
  },
  {
    "path": "Frontend/src/electron/handlers/voiceHandlers.ts",
    "chars": 2236,
    "preview": "import { ipcMainHandle } from \"../util.js\";\nimport * as fs from \"fs\";\nimport * as path from \"path\";\nimport { app } from "
  },
  {
    "path": "Frontend/src/electron/helpers/spawnAsync.ts",
    "chars": 1074,
    "preview": "import { spawn } from \"child_process\";\nimport log from \"electron-log\";\nimport { updateLoadingStatus } from \"../loadingWi"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/anthropicAgent.ts",
    "chars": 3562,
    "preview": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { sendMessageChunk } from \"../llm"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/geminiAgent.ts",
    "chars": 3675,
    "preview": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { sendMessageChunk } from \"../llm"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/ollamaAgent.ts",
    "chars": 3334,
    "preview": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { sendMessageChunk } from \"../llm"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/openAiAgent.ts",
    "chars": 2867,
    "preview": "import { OpenAI } from \"openai\";\nimport { webSearch } from \"./tools/websearch.js\";\nimport { ChatCompletionMessageParam }"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/tools/websearch.ts",
    "chars": 2570,
    "preview": "import { chromium } from \"playwright\";\n\nexport async function webSearch(payload: { url: string }) {\n  const browser = aw"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/anthropic.ts",
    "chars": 774,
    "preview": "import Anthropic from \"@anthropic-ai/sdk\";\nimport log from \"electron-log\";\nexport async function AnthropicProviderAPIKey"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/deepseek.ts",
    "chars": 840,
    "preview": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nexport async function DeepSeekProviderAPIKeyCheck(\n  apiKey"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/gemini.ts",
    "chars": 1007,
    "preview": "import { GoogleGenerativeAI } from \"@google/generative-ai\";\nimport log from \"electron-log\"; \nlet genAI: GoogleGenerative"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/openai.ts",
    "chars": 751,
    "preview": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nexport async function OpenAIProviderAPIKeyCheck(apiKey: str"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/openrouter.ts",
    "chars": 857,
    "preview": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nexport async function OpenRouterProviderAPIKeyCheck(\n  apiK"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/xai.ts",
    "chars": 985,
    "preview": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nlet openai: OpenAI;\n\nasync function initializeXAI(apiKey: s"
  },
  {
    "path": "Frontend/src/electron/llms/chatCompletion.ts",
    "chars": 4224,
    "preview": "import OpenAI from \"openai\";\nimport db from \"../db.js\";\nimport { openAiChainOfThought } from \"./reasoningLayer/openAiCha"
  },
  {
    "path": "Frontend/src/electron/llms/generateTitle.ts",
    "chars": 7206,
    "preview": "import Anthropic from \"@anthropic-ai/sdk\";\nimport db from \"../db.js\";\nimport { ChatCompletionMessageParam } from \"openai"
  },
  {
    "path": "Frontend/src/electron/llms/keyValidation.ts",
    "chars": 1759,
    "preview": "import { OpenAIProviderAPIKeyCheck } from \"./apiCheckProviders/openai.js\";\nimport { AnthropicProviderAPIKeyCheck } from "
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/addAssistantMessage.ts",
    "chars": 629,
    "preview": "import db from \"../../db.js\";\n\nexport async function addAssistantMessage(\n  activeUser: User,\n  conversationId: bigint |"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/addUserMessage.ts",
    "chars": 270,
    "preview": "import db from \"../../db.js\";\n\nexport async function addUserMessage(\n  activeUser: User,\n  conversationId: number,\n  mes"
  }
]

// ... and 98 more files (download for full content)

About this extraction

This page contains the full source code of the CNTRLAI/Notate GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 298 files (1002.2 KB), approximately 246.8k tokens, and a symbol index with 573 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!