[
  {
    "path": ".gitignore",
    "content": "# Python cache files\n__pycache__/\n*.py[cod]\n*$py.class\n\n.venv\n\n/Frontend/node_modules\n/Frontend/dist\n\n.env.local\n\ndatabase.sqlite\n\nBackend/venv\nBackend/venvs\nmodels/*\n\n.DS_Store\n\n*.tsbuildinfo\n\nCollections/*\n\nFileCollections\n\n.dev.secret\n\nVectorStores/*\nFrontend/chroma_db/chroma.sqlite3\n\nmonitor_resources.ps1\n\nFrontend/models/*\nBackend/models/*\ntest_curl.txt"
  },
  {
    "path": "Backend/.gitignore",
    "content": "venv\ntestData\n"
  },
  {
    "path": "Backend/ensure_dependencies.py",
    "content": "import sys\nimport os\nimport subprocess\nimport asyncio\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nimport warnings\nimport logging\n\n# Filter transformers model warnings\nwarnings.filterwarnings('ignore', category=UserWarning)\nos.environ['TRANSFORMERS_NO_ADVISORY_WARNINGS'] = 'true'\n\n# Configure logging to handle progress messages\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef find_python310():\n    python_commands = [\"python3.12\", \"python3\"] if sys.platform != \"win32\" else [\n        \"python3.11\", \"py -3.11\", \"python\"]\n\n    for cmd in python_commands:\n        try:\n            result = subprocess.run(\n                [cmd, \"--version\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n            if sys.platform == \"win32\":\n                if \"Python 3.11\" in result.stdout:\n                    return cmd\n            else:\n                if \"Python 3.12\" in result.stdout:\n                    return cmd\n        except:\n            continue\n    return None\n\n\ndef create_venv(venv_path=None):\n    if venv_path is None:\n        venv_path = os.path.join(os.path.dirname(__file__), 'venv')\n    if not os.path.exists(venv_path):\n        print(\"Creating virtual environment...\")\n        python310 = find_python310()\n        if not python310:\n            if sys.platform == \"win32\":\n                raise RuntimeError(\n                    \"Python 3.11 is required but not found. Please install Python 3.11.\")\n            else:\n                raise RuntimeError(\n                    \"Python 3.12 is required but not found. 
Please install Python 3.12.\")\n\n        subprocess.check_call([python310, \"-m\", \"venv\", venv_path])\n        print(f\"Created virtual environment with {python310}\")\n    return venv_path\n\n\ndef get_venv_python(venv_path):\n    if sys.platform == \"win32\":\n        return os.path.join(venv_path, \"Scripts\", \"python.exe\")\n    return os.path.join(venv_path, \"bin\", \"python\")\n\n\ndef install_package(python_path, package):\n    try:\n        subprocess.check_call(\n            [python_path, '-m', 'pip', 'install', '--no-deps',\n                '--upgrade-strategy', 'only-if-needed', package],\n            stdout=subprocess.DEVNULL,\n            stderr=subprocess.DEVNULL\n        )\n        return package, None\n    except subprocess.CalledProcessError as e:\n        return package, str(e)\n\n\ndef get_installed_packages(python_path):\n    result = subprocess.run(\n        [python_path, '-m', 'pip', 'list', '--format=freeze'],\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n        text=True\n    )\n    return {line.split('==')[0].lower(): line.split('==')[1] for line in result.stdout.splitlines()}\n\n\nasync def async_init_store():\n    try:\n        # Suppress model initialization warnings\n        import transformers\n        from src.vectorstorage.init_store import init_store\n        transformers.logging.set_verbosity_error()\n        logging.getLogger(\n            \"transformers.modeling_utils\").setLevel(logging.ERROR)\n\n        # Configure huggingface_hub logging\n        hf_logger = logging.getLogger(\"huggingface_hub\")\n        hf_logger.setLevel(logging.INFO)\n        sys.stdout.write(\n            \"Downloading initial embedding model (HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5) ...|85\\n\")\n        sys.stdout.flush()\n\n        # Redirect stderr to capture progress messages\n        with open(os.devnull, 'w') as devnull:\n            old_stderr = sys.stderr\n            sys.stderr = devnull\n            
try:\n                model_path = await init_store()\n                sys.stdout.write(\n                    f\"Model downloaded successfully to {model_path}|95\\n\")\n            finally:\n                sys.stderr = old_stderr\n\n        sys.stdout.flush()\n    except Exception as e:\n        sys.stdout.write(f\"Error downloading model: {str(e)}|85\\n\")\n        sys.stdout.flush()\n        raise e\n\n\ndef get_package_version(python_path, package_name):\n    try:\n        result = subprocess.run(\n            [python_path, '-m', 'pip', 'show', package_name],\n            capture_output=True,\n            text=True\n        )\n        for line in result.stdout.split('\\n'):\n            if line.startswith('Version: '):\n                version = line.split('Version: ')[1].strip()\n                # Handle CUDA variants of PyTorch\n                if package_name == 'torch' and '+cu' in version:\n                    # Strip CUDA suffix for version comparison\n                    version = version.split('+')[0]\n                return version\n    except:\n        return None\n    return None\n\n\ndef install_requirements(custom_venv_path=None):\n    try:\n        venv_path = create_venv(custom_venv_path)\n        python_path = get_venv_python(venv_path)\n\n        # Install core dependencies first\n\n        requirements_path = os.path.join(\n            os.path.dirname(__file__), 'requirements.txt')\n\n        # Handle remaining requirements\n        with open(requirements_path, 'r') as f:\n            requirements = [\n                line.strip() for line in f\n                if line.strip()\n                and not line.startswith('#')\n            ]\n\n        total_deps = len(requirements)\n        sys.stdout.write(f\"Total packages to process: {total_deps}|50\\n\")\n        sys.stdout.flush()\n\n        installed_packages = get_installed_packages(python_path)\n\n        to_install = []\n        for req in requirements:\n            pkg_name = 
req.split('==')[0] if '==' in req else req\n            if pkg_name.lower() not in installed_packages:\n                to_install.append(req)\n\n        completed_deps = total_deps - len(to_install)\n        progress = 50 + (completed_deps / total_deps) * \\\n            30  # Scale from 50 to 80\n        sys.stdout.write(f\"Checked installed packages|{progress:.1f}\\n\")\n        sys.stdout.flush()\n\n        with ThreadPoolExecutor(max_workers=5) as executor:\n            future_to_pkg = {executor.submit(\n                install_package, python_path, req): req for req in to_install}\n            for future in as_completed(future_to_pkg):\n                pkg = future_to_pkg[future]\n                pkg_name = pkg.split('==')[0] if '==' in pkg else pkg\n                result, error = future.result()\n                completed_deps += 1\n                progress = 50 + (completed_deps / total_deps) * \\\n                    30  # Scale from 50 to 80\n\n                if error:\n                    sys.stdout.write(\n                        f\"Error installing {pkg_name}: {error}|{progress:.1f}\\n\")\n                else:\n                    sys.stdout.write(f\"Installed {pkg_name}|{progress:.1f}\\n\")\n                sys.stdout.flush()\n\n        # Now we can safely import init_store after all dependencies are installed\n        sys.stdout.write(\n            \"All dependencies installed, initializing model store...|85\\n\")\n        sys.stdout.flush()\n\n        # Initialize the store to download the model\n        asyncio.run(async_init_store())\n\n        sys.stdout.write(\n            \"Dependencies installed and model initialized successfully!|99\\n\")\n        sys.stdout.flush()\n\n    except Exception as e:\n        sys.stdout.write(f\"Error installing dependencies: {str(e)}|0\\n\")\n        sys.stdout.flush()\n        sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    custom_venv_path = sys.argv[1] if len(sys.argv) > 1 else None\n    
install_requirements(custom_venv_path)\n"
  },
  {
    "path": "Backend/main.py",
    "content": "import logging\nfrom src.authentication.api_key_authorization import api_key_auth\nfrom src.authentication.token import verify_token, verify_token_or_api_key\nfrom src.data.database.checkAPIKey import check_api_key\nfrom src.data.dataFetch.youtube import youtube_transcript\nfrom src.endpoint.deleteStore import delete_vectorstore_collection\nfrom src.endpoint.models import EmbeddingRequest, QueryRequest, ChatCompletionRequest, VectorStoreQueryRequest, DeleteCollectionRequest, YoutubeTranscriptRequest, WebCrawlRequest, ModelLoadRequest\nfrom src.endpoint.embed import embed\nfrom src.endpoint.vectorQuery import query_vectorstore\nfrom src.endpoint.devApiCall import rag_call, llm_call, vector_call\nfrom src.endpoint.transcribe import transcribe_audio\nfrom src.endpoint.webcrawl import webcrawl\nfrom src.models.manager import model_manager\nfrom fastapi import FastAPI, Depends, File, UploadFile, Request, BackgroundTasks\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import StreamingResponse, JSONResponse\nimport asyncio\nimport os\nimport signal\nimport sys\nimport psutil\nimport threading\nimport uvicorn\nimport json\nfrom src.endpoint.api import chat_completion_stream\n\napp = FastAPI()\nembedding_task = None\nembedding_event = None\ncrawl_task = None\ncrawl_event = None\n\norigins = [\"http://localhost\", \"http://127.0.0.1\"]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n    max_age=3600,  # Cache preflight requests for 1 hour\n    expose_headers=[\"*\"]\n)\n\n# Configure FastAPI app settings for long-running requests\n\n\n@app.middleware(\"http\")\nasync def timeout_middleware(request: Request, call_next):\n    try:\n        # Set a long timeout for the request\n        # 1 hour timeout\n        response = await asyncio.wait_for(call_next(request), timeout=3600)\n        return response\n    except 
asyncio.TimeoutError:\n        return JSONResponse(\n            status_code=504,\n            content={\"detail\": \"Request timeout\"}\n        )\n\nlogger = logging.getLogger(__name__)\n\n\n@app.post(\"/chat/completions\")\nasync def chat_completion(request: ChatCompletionRequest, user_id: str = Depends(verify_token_or_api_key)) -> StreamingResponse:\n    \"\"\"Stream chat completion from the model\"\"\"\n    print(\"Chat completion request received\")\n    print(user_id, request)\n    info = model_manager.get_model_info()\n    print(info)\n    if request.model != info[\"model_name\"]:\n        model_load_request = ModelLoadRequest(\n            model_name=request.model)\n        model, tokenizer = model_manager.load_model(model_load_request)\n        print(\"Model mismatch\")\n        return {\"status\": \"error\", \"message\": \"Model mismatch\"}\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    print(\"Authorized\")\n    print(request)\n    return StreamingResponse(\n        chat_completion_stream(request),\n        media_type=\"text/event-stream\"\n    )\n\n\n@app.get(\"/model-info\")\nasync def get_model_info(user_id: str = Depends(verify_token_or_api_key)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    \"\"\"Get information about the currently loaded model\"\"\"\n    return JSONResponse(content=model_manager.get_model_info())\n\n\n@app.post(\"/load-model\")\nasync def load_model_endpoint(request: ModelLoadRequest, user_id: str = Depends(verify_token_or_api_key)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    \"\"\"Load a model with the specified configuration\"\"\"\n    print(\"Loading model\")\n    print(request)\n    model_type = request.model_type or \"auto\"\n    if model_type != \"auto\":\n        is_compatible, message = model_manager.check_platform_compatibility(\n            model_type)\n   
     logger.info(f\"is_compatible: {is_compatible}, message: {message}\")\n        # Return early if platform is not compatible\n        if not is_compatible:\n            response_data = model_manager._make_json_serializable({\n                \"status\": \"error\",\n                \"message\": f\"Cannot load model: {message}\",\n                \"model_info\": model_manager.get_model_info()\n            })\n            return JSONResponse(content=response_data)\n    try:\n        model, tokenizer = model_manager.load_model(request)\n        response_data = model_manager._make_json_serializable({\n            \"status\": \"success\",\n            \"message\": f\"Successfully loaded model {request.model_name}\",\n            \"model_info\": model_manager.get_model_info()\n        })\n        print(response_data)\n        logger.info(response_data)\n        return JSONResponse(content=response_data)\n    except Exception as e:\n        response_data = model_manager._make_json_serializable({\n            \"status\": \"error\",\n            \"message\": str(e),\n            \"model_info\": model_manager.get_model_info()\n        })\n        return JSONResponse(status_code=500, content=response_data)\n\n\n@app.post(\"/unload-model\")\nasync def unload_model_endpoint(user_id: str = Depends(verify_token_or_api_key)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    \"\"\"Unload the currently loaded model\"\"\"\n\n    try:\n        model_manager.clear_model()\n        return JSONResponse(content={\n            \"status\": \"success\",\n            \"message\": \"Model unloaded successfully\",\n            \"model_info\": model_manager.get_model_info()\n        })\n    except Exception as e:\n        return JSONResponse(\n            status_code=500,\n            content={\n                \"status\": \"error\",\n                \"message\": str(e),\n                \"model_info\": model_manager.get_model_info()\n        
    }\n        )\n\n\n@app.post(\"/webcrawl\")\nasync def webcrawl_endpoint(data: WebCrawlRequest, user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n\n    global crawl_task, crawl_event\n    if crawl_task is not None:\n        return {\"status\": \"error\", \"message\": \"A crawl process is already running\"}\n\n    crawl_event = asyncio.Event()\n\n    async def event_generator():\n        global crawl_task, crawl_event\n        try:\n            for result in webcrawl(data, crawl_event):\n                if crawl_event.is_set():\n                    yield f\"data: {{'type': 'cancelled', 'message': 'Crawl process cancelled'}}\\n\\n\"\n                    break\n                yield f\"{result}\\n\\n\"\n                await asyncio.sleep(0.1)\n        except Exception as e:\n            error_data = {\n                \"status\": \"error\",\n                \"data\": {\n                    \"message\": str(e)\n                }\n            }\n            yield f\"data: {json.dumps(error_data)}\\n\\n\"\n        finally:\n            crawl_task = None\n            crawl_event = None\n\n    response = StreamingResponse(\n        event_generator(), media_type=\"text/event-stream\")\n    crawl_task = asyncio.create_task(event_generator().__anext__())\n    return response\n\n\n@app.post(\"/transcribe\")\nasync def transcribe_audio_endpoint(audio_file: UploadFile = File(...), model_name: str = \"base\", user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    return await transcribe_audio(audio_file, model_name)\n\n\n@app.post(\"/embed\")\nasync def add_embedding(data: EmbeddingRequest, user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    print(\"Metadata:\", data.metadata)\n    global embedding_task, 
embedding_event\n\n    if embedding_task is not None:\n        return {\"status\": \"error\", \"message\": \"An embedding process is already running\"}\n\n    embedding_event = asyncio.Event()\n\n    async def event_generator():\n        global embedding_task, embedding_event\n        try:\n            async for result in embed(data):\n                if embedding_event.is_set():\n                    yield f\"data: {{'type': 'cancelled', 'message': 'Embedding process cancelled'}}\\n\\n\"\n                    break\n\n                if result[\"status\"] == \"progress\":\n                    progress_data = result[\"data\"]\n                    yield f\"data: {{'type': 'progress', 'chunk': {progress_data['chunk']}, 'totalChunks': {progress_data['total_chunks']}, 'percent_complete': '{progress_data['percent_complete']}', 'est_remaining_time': '{progress_data['est_remaining_time']}'}}\\n\\n\"\n                else:\n                    yield f\"data: {{'type': '{result['status']}', 'message': '{result['message']}'}}\\n\\n\"\n                await asyncio.sleep(0.1)  # Prevent overwhelming the connection\n        except Exception as e:\n            logger.error(f\"Error in embedding process: {str(e)}\")\n            yield f\"data: {{'type': 'error', 'message': '{str(e)}'}}\\n\\n\"\n        finally:\n            embedding_task = None\n            embedding_event = None\n            logger.info(\"Embedding task cleanup completed\")\n\n    response = StreamingResponse(\n        event_generator(),\n        media_type=\"text/event-stream\"\n    )\n\n    # Set response headers for better connection handling\n    response.headers[\"Cache-Control\"] = \"no-cache\"\n    response.headers[\"Connection\"] = \"keep-alive\"\n    response.headers[\"X-Accel-Buffering\"] = \"no\"\n    response.headers[\"Transfer-Encoding\"] = \"chunked\"\n\n    embedding_task = asyncio.create_task(event_generator().__anext__())\n    return response\n\n\n@app.post(\"/youtube-ingest\")\nasync def 
youtube_ingest(data: YoutubeTranscriptRequest, user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n\n    async def event_generator():\n        try:\n            for result in youtube_transcript(data):\n                if result[\"status\"] == \"progress\":\n                    progress_data = result[\"data\"]\n                    yield f\"data: {{'type': 'progress', 'chunk': {progress_data['chunk']}, 'totalChunks': {progress_data['total_chunks']}, 'percent_complete': '{progress_data['percent_complete']}', 'message': '{progress_data['message']}'}}\\n\\n\"\n                else:\n                    yield f\"data: {{'type': '{result['status']}', 'message': '{result['message']}'}}\\n\\n\"\n                await asyncio.sleep(0.1)\n        except Exception as e:\n            yield f\"data: {{'type': 'error', 'message': '{str(e)}'}}\\n\\n\"\n\n    return StreamingResponse(event_generator(), media_type=\"text/event-stream\")\n\n\n@app.post(\"/cancel-embed\")\nasync def cancel_embedding(user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    global embedding_task, embedding_event\n    if embedding_event:\n        embedding_event.set()\n        return {\"status\": \"success\", \"message\": \"Embedding process cancelled\"}\n    return {\"status\": \"error\", \"message\": \"No embedding process running\"}\n\n\n@app.post(\"/restart-server\")\nasync def restart_server(user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n\n    def restart():\n        pid = os.getpid()\n        parent = psutil.Process(pid)\n        # Kill all child processes\n        for child in parent.children(recursive=True):\n            child.kill()\n        # Kill the current process\n        os.kill(pid, signal.SIGTERM)\n        # Start a new instances\n      
  python = sys.executable\n        os.execl(python, python, *sys.argv)\n\n    threading.Thread(target=restart).start()\n    return {\"status\": \"success\", \"message\": \"Server restart initiated\"}\n\n\n@app.post(\"/vector-query\")\nasync def vector_query(data: VectorStoreQueryRequest, user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    try:\n        result = query_vectorstore(data, data.is_local)\n        return result\n    except Exception as e:\n        print(f\"Error querying vectorstore: {str(e)}\")\n        return {\"status\": \"error\", \"message\": str(e)}\n\n\n@app.post(\"/delete-collection\")\nasync def delete_collection(data: DeleteCollectionRequest, user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    print(\"Authorized\")\n    return delete_vectorstore_collection(data)\n\n\n@app.post(\"/api/vector\")\nasync def api_vector(query_request: QueryRequest, user_id: str = Depends(api_key_auth)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    \"\"\" check to see if the userId has API key in SQLite \"\"\"\n    if not query_request.collection_name:\n        print(\"No collection name provided\")\n        return {\"status\": \"error\", \"message\": \"No collection name provided\"}\n    if check_api_key(int(user_id)) == False:\n        print(\"Unauthorized\")\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    print(\"Authorized\")\n    return vector_call(query_request, user_id)\n\n\n@app.post(\"/api/llm\")\nasync def api_llm(query_request: ChatCompletionRequest, user_id: str = Depends(api_key_auth)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    \"\"\" check to see if the userId has API key in SQLite \"\"\"\n    if not query_request.model:\n        print(\"No 
model provided\")\n        return {\"status\": \"error\", \"message\": \"No model provided\"}\n    if check_api_key(int(user_id)) == False:\n        print(\"Unauthorized\")\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    print(\"Authorized\")\n    return await llm_call(query_request, user_id)\n\n\n@app.post(\"/api/rag\")\nasync def api_rag(query_request: QueryRequest, user_id: str = Depends(api_key_auth)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    \"\"\" check to see if the userId has API key in SQLite \"\"\"\n    if not query_request.model:\n        print(\"No model provided\")\n        return {\"status\": \"error\", \"message\": \"No model provided\"}\n    if not query_request.collection_name:\n        print(\"No collection name provided\")\n        return {\"status\": \"error\", \"message\": \"No collection name provided\"}\n    if check_api_key(int(user_id)) == False:\n        print(\"Unauthorized\")\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    print(\"Authorized\")\n    return await rag_call(query_request, user_id)\n\n\n@app.post(\"/cancel-crawl\")\nasync def cancel_crawl(user_id: str = Depends(verify_token)):\n    if user_id is None:\n        return {\"status\": \"error\", \"message\": \"Unauthorized\"}\n    global crawl_task, crawl_event\n    if crawl_event:\n        crawl_event.set()\n        return {\"status\": \"success\", \"message\": \"Crawl process cancelled\"}\n    return {\"status\": \"error\", \"message\": \"No crawl process running\"}\n\n\nif __name__ == \"__main__\":\n    print(\"Starting server...\")\n    uvicorn.run(\n        app,\n        host=\"127.0.0.1\",\n        port=47372,\n        timeout_keep_alive=3600,\n        timeout_graceful_shutdown=300,\n        limit_concurrency=10,\n        backlog=2048\n    )\n"
  },
  {
    "path": "Backend/requirements.txt",
    "content": "annotated-types==0.7.0\nanyio==4.6.2.post1\nasgiref==3.8.1\nbackoff==2.2.1\nbcrypt==4.2.1\nbuild==1.2.2.post1\ncachetools==5.5.0\ncertifi==2024.8.30\ncharset-normalizer==3.4.0\nchromadb==0.6.3\nchroma-hnswlib==0.7.6\nclick==8.1.7\ncoloredlogs==15.0.1\nDeprecated==1.2.15\ndnspython==2.7.0\ndurationpy==0.9\necdsa==0.19.0\nemail_validator==2.2.0\nexceptiongroup==1.2.2\nfastapi==0.115.6\nfastapi-cli==0.0.6\nfilelock==3.16.1\nflatbuffers==24.3.25\nfsspec==2024.10.0\ngoogle-auth==2.36.0\ngoogleapis-common-protos==1.66.0\ngrpcio==1.68.1\nh11==0.14.0\nhttpcore==1.0.7\nhttptools==0.6.4\nhttpx==0.28.0\nhuggingface-hub==0.26.5\nhumanfriendly==10.0\nidna==3.10\nimportlib_metadata==8.5.0\nimportlib_resources==6.4.5\niniconfig==2.0.0\nJinja2==3.1.5\nkubernetes==31.0.0\nmarkdown-it-py==3.0.0\nMarkupSafe==3.0.2\nmdurl==0.1.2\nmmh3==5.0.1\nmonotonic==1.6\nmpmath==1.3.0\nnumba==0.58.1\noauthlib==3.2.2\nonnxruntime==1.20.1\nopentelemetry-api==1.28.2\nopentelemetry-exporter-otlp-proto-common==1.28.2\nopentelemetry-exporter-otlp-proto-grpc==1.28.2\nopentelemetry-instrumentation==0.49b2\nopentelemetry-instrumentation-asgi==0.49b2\nopentelemetry-instrumentation-fastapi==0.49b2\nopentelemetry-proto==1.28.2\nopentelemetry-sdk==1.28.2\nopentelemetry-semantic-conventions==0.49b2\nopentelemetry-util-http==0.49b2\norjson==3.10.12\noverrides==7.7.0\npackaging==24.2\npasslib==1.7.4\npluggy==1.5.0\nposthog==3.7.4\nprotobuf==5.29.1\npyasn1==0.6.1\npyasn1_modules==0.4.1\npydantic>=2.9.0,<3.0.0\npydantic_core==2.14.6\nPygments==2.18.0\nPyPika==0.48.9\npyproject_hooks==1.2.0\npytest==8.3.4\npython-dateutil==2.9.0.post0\npython-dotenv==1.0.1\nPyJWT==2.10.1\npython-multipart==0.0.19\nPyYAML==6.0.2\nrequests==2.32.3\nrequests-oauthlib==2.0.0\nrich==13.9.4\nrich-toolkit==0.11.3\nrsa==4.9\nshellingham==1.5.4\nsix==1.17.0\nsniffio==1.3.1\nstarlette==0.41.3\nsympy==1.13.3\ntenacity==9.0.0\ntokenizers==0.21.0\ntomli==2.2.1\ntqdm==4.67.1\ntyper==0.15.1\nurllib3==2.2.3\nuvicorn==0.32.1\nwatchfil
es==1.0.0\nwebsocket-client==1.8.0\nwebsockets==14.1\nwrapt==1.17.0\nzipp==3.21.0\npypdf[full]==5.2.0\npython-docx==0.8.11\nbeautifulsoup4==4.12.2\nmarkdown==3.5.1\npython-pptx==0.6.21\nopenpyxl==3.1.2\nlxml==5.3.0\npandas==2.2.3\npytz==2024.2\npillow==11.0.0\nsoupsieve==2.6\nopenai==1.58.1\ndistro==1.9.0\nnest_asyncio==1.5.6\nhypercorn==0.14.3\ntoml==0.10.2\nh2==4.1.0\nhyperframe==6.0.1\nhpack==4.0.0\nhttp3==0.6.7\nh11==0.14.0\nhttpcore==1.0.7\nsentence-transformers==3.3.1\nthreadpoolctl==3.5.0\njoblib==1.4.2\nscipy==1.15.1\nhttpx==0.28.0\npriority==2.0.0\nwsproto==1.2.0\njiter==0.8.2\nlangchain==0.3.16\nlangchain-text-splitters==0.3.4\nlangchain_core==0.3.28\nlangsmith==0.2.3\nrequests_toolbelt==1.0.0\njsonpatch==1.33\njsonpointer==3.0.0\nlangchain_community==0.3.16\ntiktoken==0.8.0\nregex==2024.11.6\nlangchain-openai==0.2.14\nlangchain-chroma==0.2.1\npsutil==6.1.1\nollama==0.4.4\ndocx2txt==0.8\nyt-dlp==2024.12.23\nwebvtt-py==0.4.6\nlangchain-ollama==0.2.2\nopenai-whisper==20240930\naccelerate>=0.20.3\nbitsandbytes>=0.41.1\nsafetensors>=0.4.0\nllvmlite==0.43.0\neinops==0.8.0\noptimum==1.23.3\ndatasets==3.2.0\npyarrow==18.1.0\nmultiprocess==0.70.17\ndill>=0.3.6\naiohttp==3.11.11\nmultidict==6.1.0\nattrs>=23.1.0\nyarl==1.18.3\npropcache==0.2.1\nasync-timeout==5.0.1\naiohappyeyeballs==2.4.4\naiosignal==1.3.2\nfrozenlist==1.5.0\nxxhash==3.5.0\ndiskcache==5.6.3\nhqq==0.2.2\ntermcolor==2.5.0\nlangchain-huggingface==0.1.2"
  },
  {
    "path": "Backend/src/authentication/api_key_authorization.py",
    "content": "from fastapi import Depends\nfrom fastapi.security import OAuth2PasswordBearer\nfrom typing import Optional\nimport jwt\n\nimport logging\nimport os\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"token\")\nSECRET_KEY = os.environ.get(\"SECRET_KEY\")\n\nif not SECRET_KEY:\n    raise RuntimeError(\"Could not get JWT secret for API key authorization\")\n\n\nasync def get_optional_token(token: Optional[str] = Depends(oauth2_scheme)):\n    return token\n\n\nasync def api_key_auth(token: Optional[str] = Depends(get_optional_token)):\n    if token is None:\n        return None\n    try:\n        payload = jwt.decode(token, SECRET_KEY, algorithms=[\"HS256\"])\n        user_id: str = payload.get(\"userId\")\n        logger.info(f\"User ID: {user_id}\")\n        if user_id is None:\n            return None\n        return user_id\n    except jwt.exceptions.InvalidTokenError:\n        logger.error(\"Invalid token\")\n        return None\n"
  },
  {
    "path": "Backend/src/authentication/token.py",
    "content": "from fastapi import Depends, Request\nfrom fastapi.security import OAuth2PasswordBearer\nfrom typing import Optional\nimport os\nimport jwt\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"token\")\n\n# Get secret from environment variable\nSECRET_KEY = os.environ.get(\"JWT_SECRET\")\nif not SECRET_KEY:\n    raise RuntimeError(\"JWT_SECRET environment variable is not set\")\n\n\nasync def get_optional_token(token: Optional[str] = Depends(oauth2_scheme)):\n    return token\n\n\nasync def verify_token(token: Optional[str] = Depends(get_optional_token)):\n    if token is None:\n        return None\n    try:\n        payload = jwt.decode(token, SECRET_KEY, algorithms=[\"HS256\"])\n        print(f\"Payload: {payload}\")\n        user_id: str = payload.get(\"userId\")\n        logger.info(f\"User ID: {user_id}\")\n        if user_id is None:\n            return None\n        return user_id\n\n    except jwt.exceptions.InvalidTokenError:\n        logger.error(\"Invalid token\")\n        return None\n\n\nasync def optional_auth(request: Request):\n    if \"Authorization\" in request.headers:\n        token = request.headers[\"Authorization\"].split(\"Bearer \")[1]\n        try:\n            payload = jwt.decode(token, SECRET_KEY, algorithms=[\"HS256\"])\n            return payload.get(\"userId\")\n        except jwt.exceptions.InvalidTokenError:\n            return None\n    return None\n\n\nasync def verify_token_or_api_key(token: Optional[str] = Depends(get_optional_token)):\n    \"\"\"Verify token using normal auth, falling back to API key auth if that fails\"\"\"\n    # Try normal token verification first\n    user_id = await verify_token(token)\n    if user_id:\n        return user_id\n        \n    # Fall back to API key verification\n    from src.authentication.api_key_authorization import api_key_auth\n    return await api_key_auth(token)\n"
  },
  {
    "path": "Backend/src/data/dataFetch/webcrawler.py",
    "content": "import os\nimport json\nimport logging\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin, urlparse\nimport time\nimport threading\nimport concurrent.futures\nfrom concurrent.futures import ThreadPoolExecutor\nfrom queue import Queue, Empty\n\n\nclass WebCrawler:\n    def __init__(self, base_url, user_id, user_name, collection_id, collection_name, max_workers, cancel_event=None):\n        self.base_url = base_url\n        self.output_dir = self._get_collection_path(\n            user_id, user_name, collection_id, collection_name)\n        self.visited_urls = set()\n        self.failed_urls = set()\n        self.delay = 0  # Reduced delay since we're rate limiting with max_workers\n        self.max_workers = max_workers\n        self.url_queue = Queue()\n        self.url_lock = threading.Lock()\n        self.progress_bar = None\n        self.total_urls = 0\n        self.current_urls = 0\n        self.update_callback = None\n        self.cancel_event = cancel_event\n\n        # Setup logging\n        logging.basicConfig(\n            level=logging.INFO,\n            format='%(asctime)s - %(levelname)s - %(message)s'\n        )\n\n        # Create output directory if it doesn't exist\n        os.makedirs(self.output_dir, exist_ok=True)\n\n    def _get_collection_path(self, user_id, user_name, collection_id, collection_name):\n        \"\"\"Generate the collection path matching the frontend structure\"\"\"\n        app_data_path = os.path.abspath(os.path.join(\n            os.path.dirname(os.path.dirname(os.path.dirname(__file__))), \"..\"\n        ))\n        return os.path.join(\n            app_data_path,\n            \"..\",\n            \"FileCollections\",\n            f\"{user_id}_{user_name}\",\n            f\"{collection_id}_{collection_name}\"\n        )\n\n    def _print_progress(self):\n        \"\"\"Print progress as JSON\"\"\"\n        if self.total_urls > 0:\n            percent = (self.current_urls 
/ self.total_urls) * 100\n            progress_data = {\n                \"status\": \"progress\",\n                \"data\": {\n                    \"message\": f\"Part 1 of 2: Scraping page {self.current_urls} out of {self.total_urls} from {self.base_url}\",\n                    \"chunk\": self.current_urls,\n                    \"total_chunks\": self.total_urls,\n                    \"percent_complete\": f\"{percent:.1f}%\"\n                }\n            }\n            json_str = json.dumps(progress_data)\n            print(f\"data: {json_str}\")\n            return progress_data\n\n    def is_valid_url(self, url):\n        \"\"\"Check if URL belongs to the same domain and is a documentation page\"\"\"\n        # Remove fragment identifier (#) and anything that follows\n        url = url.split('#')[0]\n        if not url:  # Skip empty URLs after fragment removal\n            return False\n\n        # First check if URL starts with base_url\n        if not url.startswith(self.base_url):\n            logging.debug(f\"Filtered URL (not starting with base URL): {url}\")\n            return False\n\n        # Remove trailing slashes for consistency\n        url = url.rstrip('/')\n\n        # Skip obviously invalid URLs\n        invalid_patterns = [\n            '.pdf', '.zip', '.png', '.jpg',  # File extensions\n            'github.com', 'twitter.com',      # External sites\n            '/api/', '/examples/',            # Common non-doc paths\n            '?', 'mailto:', 'javascript:'     # Special URLs\n        ]\n\n        if any(pattern in url for pattern in invalid_patterns):\n            logging.debug(f\"Filtered URL (invalid pattern): {url}\")\n            return False\n\n        # Ensure not a resource file\n        return not url.endswith(('js', 'css', 'json'))\n\n    def save_page(self, url, html_content):\n        \"\"\"Save the HTML content to a file\"\"\"\n        try:\n            # Create base_url_docs directory\n            parsed_base_url = 
urlparse(self.base_url)\n            base_url_dir = parsed_base_url.netloc.replace(\".\", \"_\") + \"_docs\"\n            base_dir = os.path.join(self.output_dir, base_url_dir)\n            os.makedirs(base_dir, exist_ok=True)\n\n            # Create a file path based on the URL structure\n            parsed_url = urlparse(url)\n            path_parts = parsed_url.path.strip('/').split('/')\n\n            # Create subdirectories if needed\n            current_dir = base_dir\n            for part in path_parts[:-1]:\n                current_dir = os.path.join(current_dir, part)\n                os.makedirs(current_dir, exist_ok=True)\n\n            # Save the file\n            filename = path_parts[-1] if path_parts else 'index'\n            filepath = os.path.join(current_dir, f\"{filename}.html\")\n\n            with open(filepath, 'w', encoding='utf-8') as f:\n                f.write(html_content)\n\n            return True\n\n        except Exception as e:\n            logging.error(f\"Error saving {url}: {str(e)}\")\n            return False\n\n    def get_links(self, soup, current_url):\n        \"\"\"Extract valid documentation links from the page\"\"\"\n        links = set()\n        for a in soup.find_all('a', href=True):\n            # Get the full URL\n            url = urljoin(current_url, a['href'])\n\n            # Remove fragment identifier (#) and anything that follows\n            url = url.split('#')[0]\n\n            # Skip empty URLs after fragment removal\n            if not url:\n                continue\n\n            # Remove trailing slashes for consistency\n            url = url.rstrip('/')\n\n            # Only add if it's valid and not already visited\n            if self.is_valid_url(url) and url not in self.visited_urls:\n                links.add(url)\n\n        return links\n\n    def scrape_page(self, url):\n        \"\"\"Scrape a single page and return its content and links\"\"\"\n        try:\n            response = 
requests.get(url, timeout=10)\n            response.raise_for_status()\n            html_content = response.text\n\n            # Create BeautifulSoup object with the response text\n            soup = BeautifulSoup(html_content, 'html.parser')\n\n            # Remove unwanted elements before getting links\n            for element in soup.find_all(['header', 'footer', 'nav', 'script', 'style', 'meta']):\n                if element is not None:\n                    element.decompose()\n\n            # Get links from the cleaned soup\n            links = self.get_links(soup, url)\n\n            return soup, links\n\n        except Exception as e:\n            error_data = {\n                \"status\": \"error\",\n                \"data\": {\n                    \"message\": str(e)\n                }\n            }\n            print(f\"data: {json.dumps(error_data)}\")\n            logging.error(f\"Error scraping {url}: {str(e)}\")\n            self.failed_urls.add(url)\n            return None, set()\n\n    def scrape(self):\n        \"\"\"Main scraping method using thread pool\"\"\"\n        # Initialize with start URL\n        self.url_queue.put(self.base_url)\n        self.total_urls = 1  # Initialize with 1 for the base URL\n        self.current_urls = 0\n\n        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:\n            active_tasks = set()\n\n            while True:\n                try:\n                    # Check for cancellation\n                    if self.cancel_event and self.cancel_event.is_set():\n                        break\n\n                    # Get next URL with timeout\n                    try:\n                        current_url = self.url_queue.get(timeout=5)\n                    except Empty:\n                        # If no active tasks and queue is empty, we're done\n                        if not active_tasks:\n                            break\n                        continue\n\n                    if 
current_url in self.visited_urls:\n                        continue\n\n                    with self.url_lock:\n                        if current_url in self.visited_urls:\n                            continue\n                        self.visited_urls.add(current_url)\n                        yield self._print_progress()\n\n                    # Submit the scraping task to thread pool\n                    future = executor.submit(self._process_url, current_url)\n                    active_tasks.add(future)\n                    future.add_done_callback(lambda f: active_tasks.remove(f))\n                    future.add_done_callback(self._update_progress)\n\n                except Exception as e:\n                    error_data = {\n                        \"status\": \"error\",\n                        \"data\": {\n                            \"message\": str(e)\n                        }\n                    }\n                    print(f\"data: {json.dumps(error_data)}\")\n                    logging.error(f\"Error in scrape loop: {str(e)}\")\n                    continue\n\n            # Wait for remaining tasks to complete\n            for future in concurrent.futures.as_completed(list(active_tasks)):\n                try:\n                    future.result()\n                except Exception as e:\n                    error_data = {\n                        \"status\": \"error\",\n                        \"data\": {\n                            \"message\": str(e)\n                        }\n                    }\n                    print(f\"data: {json.dumps(error_data)}\")\n                    logging.error(f\"Error in remaining tasks: {str(e)}\")\n\n    def _update_progress(self, future):\n        \"\"\"Callback to update progress\"\"\"\n        try:\n            with self.url_lock:\n                self.current_urls += 1\n                progress_data = self._print_progress()\n                if progress_data:\n                    json_str = 
json.dumps(progress_data)\n                    print(f\"data: {json_str}\")\n        except Exception as e:\n            error_data = {\n                \"status\": \"error\",\n                \"data\": {\n                    \"message\": str(e)\n                }\n            }\n            print(f\"data: {json.dumps(error_data)}\")\n\n    def _process_url(self, url):\n        \"\"\"Process a single URL - called by thread pool\"\"\"\n        try:\n            # Check for cancellation\n            if self.cancel_event and self.cancel_event.is_set():\n                return\n\n            # Respectful delay\n            time.sleep(self.delay)\n\n            # Scrape the page\n            soup, new_links = self.scrape_page(url)\n            if soup is None:\n                return\n\n            # Save the page\n            if self.save_page(url, str(soup)):\n                # Add new links to queue\n                with self.url_lock:\n                    for link in new_links:\n                        if link not in self.visited_urls and link not in self.url_queue.queue:\n                            self.url_queue.put(link)\n                            self.total_urls += 1\n        except Exception as e:\n            error_data = {\n                \"status\": \"error\",\n                \"data\": {\n                    \"message\": str(e)\n                }\n            }\n            print(f\"data: {json.dumps(error_data)}\")\n            logging.error(f\"Error processing URL {url}: {str(e)}\")\n\n    def save_progress(self):\n        \"\"\"Save progress information\"\"\"\n        with open('scraping_progress.txt', 'w') as f:\n            f.write(f\"Visited URLs: {len(self.visited_urls)}\\n\")\n            f.write(f\"Failed URLs: {len(self.failed_urls)}\\n\")\n            f.write(\"\\nFailed URLs:\\n\")\n            for url in self.failed_urls:\n                f.write(f\"{url}\\n\")\n"
  },
  {
    "path": "Backend/src/data/dataFetch/youtube.py",
    "content": "import os\nfrom src.endpoint.models import YoutubeTranscriptRequest\nfrom src.vectorstorage.vectorstore import get_vectorstore\nfrom src.vectorstorage.helpers.sanitizeCollectionName import sanitize_collection_name\n\nfrom langchain_core.documents import Document\nimport yt_dlp\nimport logging\nimport requests\nimport webvtt\nfrom io import StringIO\nfrom typing import Generator\nimport json\n\nlogging.basicConfig(\n    level=logging.INFO,\n    format='%(asctime)s - %(levelname)s - %(message)s'\n)\nlogger = logging.getLogger(__name__)\n\n\ndef _get_collection_path(user_id, user_name, collection_id, collection_name):\n    \"\"\"Generate the collection path matching the frontend structure\"\"\"\n    app_data_path = os.path.abspath(os.path.join(\n        os.path.dirname(os.path.dirname(os.path.dirname(__file__))), \"..\"\n    ))\n    return os.path.join(\n        app_data_path,\n        \"..\",\n        \"FileCollections\",\n        f\"{user_id}_{user_name}\",\n        f\"{collection_id}_{collection_name}\"\n    )\n\n\ndef youtube_transcript(request: YoutubeTranscriptRequest) -> Generator[dict, None, None]:\n    \"\"\"\n    Fetch video transcript and metadata using yt-dlp\n    \"\"\"\n    logger.info(f\"Starting transcript fetch for URL: {request.url}\")\n    yield {\"status\": \"progress\", \"data\": {\"message\": f\"Starting transcript fetch for URL: {request.url}\", \"chunk\": 1, \"total_chunks\": 4, \"percent_complete\": \"0%\"}}\n\n    ydl_opts = {\n        'writesubtitles': True,\n        'writeautomaticsub': True,\n        'subtitlesformat': 'vtt',\n        'skip_download': True,\n        'quiet': True,  # Suppress yt-dlp's own output\n        'no_warnings': True  # Suppress warnings\n    }\n\n    try:\n        with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n            # Video info extraction (0-5%)\n            yield {\"status\": \"progress\", \"data\": {\"message\": \"Extracting video information...\", \"chunk\": 1, \"total_chunks\": 4, 
\"percent_complete\": \"5%\"}}\n            info = ydl.extract_info(request.url, download=False)\n\n            video_info = f\"Found video: '{info.get('title', 'Unknown')}' by {info.get('uploader', 'Unknown')}, duration: {info.get('duration', 'Unknown')} seconds\"\n            logger.info(video_info)\n            yield {\"status\": \"progress\", \"data\": {\"message\": video_info, \"chunk\": 1, \"total_chunks\": 4, \"percent_complete\": \"10%\"}}\n\n            # Get automatic captions if available\n            subtitles = None\n            if 'automatic_captions' in info and 'en' in info['automatic_captions']:\n                logger.info(\"Using automatic captions\")\n                yield {\"status\": \"progress\", \"data\": {\"message\": \"Found automatic captions, processing...\", \"chunk\": 0, \"total_chunks\": 0, \"percent_complete\": \"0%\"}}\n                subtitles = info['automatic_captions']['en']\n            # Fall back to manual subtitles if available\n            elif 'subtitles' in info and 'en' in info['subtitles']:\n                logger.info(\"Using manual subtitles\")\n                yield {\"status\": \"progress\", \"data\": {\"message\": \"Found manual subtitles, processing...\", \"chunk\": 0, \"total_chunks\": 0, \"percent_complete\": \"0%\"}}\n                subtitles = info['subtitles']['en']\n\n            if not subtitles:\n                error_msg = \"No English subtitles or automatic captions available\"\n                logger.error(error_msg)\n                raise Exception(error_msg)\n\n            # Download the VTT format subtitles\n            subtitle_url = None\n            for fmt in subtitles:\n                if fmt.get('ext') == 'vtt':\n                    subtitle_url = fmt['url']\n                    break\n\n            if not subtitle_url:\n                error_msg = \"No VTT format subtitles found\"\n                logger.error(error_msg)\n                raise Exception(error_msg)\n\n            # Update 
progress for subtitle download (10-15%)\n            yield {\"status\": \"progress\", \"data\": {\"message\": \"Downloading subtitles...\", \"chunk\": 2, \"total_chunks\": 4, \"percent_complete\": \"15%\"}}\n\n            # Download the VTT content\n            response = requests.get(subtitle_url)\n            if response.status_code != 200:\n                error_msg = \"Failed to download subtitles\"\n                logger.error(error_msg)\n                raise Exception(error_msg)\n\n            # Parse the VTT content\n            vtt_content = response.text\n            vtt_file = StringIO(vtt_content)\n            vtt_captions = webvtt.read_buffer(vtt_file)\n\n            # Start of transcript processing (15-35%)\n            yield {\"status\": \"progress\", \"data\": {\"message\": \"Processing subtitles...\", \"chunk\": 2, \"total_chunks\": 4, \"percent_complete\": \"15%\"}}\n\n            def clean_caption(text):\n                # Remove common VTT artifacts and clean text\n                text = ' '.join(text.split())  # Remove extra whitespace\n                # Remove text within brackets (often contains sound effects or speaker labels)\n                if text.startswith('[') and text.endswith(']'):\n                    return \"\"\n                # Remove common YouTube caption artifacts\n                text = text.replace('>>>', '').replace('>>', '')\n                # Remove any remaining brackets and their contents\n                while '[' in text and ']' in text:\n                    start = text.find('[')\n                    end = text.find(']') + 1\n                    text = text[:start] + text[end:]\n                return text.strip()\n\n            def is_substantial_difference(text1, text2):\n                # More aggressive deduplication\n                if not text1 or not text2:\n                    return True\n\n                # Convert to lowercase and split into words\n                words1 = text1.lower().split()\n        
        words2 = text2.lower().split()\n\n                # If either text is too short, consider them different\n                if len(words1) < 3 or len(words2) < 3:\n                    return True\n\n                # Create word sequences for comparison\n                seq1 = ' '.join(words1)\n                seq2 = ' '.join(words2)\n\n                # Check if one is contained within the other\n                if seq1 in seq2 or seq2 in seq1:\n                    return False\n\n                # Calculate word overlap\n                words1_set = set(words1)\n                words2_set = set(words2)\n                overlap = len(words1_set.intersection(words2_set))\n                max_words = max(len(words1_set), len(words2_set))\n\n                # If more than 50% overlap, consider it a duplicate\n                return (overlap / max_words) < 0.5 if max_words > 0 else True\n\n            # Create documents from transcript chunks\n            documents = []\n            total_captions = len(vtt_captions)\n            processed_captions = 0\n            chunk_size = 60  # Increased chunk size to 60 seconds\n            current_chunk = []\n            chunk_start = 0\n            chunk_count = 0\n            last_text = \"\"\n\n            # Process captions with progress updates from 15-35%\n            for caption in vtt_captions:\n                cleaned_text = clean_caption(caption.text)\n                if not cleaned_text:\n                    continue\n\n                start_seconds = _time_to_seconds(caption.start)\n\n                # Only add text if it's substantially different from the last added text\n                if is_substantial_difference(last_text, cleaned_text):\n                    # Don't add if it's just a subset of any recent text in current chunk\n                    if not any(cleaned_text in existing or existing in cleaned_text\n                               for existing in current_chunk[-3:] if current_chunk):\n         
               current_chunk.append(cleaned_text)\n                        last_text = cleaned_text\n\n                # Create new chunk every chunk_size seconds or if chunk is getting too long\n                if (start_seconds - chunk_start >= chunk_size and current_chunk) or \\\n                   (len(' '.join(current_chunk)) > 1000):  # Limit chunk size to ~1000 chars\n                    if current_chunk:  # Only create chunk if there's content\n                        chunk_count += 1\n                        doc = Document(\n                            page_content=\" \".join(current_chunk),\n                            metadata={\n                                \"title\": info.get('title', ''),\n                                \"description\": info.get('description', ''),\n                                \"author\": info.get('uploader', ''),\n                                \"source\": request.url,\n                                \"chunk_start\": chunk_start,\n                                \"chunk_end\": start_seconds,\n                                \"chunk_number\": chunk_count\n                            }\n                        )\n                        documents.append(doc)\n                        current_chunk = []\n                        chunk_start = start_seconds\n                        last_text = \"\"\n\n                processed_captions += 1\n                if processed_captions % 100 == 0:  # Update every 100 captions\n                    # Progress from 15% to 35%\n                    percent = 15 + ((processed_captions / total_captions) * 20)\n                    yield {\"status\": \"progress\", \"data\": {\n                        \"message\": f\"Processing transcript: {processed_captions}/{total_captions} captions\",\n                        \"chunk\": 2,\n                        \"total_chunks\": 4,\n                        \"percent_complete\": f\"{percent:.1f}%\"\n                    }}\n\n            # Add final chunk if 
any remains\n            if current_chunk:\n                chunk_count += 1\n                doc = Document(\n                    page_content=\" \".join(current_chunk),\n                    metadata={\n                        \"title\": info.get('title', ''),\n                        \"description\": info.get('description', ''),\n                        \"author\": info.get('uploader', ''),\n                        \"source\": request.url,\n                        \"chunk_start\": chunk_start,\n                        \"chunk_end\": _time_to_seconds(vtt_captions[-1].end),\n                        \"chunk_number\": chunk_count\n                    }\n                )\n                documents.append(doc)\n\n            # Vectorstore initialization (35-40%)\n            yield {\"status\": \"progress\", \"data\": {\n                \"message\": \"Initializing vector database...\",\n                \"chunk\": 3,\n                \"total_chunks\": 4,\n                \"percent_complete\": \"40%\"\n            }}\n\n            # Store documents in ChromaDB\n            collection_name = sanitize_collection_name(\n                str(request.collection_name))\n            vectordb = get_vectorstore(\n                request.api_key, collection_name, request.is_local, request.local_embedding_model)\n            if not vectordb:\n                raise Exception(\"Failed to initialize vector database\")\n\n            # Add documents in batches with progress updates (40-95%)\n            total_docs = len(documents)\n            docs_processed = 0\n            batch_size = 100\n\n            for i in range(0, len(documents), batch_size):\n                batch = documents[i:i + batch_size]\n                vectordb.add_documents(batch)\n\n                docs_processed += len(batch)\n                percent = 40 + ((docs_processed / total_docs)\n                                * 55)  # Progress from 40% to 95%\n                yield {\"status\": \"progress\", \"data\": 
{\n                    \"message\": f\"Embedding chunks in vector database: {docs_processed}/{total_docs}\",\n                    \"chunk\": 4,\n                    \"total_chunks\": 4,\n                    \"percent_complete\": f\"{percent:.1f}%\"\n                }}\n\n            # Final completion (95-100%)\n            success_msg = f\"Successfully processed and stored {chunk_count} transcript chunks. Total length: {sum(len(doc.page_content) for doc in documents)} characters\"\n            logger.info(success_msg)\n            yield {\"status\": \"progress\", \"data\": {\"message\": success_msg, \"chunk\": 4, \"total_chunks\": 4, \"percent_complete\": \"100%\"}}\n\n            # Save transcript to file\n            collection_path = _get_collection_path(\n                request.user_id,\n                request.username,\n                request.collection_id,\n                request.collection_name\n            )\n\n            if not os.path.exists(collection_path):\n                os.makedirs(collection_path, exist_ok=True)\n\n            # Create filename using video title and timestamp\n            safe_title = \"\".join(c for c in info.get(\n                'title', 'unknown') if c.isalnum() or c in (' ', '-', '_')).rstrip()\n            folder_name = f\"{safe_title}_youtube\"\n            folder_path = os.path.join(collection_path, folder_name)\n            os.makedirs(folder_path, exist_ok=True)\n\n            # Save metadata\n            metadata = {\n                \"title\": info.get('title', ''),\n                \"uploader\": info.get('uploader', ''),\n                \"duration\": info.get('duration', ''),\n                \"description\": info.get('description', ''),\n                \"url\": request.url\n            }\n            with open(os.path.join(folder_path, \"metadata.json\"), \"w\", encoding=\"utf-8\") as f:\n                json.dump(metadata, f, ensure_ascii=False, indent=2)\n\n            # Save full transcript\n            
with open(os.path.join(folder_path, \"transcript.txt\"), \"w\", encoding=\"utf-8\") as f:\n                f.write(f\"Title: {info.get('title', 'Unknown')}\\n\")\n                f.write(f\"Author: {info.get('uploader', 'Unknown')}\\n\")\n                f.write(f\"Duration: {info.get('duration', 'Unknown')} seconds\\n\")\n                f.write(f\"Source URL: {request.url}\\n\")\n                f.write(\"\\n--- Transcript ---\\n\\n\")\n                for doc in documents:\n                    f.write(f\"[{doc.metadata['chunk_start']:.1f}s - {doc.metadata['chunk_end']:.1f}s]\\n\")\n                    f.write(f\"{doc.page_content}\\n\\n\")\n\n            # Save chunked transcripts with timestamps\n            with open(os.path.join(folder_path, \"transcript_chunks.json\"), \"w\", encoding=\"utf-8\") as f:\n                chunks = [{\n                    \"content\": doc.page_content,\n                    \"start_time\": doc.metadata.get(\"chunk_start\", 0),\n                    \"end_time\": doc.metadata.get(\"chunk_end\", 0),\n                    \"chunk_number\": doc.metadata.get(\"chunk_number\", 0)\n                } for doc in documents]\n                json.dump(chunks, f, ensure_ascii=False, indent=2)\n\n            # Log success\n            logger.info(f\"Saved transcript to {folder_path}\")\n\n            return documents\n\n    except Exception as e:\n        error_msg = f\"Error processing YouTube transcript: {str(e)}\"\n        logger.error(error_msg, exc_info=True)\n        raise Exception(error_msg)\n\n\ndef _time_to_seconds(time_str):\n    \"\"\"Convert VTT timestamp to seconds\"\"\"\n    h, m, s = time_str.split(':')\n    return float(h) * 3600 + float(m) * 60 + float(s)\n"
  },
  {
    "path": "Backend/src/data/dataIntake/csvFallbackSplitting.py",
    "content": "from langchain_core.documents import Document\nimport pandas as pd\nimport io\nimport time\nfrom typing import Generator\n\n\ndef split_csv_text(text: str, file_path: str, metadata: dict = None) -> Generator[dict | list, None, None]:\n    \"\"\"Split CSV text into chunks for embedding while preserving row integrity.\"\"\"\n    try:\n        # Convert text back to DataFrame using StringIO\n        yield {\"status\": \"progress\", \"data\": {\"message\": \"Loading CSV data...\", \"chunk\": 1, \"total_chunks\": 4, \"percent_complete\": \"25%\"}}\n        df = pd.read_csv(io.StringIO(text))\n\n        # Get headers\n        headers = df.columns.tolist()\n\n        # Calculate approximate number of rows per chunk (targeting ~2000 characters per chunk)\n        yield {\"status\": \"progress\", \"data\": {\"message\": \"Calculating chunk sizes...\", \"chunk\": 2, \"total_chunks\": 4, \"percent_complete\": \"50%\"}}\n        sample_row = df.iloc[0].to_string(index=False)\n        chars_per_row = len(sample_row)\n        rows_per_chunk = max(1, int(2000 / chars_per_row))\n\n        documents = []\n        total_rows = len(df)\n        start_time = time.time()\n\n        # Process DataFrame in chunks\n        for i in range(0, total_rows, rows_per_chunk):\n            # Calculate progress\n            progress = min(100, int((i / total_rows) * 100))\n            elapsed_time = time.time() - start_time\n            est_remaining_time = \"calculating...\" if i == 0 else f\"{(elapsed_time / (i + 1)) * (total_rows - i):.1f}s\"\n\n            yield {\n                \"status\": \"progress\",\n                \"data\": {\n                    \"message\": f\"Processing rows {i} to {min(i + rows_per_chunk, total_rows)}...\",\n                    \"chunk\": 3,\n                    \"total_chunks\": 4,\n                    \"percent_complete\": f\"{progress}%\",\n                    \"est_remaining_time\": est_remaining_time\n                }\n            }\n\n       
     chunk_df = df.iloc[i:i + rows_per_chunk]\n\n            # Convert chunk to string more efficiently\n            chunk_text = []\n            chunk_text.append(\",\".join(headers))  # Add headers\n\n            # Convert rows to strings efficiently\n            for _, row in chunk_df.iterrows():\n                chunk_text.append(\",\".join(str(val) for val in row))\n\n            chunk_content = \"\\n\".join(chunk_text)\n\n            # Create document with metadata\n            doc_metadata = {\"source\": file_path, \"chunk_start\": i}\n            if metadata:\n                doc_metadata.update(metadata)\n\n            documents.append(\n                Document(page_content=chunk_content, metadata=doc_metadata))\n\n        yield {\"status\": \"progress\", \"data\": {\"message\": \"Finalizing chunks...\", \"chunk\": 4, \"total_chunks\": 4, \"percent_complete\": \"100%\"}}\n        print(f\"Split CSV into {len(documents)} chunks\")\n        return documents\n\n    except Exception as e:\n        print(f\"Error splitting CSV text: {str(e)}\")\n        yield {\"status\": \"error\", \"message\": f\"Error splitting CSV text: {str(e)}\"}\n        return []\n"
  },
  {
    "path": "Backend/src/data/dataIntake/fileTypes/loadX.py",
    "content": "import pandas as pd\nimport json\nimport markdown\nfrom bs4 import BeautifulSoup\nfrom pptx import Presentation\nfrom langchain_community.document_loaders import Docx2txtLoader\nfrom langchain_community.document_loaders.csv_loader import CSVLoader\nfrom pypdf import PdfReader\nfrom langchain_core.documents import Document\nimport logging\nimport os\nimport asyncio\n\n\nasync def load_pdf(file_path):\n    try:\n        logging.info(f\"Starting to load PDF: {file_path}\")\n\n        # Verify file exists and is readable\n        if not os.path.exists(file_path):\n            raise FileNotFoundError(f\"PDF file not found: {file_path}\")\n\n        def read_pdf():\n            reader = PdfReader(file_path)\n            pages = []\n            for i, page in enumerate(reader.pages):\n                text = page.extract_text()\n                if text.strip():  # Only include pages with content\n                    pages.append(\n                        Document(\n                            page_content=text,\n                            metadata={\"source\": file_path, \"page\": i}\n                        )\n                    )\n            return pages\n\n        # Run PDF reading in a thread pool to avoid blocking\n        pages = await asyncio.get_event_loop().run_in_executor(None, read_pdf)\n\n        if not pages:\n            logging.error(f\"No valid pages found in {file_path}\")\n            return None\n\n        logging.info(\n            f\"Successfully loaded {len(pages)} pages from {file_path}\")\n        logging.info(f\"First page metadata: {pages[0].metadata}\")\n        logging.info(\n            f\"First page content sample: {pages[0].page_content[:200]}...\")\n\n        return pages\n    except Exception as e:\n        logging.error(\n            f\"Error loading PDF {file_path}: {str(e)}\", exc_info=True)\n        return None\n\n\nasync def load_py(file):\n    try:\n        with open(file, 'r', encoding='utf-8') as f:\n            
content = f.read()\n            return content.strip()\n    except Exception as e:\n        print(f\"Error loading PY: {str(e)}\")\n        return None\n\n\nasync def load_docx(file):\n    try:\n        loader = Docx2txtLoader(file)\n        data = loader.load()\n        print(data)\n        return data[0].page_content\n    except Exception as e:\n        print(f\"Error loading DOCX: {str(e)}\")\n        return None\n\n\nasync def load_txt(file):\n    try:\n        with open(file, 'r', encoding='utf-8') as f:\n            return f.read().strip()\n    except Exception as e:\n        print(f\"Error loading TXT: {str(e)}\")\n        return None\n\n\nasync def load_md(file):\n    try:\n        with open(file, 'r', encoding='utf-8') as f:\n            md_text = f.read()\n            html = markdown.markdown(md_text)\n            soup = BeautifulSoup(html, 'html.parser')\n            return soup.get_text().strip()\n    except Exception as e:\n        print(f\"Error loading MD: {str(e)}\")\n        return None\n\n\nasync def load_html(file_path: str) -> str:\n    \"\"\"Load and process HTML file content\"\"\"\n    try:\n        with open(file_path, 'r', encoding='utf-8') as f:\n            content = f.read()\n\n        # Parse HTML with BeautifulSoup\n        soup = BeautifulSoup(content, 'html.parser')\n\n        # Remove script and style elements\n        for script in soup([\"script\", \"style\"]):\n            script.decompose()\n\n        # Get text content\n        text = soup.get_text()\n\n        # Break into lines and remove leading/trailing space\n        lines = (line.strip() for line in text.splitlines())\n\n        # Break multi-headlines into a line each\n        chunks = (phrase.strip()\n                  for line in lines for phrase in line.split(\"  \"))\n\n        # Drop blank lines\n        text = ' '.join(chunk for chunk in chunks if chunk)\n\n        return text\n    except Exception as e:\n        logging.error(f\"Error loading HTML file {file_path}: 
{str(e)}\")\n        return None\n\n\nasync def load_csv(file):\n    try:\n        loader = CSVLoader(file)\n        data = loader.load()\n        return data\n    except Exception as e:\n        print(f\"Error loading CSV: {str(e)}\")\n        return None\n\n\nasync def load_json(file):\n    try:\n        with open(file, 'r', encoding='utf-8') as f:\n            data = json.load(f)\n            return json.dumps(data, indent=2)\n    except Exception as e:\n        print(f\"Error loading JSON: {str(e)}\")\n        return None\n\n\ndef load_pptx(file):\n    try:\n        prs = Presentation(file)\n        text = []\n        for slide in prs.slides:\n            for shape in slide.shapes:\n                if hasattr(shape, \"text\"):\n                    text.append(shape.text)\n        return \"\\n\".join(text).strip()\n    except Exception as e:\n        print(f\"Error loading PPTX: {str(e)}\")\n        return None\n\n\ndef load_xlsx(file):\n    try:\n        df = pd.read_excel(file)\n        return df.to_string().strip()\n    except Exception as e:\n        print(f\"Error loading XLSX: {str(e)}\")\n        return None\n\n\nasync def load_docx(file):\n    try:\n        # Run the synchronous loader in a thread pool to avoid blocking\n        def load_docx_sync():\n            loader = Docx2txtLoader(file)\n            data = loader.load()\n            return data[0].page_content if data else None\n\n        content = await asyncio.get_event_loop().run_in_executor(None, load_docx_sync)\n        if content:\n            logging.info(f\"Successfully loaded DOCX file: {file}\")\n            return content\n        return None\n    except Exception as e:\n        logging.error(f\"Error loading DOCX: {str(e)}\")\n        return None\n"
  },
  {
    "path": "Backend/src/data/dataIntake/getHtmlFiles.py",
    "content": "import os\n\n\ndef get_html_files(directory):\n    \"\"\"Recursively get all HTML files in a directory and its subdirectories\"\"\"\n    html_files = []\n    for root, _, files in os.walk(directory):\n        for file in files:\n            if file.endswith('.html'):\n                file_path = os.path.join(root, file)\n                html_files.append(file_path)\n    return html_files\n"
  },
  {
    "path": "Backend/src/data/dataIntake/loadFile.py",
    "content": "import os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nfrom src.data.dataIntake.fileTypes.loadX import (\n    load_csv,\n    load_docx,\n    load_html,\n    load_json,\n    load_md,\n    load_pptx,\n    load_txt,\n    load_xlsx,\n    load_py,\n    load_pdf,\n)\n\nfile_handlers = {\n    \"pdf\": load_pdf,\n    \"docx\": load_docx,\n    \"txt\": load_txt,\n    \"md\": load_md,\n    \"html\": load_html,\n    \"csv\": load_csv,\n    \"json\": load_json,\n    \"pptx\": load_pptx,\n    \"xlsx\": load_xlsx,\n    \"py\": load_py,\n}\n\nasync def load_document(file: str):\n    try:\n        file_type = file.split(\".\")[-1].lower()\n        logger.info(f\"Loading file of type: {file_type}\")\n        \n        # Get file size\n        file_size = os.path.getsize(file)\n        logger.info(f\"File size: {file_size / (1024*1024):.2f}MB\")\n\n        handler = file_handlers.get(file_type)\n        print(handler)\n        if not handler:\n            logger.error(f\"Unsupported file type: {file_type}\")\n            return None\n\n        # Special handling for large PDFs\n        if file_type == \"pdf\" and file_size > 25 * 1024 * 1024:  # 25MB\n            logger.info(\"Large PDF detected - using chunked processing\")\n            return await handler(file, chunk_size=50)  # Process 50 pages at a time\n        \n        return await handler(file)\n\n    except Exception as e:\n        logger.error(f\"Error loading file: {str(e)}\")\n        return None\n"
  },
  {
    "path": "Backend/src/data/dataIntake/textSplitting.py",
    "content": "from langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain_core.documents import Document\nimport logging\n\n\ndef split_text(text: str, file_path: str, metadata: dict = None) -> list:\n    \"\"\"Split text into chunks for embedding.\"\"\"\n    try:\n        # Handle None or empty text\n        if not text:\n            logging.error(f\"Empty or None text received from {file_path}\")\n            return []\n\n        # Pre-process text to remove excessive whitespace\n        text = \" \".join(text.split())\n\n        text_splitter = RecursiveCharacterTextSplitter(\n            chunk_size=500,\n            chunk_overlap=20,\n            length_function=len,\n            is_separator_regex=False,\n            # Prioritize sentence boundaries\n            separators=[\". \", \"? \", \"! \", \"\\n\\n\", \"\\n\", \" \", \"\"]\n        )\n\n        # Directly split text and create documents in one go\n        texts = text_splitter.split_text(text)\n\n        # Create metadata if none provided\n        if metadata is None:\n            metadata = {}\n        metadata[\"source\"] = file_path\n\n        docs = [Document(page_content=t.strip(), metadata=metadata.copy())\n                for t in texts]\n\n        if not docs:\n            logging.warning(\n                f\"No documents created after splitting text from {file_path}\")\n        else:\n            logging.info(\n                f\"Successfully split text into {len(docs)} chunks from {file_path}\")\n\n        return docs\n    except Exception as e:\n        logging.error(f\"Error splitting text from {file_path}: {str(e)}\")\n        return []\n"
  },
  {
    "path": "Backend/src/data/database/checkAPIKey.py",
    "content": "from src.data.database.db import db\n\n\ndef check_api_key(user_id: int):\n    \"\"\" check to see if the userId has API key in SQLite \"\"\"\n    print(\"Checking API key for user:\", user_id)\n    try:\n        conn = db()\n        if not conn:\n            print(\"Failed to connect to database\")\n            return False\n\n        cursor = conn.cursor()\n\n        # Check for valid, non-expired API key\n        cursor.execute(\"\"\"\n            SELECT * FROM dev_api_keys \n            WHERE user_id = ? \n        \"\"\", (user_id,))\n\n        api_key = cursor.fetchone()\n        conn.close()\n        print(f\"API key count for user {user_id}: {api_key}\")\n        return api_key is not None\n\n    except Exception as e:\n        print(f\"Error checking API key: {e}\")\n        return False\n"
  },
  {
    "path": "Backend/src/data/database/db.py",
    "content": "import sqlite3\nimport os\nimport pathlib\nimport platform\n\nIS_DEV = os.environ.get(\"IS_DEV\") == \"1\"\n\n\ndef get_user_data_path():\n    system = platform.system()\n    home = os.path.expanduser(\"~\")\n\n    if system == \"Darwin\":  # macOS\n        base_path = os.path.join(\n            home, \"Library\", \"Application Support\", \"notate\")\n    elif system == \"Windows\":\n        base_path = os.path.join(os.getenv(\"APPDATA\"), \"notate\")\n    else:  # Linux and others\n        base_path = os.path.join(home, \".config\", \"notate\")\n\n    # Add development subdirectory if in dev mode\n    if IS_DEV:\n        return os.path.join(base_path, \"development\")\n    return base_path\n\n\ndef db():\n    if IS_DEV:\n        try:\n            # Get the absolute path to the project root\n            root_dir = pathlib.Path(__file__).parent.parent.parent.parent\n            db_path = os.path.join(root_dir, \"..\", 'Database', 'database.sqlite')\n            # Ensure the Database directory exists\n            os.makedirs(os.path.dirname(db_path), exist_ok=True)\n            print(f\"Connected to Database at: {db_path}\")\n\n            return sqlite3.connect(db_path)\n\n        except Exception as e:\n            print(f\"Error connecting to database: {e}\")\n            return None\n\n    else:\n        # For production, use the user data directory\n        user_data_path = get_user_data_path()\n        db_dir = os.path.join(user_data_path, \"Database\")\n        db_path = os.path.join(db_dir, \"database.sqlite\")\n\n        # Ensure the Database directory exists\n        os.makedirs(db_dir, exist_ok=True)\n        print(f\"Connected to Database at: {db_path}\")\n\n        return sqlite3.connect(db_path)\n"
  },
  {
    "path": "Backend/src/data/database/getCollectionInfo.py",
    "content": "from src.data.database.db import db\nfrom dataclasses import dataclass\nfrom typing import Optional\n\n\n@dataclass\nclass CollectionSettings:\n    id: int\n    user_id: int\n    name: str\n    description: str\n    is_local: bool\n    local_embedding_model: Optional[str]\n    type: str\n    files: Optional[str]\n    created_at: str\n\n\ndef get_collection_settings(user_id: str, collection_name: str) -> Optional[CollectionSettings]:\n    \"\"\"\n    Get collection settings for a specific user and collection name\n    Args:\n        user_id (str): The user ID\n        collection_name (str): The name of the collection\n    Returns:\n        CollectionSettings: Collection settings object or None if not found\n    \"\"\"\n    try:\n        conn = db()\n        if not conn:\n            print(\"Failed to connect to database\")\n            return None\n\n        cursor = conn.cursor()\n\n        cursor.execute(\"\"\"\n            SELECT id, user_id, name, description, is_local, local_embedding_model, type, files, created_at \n            FROM collections\n            WHERE name = ? AND user_id = ?\n        \"\"\", (collection_name, user_id))\n\n        row = cursor.fetchone()\n        conn.close()\n\n        if not row:\n            return None\n\n        return CollectionSettings(\n            id=row[0],\n            user_id=row[1],\n            name=row[2],\n            description=row[3],\n            is_local=bool(row[4]),\n            local_embedding_model=row[5],\n            type=row[6],\n            files=row[7],\n            created_at=row[8]\n        )\n\n    except Exception as e:\n        print(f\"Error retrieving collection settings: {e}\")\n        return None\n"
  },
  {
    "path": "Backend/src/data/database/getLLMApiKey.py",
    "content": "from src.data.database.db import db\n\n\ndef get_llm_api_key(user_id, provider):\n    try:\n        conn = db()\n        cursor = conn.cursor()\n        cursor.execute(\n            \"SELECT key FROM api_keys WHERE user_id = ? AND provider = ?\", (user_id, provider))\n        result = cursor.fetchone()\n        conn.close()\n        return result[0] if result else None\n    except Exception as e:\n        print(f\"Error retrieving OpenAI API key: {e}\")\n        return None\n"
  },
  {
    "path": "Backend/src/endpoint/api.py",
    "content": "from typing import AsyncGenerator\nimport json\nfrom src.endpoint.models import ChatCompletionRequest\nfrom transformers import TextIteratorStreamer\nfrom threading import Thread\nimport logging\nfrom src.models.manager import model_manager\nfrom src.models.streamer import TextGenerator, StopOnInterrupt\nimport uuid\nimport time\nimport torch\nimport transformers\n\n\nlogger = logging.getLogger(__name__)\n\n\nasync def chat_completion_stream(request: ChatCompletionRequest) -> AsyncGenerator[str, None]:\n    \"\"\"Stream chat completion from the model\"\"\"\n    try:\n        model = model_manager.current_model\n        if not model:\n            yield f\"data: {json.dumps({'error': 'No model loaded'})}\\n\\n\"\n            return\n        print(request.messages)\n        # Convert messages to prompts\n\n        try:\n            prompt = \"\"  # Initialize prompt variable\n            # Format messages without explicit User/Assistant markers\n            for msg in request.messages:\n                if msg.role == \"system\":\n                    prompt += f\"{msg.content}\\n\"\n                elif msg.role == \"user\":\n                    prompt += f\"Question: {msg.content}\\n\"\n                elif msg.role == \"assistant\":\n                    prompt += f\"Response: {msg.content}\\n\"\n            prompt += \"Response: \"\n\n            logger.info(f\"Generated prompt: {prompt}\")\n        except Exception as e:\n            logger.error(f\"Error formatting prompt: {str(e)}\", exc_info=True)\n            raise\n\n        # Create text generator\n        try:\n            generator = TextGenerator(\n                model, model_manager.current_tokenizer, model_manager.device)\n\n            # For llama.cpp models, we don't need to pre-encode the input\n            if model_manager.model_type != \"llama.cpp\":\n                # Only encode for transformers models\n                input_ids = model_manager.current_tokenizer.encode(\n           
         prompt, return_tensors=\"pt\")\n                attention_mask = torch.ones_like(input_ids)\n                if hasattr(model, \"device\"):\n                    input_ids = input_ids.to(model.device)\n                    attention_mask = attention_mask.to(model.device)\n        except Exception as e:\n            logger.error(\n                f\"Error setting up generator: {str(e)}\", exc_info=True)\n            raise\n\n        if request.stream:\n            try:\n                # Different handling for llama.cpp vs transformers models\n                if model_manager.model_type == \"llama.cpp\":\n                    # Use the TextGenerator's built-in streaming for llama.cpp\n                    stream_iterator = generator.generate(\n                        prompt=prompt,\n                        max_new_tokens=min(request.max_tokens or 2048, 2048),\n                        temperature=request.temperature or 0.7,\n                        top_p=request.top_p or 0.95,\n                        top_k=request.top_k or 40,\n                        repetition_penalty=1.2,\n                        stream=True\n                    )\n                    async for chunk in stream_iterator:\n                        yield chunk\n                    yield \"data: [DONE]\\n\\n\"\n                else:\n                    # Set up generation config for transformers models\n                    gen_config = {\n                        # Cap at 2048 if not specified\n                        \"max_new_tokens\": min(request.max_tokens or 2048, 2048),\n                        \"temperature\": request.temperature or 0.7,\n                        \"top_p\": request.top_p or 0.95,\n                        \"top_k\": request.top_k or 40,  # Slightly lower for more focused sampling\n                        \"repetition_penalty\": 1.2,  # Increased to reduce repetition\n                        \"do_sample\": True,\n                        \"pad_token_id\": 
model_manager.current_tokenizer.pad_token_id,\n                        \"eos_token_id\": model_manager.current_tokenizer.eos_token_id,\n                        \"no_repeat_ngram_size\": 5,  # Increased to catch longer repetitive phrases\n                        \"min_new_tokens\": 32,  # Increased minimum for more complete thoughts\n                        \"max_time\": 30.0,\n                        \"stopping_criteria\": transformers.StoppingCriteriaList([StopOnInterrupt()]),\n                        \"forced_eos_token_id\": model_manager.current_tokenizer.eos_token_id,\n                        \"length_penalty\": 0.8,  # Slight penalty for longer sequences\n                        \"num_return_sequences\": 1,\n                        \"remove_invalid_values\": True\n                    }\n\n                    # Add [END] token to the tokenizer's special tokens\n                    special_tokens = {\"additional_special_tokens\": [\"[END]\"]}\n                    model_manager.current_tokenizer.add_special_tokens(\n                        special_tokens)\n\n                    logger.info(f\"Generation config: {gen_config}\")\n\n                    # Create streamer with token-by-token streaming\n                    streamer = TextIteratorStreamer(\n                        model_manager.current_tokenizer,\n                        skip_prompt=True,\n                        skip_special_tokens=True,\n                        timeout=None,  # No timeout to prevent queue.Empty errors\n                        skip_word_before_colon=False,\n                        spaces_between_special_tokens=False,\n                        tokenizer_decode_kwargs={\"skip_special_tokens\": True}\n                    )\n                    generation_kwargs = dict(\n                        input_ids=input_ids,\n                        attention_mask=attention_mask,\n                        streamer=streamer,\n                        **gen_config\n                    )\n\n               
     # Create thread for generation\n                    thread = Thread(target=model.generate,\n                                    kwargs=generation_kwargs)\n                    thread.start()\n\n                    # Generate a consistent ID for this completion\n                    completion_id = f\"chatcmpl-{uuid.uuid4()}\"\n\n                    # Send the initial role message\n                    response = {\n                        \"id\": completion_id,\n                        \"object\": \"chat.completion.chunk\",\n                        \"created\": int(time.time()),\n                        \"model\": \"local-model\",\n                        \"choices\": [{\n                            \"index\": 0,\n                            \"delta\": {\"role\": \"assistant\"},\n                            \"finish_reason\": None\n                        }]\n                    }\n                    yield f\"data: {json.dumps(response)}\\n\\n\"\n\n                    # Stream the output\n                    accumulated_text = \"\"\n                    for new_text in streamer:\n                        if not new_text:\n                            continue\n\n                        # Split into individual characters/tokens for smoother streaming\n                        chars = list(new_text)\n                        for char in chars:\n                            accumulated_text += char\n                            response = {\n                                \"id\": completion_id,\n                                \"object\": \"chat.completion.chunk\",\n                                \"created\": int(time.time()),\n                                \"model\": \"local-model\",\n                                \"choices\": [{\n                                    \"index\": 0,\n                                    \"delta\": {\"content\": char},\n                                    \"finish_reason\": None\n                                }]\n                      
      }\n                            yield f\"data: {json.dumps(response)}\\n\\n\"\n\n                    # Send the final message\n                    response = {\n                        \"id\": completion_id,\n                        \"object\": \"chat.completion.chunk\",\n                        \"created\": int(time.time()),\n                        \"model\": \"local-model\",\n                        \"choices\": [{\n                            \"index\": 0,\n                            \"delta\": {},\n                            \"finish_reason\": \"stop\"\n                        }]\n                    }\n                    yield f\"data: {json.dumps(response)}\\n\\n\"\n                    yield \"data: [DONE]\\n\\n\"\n\n            except Exception as e:\n                logger.error(\n                    f\"Error during streaming: {str(e)}\", exc_info=True)\n                raise\n\n    except Exception as e:\n        logger.error(f\"Error in chat completion: {str(e)}\", exc_info=True)\n        error_response = {\n            \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n            \"object\": \"chat.completion.chunk\",\n            \"created\": int(time.time()),\n            \"model\": \"local-model\",\n            \"choices\": [{\n                \"index\": 0,\n                \"delta\": {\n                    \"content\": f\"Error: {str(e)}\"\n                },\n                \"finish_reason\": \"error\"\n            }]\n        }\n        yield f\"data: {json.dumps(error_response)}\\n\\n\"\n        yield \"data: [DONE]\\n\\n\"  # Make sure to send DONE even on error\n"
  },
  {
    "path": "Backend/src/endpoint/deleteStore.py",
    "content": "from src.endpoint.models import DeleteCollectionRequest\nfrom src.vectorstorage.vectorstore import get_vectorstore\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef delete_vectorstore_collection(data: DeleteCollectionRequest):\n    try:\n        logger.info(f\"Deleting vectorstore collection: {data.collection_name}\")\n        vectorstore = get_vectorstore(\n            data.api_key, data.collection_name, data.is_local)\n        if vectorstore:\n            vectorstore.delete_collection()\n            return True\n        return False\n    except Exception as e:\n        logger.error(f\"Error deleting vectorstore collection: {str(e)}\")\n        return False\n"
  },
  {
    "path": "Backend/src/endpoint/devApiCall.py",
    "content": "from src.data.database.getCollectionInfo import get_collection_settings\nfrom src.data.database.getLLMApiKey import get_llm_api_key\nfrom src.endpoint.models import VectorStoreQueryRequest\nfrom src.endpoint.ragQuery import rag_query\nfrom src.endpoint.vectorQuery import query_vectorstore\nfrom src.llms.llmQuery import llm_query\nfrom src.endpoint.models import ChatCompletionRequest\n\n\ndef vector_call(query_request: VectorStoreQueryRequest, user_id: str):\n    print(f\"API vector query received for user {user_id}\")\n    if not query_request.model:\n        print(f\"No model provided in request body for user {user_id}\")\n        \"\"\" VECTORSTORE QUERY IF NO MODEL PROVIDED IN REQUEST BODY \"\"\"\n        collectionSettings = get_collection_settings(\n            user_id, query_request.collection_name)\n        if collectionSettings.is_local == False:\n            api_key = get_llm_api_key(int(user_id), \"openai\")\n        else:\n            api_key = None\n        if not collectionSettings:\n            raise ValueError(\"Collection settings not found\")\n\n        vectorStoreData = VectorStoreQueryRequest(\n            query=query_request.input,\n            collection=collectionSettings.id,\n            collection_name=query_request.collection_name,\n            user=user_id,\n            api_key=api_key,\n            top_k=query_request.top_k,\n            is_local=collectionSettings.is_local,\n            local_embedding_model=collectionSettings.local_embedding_model\n        )\n        return query_vectorstore(vectorStoreData, collectionSettings.is_local)\n\n\nasync def rag_call(query_request: VectorStoreQueryRequest, user_id: str):\n    print(f\"Model provided in request body for user {user_id}\")\n    \"\"\" MODEL + VECTORSTORE QUERY IF MODEL AND COLLECTION NAME PROVIDED IN REQUEST BODY \"\"\"\n    collectionSettings = get_collection_settings(\n        user_id, query_request.collection_name)\n    if not collectionSettings:\n        raise 
ValueError(\"Collection settings not found\")\n    if query_request.is_local == False:\n        api_key = get_llm_api_key(int(user_id), query_request.provider)\n    else:\n        api_key = None\n    ragData = VectorStoreQueryRequest(\n        query=query_request.input,\n        collection=collectionSettings.id,\n        collection_name=query_request.collection_name,\n        user=user_id,\n        api_key=api_key,\n        top_k=query_request.top_k,\n        is_local=collectionSettings.is_local,\n        local_embedding_model=collectionSettings.local_embedding_model,\n        temperature=query_request.temperature,\n        max_completion_tokens=query_request.max_completion_tokens,\n        top_p=query_request.top_p,\n        frequency_penalty=query_request.frequency_penalty,\n        presence_penalty=query_request.presence_penalty,\n        provider=query_request.provider,\n        model=query_request.model,\n        is_ooba=query_request.is_ooba\n    )\n    return await rag_query(ragData, collectionSettings)\n\n\nasync def llm_call(query_request: ChatCompletionRequest, user_id: str):\n    print(\n        f\"Model and collection name provided in request body for user {user_id}\")\n    \"\"\" MODEL QUERY IF MODEL BUT NO COLLECTION NAME PROVIDED IN REQUEST BODY \"\"\"\n    if query_request.is_local == False:\n        api_key = get_llm_api_key(int(user_id), query_request.provider)\n    else:\n        api_key = None\n    return await llm_query(query_request, api_key)\n"
  },
  {
    "path": "Backend/src/endpoint/embed.py",
    "content": "from src.data.dataIntake.textSplitting import split_text\nfrom src.data.dataIntake.loadFile import load_document\nfrom src.endpoint.models import EmbeddingRequest\nfrom src.vectorstorage.helpers.sanitizeCollectionName import sanitize_collection_name\nfrom src.vectorstorage.vectorstore import get_vectorstore\nfrom src.vectorstorage.embeddings import embed_chunk, chunk_list\n\nimport os\nimport multiprocessing\nimport concurrent.futures\nimport time\nfrom typing import AsyncGenerator\nfrom collections import deque\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nasync def embed(data: EmbeddingRequest) -> AsyncGenerator[dict, None]:\n    file_name = os.path.basename(data.file_path)\n    try:\n        yield {\"status\": \"info\", \"message\": f\"Starting embedding process for file: {file_name}\"}\n\n        # Get file size\n        file_size = os.path.getsize(data.file_path)\n        if file_size > 25 * 1024 * 1024:  # If file is larger than 25MB\n            yield {\"status\": \"info\", \"message\": f\"Processing large file ({file_size / (1024*1024):.1f}MB). 
This may take longer.\"}\n\n        text_output = await load_document(data.file_path)\n\n        if text_output is None:\n            raise Exception(\"Failed to load document\")\n\n        # Handle generator output from CSV loader\n        if hasattr(text_output, '__iter__') and not isinstance(text_output, (str, list)):\n            texts = []\n            for item in text_output:\n                if isinstance(item, dict) and \"status\" in item:\n                    # Forward progress updates from CSV processing\n                    yield item\n                else:\n                    texts = item\n        else:\n            yield {\"status\": \"info\", \"message\": \"File loaded successfully\"}\n\n        # Check if file is CSV or PDF\n        if file_name.lower().endswith('.csv'):\n            texts = text_output  # CSV loader already returns list of documents\n        elif file_name.lower().endswith('.pdf'):\n            # PDF loader returns list of Documents, no need to split\n            texts = text_output\n        else:\n            # Pass metadata to split_text if it exists\n            texts = split_text(text_output, data.file_path,\n                             data.metadata if hasattr(data, 'metadata') else None)\n\n        if not texts:\n            raise Exception(\"No text content extracted from file\")\n\n        yield {\"status\": \"info\", \"message\": f\"Split text into {len(texts)} chunks\"}\n\n        collection_name = sanitize_collection_name(str(data.collection_name))\n        vectordb = get_vectorstore(\n            data.api_key, collection_name, data.is_local, data.local_embedding_model)\n        if not vectordb:\n            raise Exception(\"Failed to initialize vector database\")\n\n        # Adjust chunk size based on file size\n        chunk_size = min(50, max(10, int(1000000 / file_size)))  # Dynamic chunk size\n        chunks = list(chunk_list(texts, chunk_size))\n        total_chunks = len(chunks)\n        yield {\"status\": 
\"info\", \"message\": f\"Split into {total_chunks} chunks of {chunk_size} documents each\"}\n\n        start_time = time.time()\n        time_history = deque(maxlen=5)\n\n        # Process chunks with reduced parallelism for large files\n        num_cores = max(1, min(multiprocessing.cpu_count() - 1, 4))  # Use fewer cores for large files\n        yield {\"status\": \"info\", \"message\": f\"Using {num_cores} CPU cores for processing\"}\n\n        with concurrent.futures.ThreadPoolExecutor(max_workers=num_cores) as executor:\n            futures = []\n            for i, chunk in enumerate(chunks):\n                chunk_arg = (vectordb, chunk, i + 1, total_chunks, start_time, time_history)\n                future = executor.submit(embed_chunk, chunk_arg)\n                futures.append(future)\n                \n                # Process results as they complete\n                for completed in concurrent.futures.as_completed(futures):\n                    try:\n                        result = completed.result()\n                        yield {\"status\": \"progress\", \"data\": result}\n                    except Exception as e:\n                        logger.error(f\"Error processing chunk: {str(e)}\")\n                        yield {\"status\": \"error\", \"message\": f\"Error processing chunk: {str(e)}\"}\n                \n                futures = [f for f in futures if not f.done()]  # Clean up completed futures\n\n        yield {\"status\": \"success\", \"message\": \"Embedding completed successfully\"}\n\n    except Exception as e:\n        error_msg = f\"Error embedding file: {str(e)}\"\n        logger.error(error_msg)\n        yield {\"status\": \"error\", \"message\": error_msg}\n"
  },
  {
    "path": "Backend/src/endpoint/models.py",
    "content": "from pydantic import BaseModel\nfrom typing import Optional, Dict, Any, List, Literal\n\n\nclass EmbeddingRequest(BaseModel):\n    file_path: str\n    api_key: Optional[str] = None\n    collection: int\n    collection_name: str\n    user: int\n    metadata: Optional[Dict[str, Any]] = None\n    is_local: Optional[bool] = False\n    local_embedding_model: Optional[str] = \"granite-embedding:278m\"\n\n\nclass ModelLoadRequest(BaseModel):\n    model_name: str\n    model_type: Optional[str] = \"auto\"  # 'auto', 'Transformers', 'llama.cpp', 'llamacpp_HF', 'ExLlamav2', 'ExLlamav2_HF', 'HQQ', 'TensorRT-LLM'\n    device: Optional[str] = \"auto\"  # 'cpu', 'cuda', 'auto'\n    \n    # Transformers specific settings\n    load_in_8bit: Optional[bool] = False\n    load_in_4bit: Optional[bool] = False\n    use_flash_attention: Optional[bool] = False\n    trust_remote_code: Optional[bool] = True\n    use_safetensors: Optional[bool] = True\n    max_memory: Optional[Dict[str, str]] = None\n    compute_dtype: Optional[str] = \"float16\"  # float16, bfloat16, float32\n    rope_scaling: Optional[Dict[str, Any]] = None\n    use_cache: Optional[bool] = True\n    revision: Optional[str] = None\n    padding_side: Optional[str] = \"right\"\n    use_fast_tokenizer: Optional[bool] = True\n    hf_token: Optional[str] = None  # HuggingFace token for gated models\n    \n    # ExLlamav2 specific settings\n    max_seq_len: Optional[int] = None\n    compress_pos_emb: Optional[float] = 1.0\n    alpha_value: Optional[float] = 1\n    \n    # llama.cpp specific settings\n    n_ctx: Optional[int] = 2048\n    n_batch: Optional[int] = 512\n    n_threads: Optional[int] = None\n    n_threads_batch: Optional[int] = None\n    n_gpu_layers: Optional[int] = 32\n    main_gpu: Optional[int] = 0\n    tensor_split: Optional[List[float]] = None\n    mul_mat_q: Optional[bool] = True\n    use_mmap: Optional[bool] = True\n    use_mlock: Optional[bool] = False\n    offload_kqv: Optional[bool] = False\n  
  split_mode: Optional[str] = None\n    flash_attn: Optional[bool] = False\n    cache_type: Optional[str] = None\n    cache_size: Optional[int] = None\n    rope_scaling_type: Optional[str] = None\n    rope_freq_base: Optional[float] = None\n    rope_freq_scale: Optional[float] = None\n    \n    # HQQ specific settings\n    hqq_backend: Optional[str] = \"PYTORCH_COMPILE\"  # PYTORCH_COMPILE, ATEN, TENSORRT\n    \n    # TensorRT-LLM specific settings\n    engine_dir: Optional[str] = None\n    max_batch_size: Optional[int] = 1\n    max_input_len: Optional[int] = 2048\n    max_output_len: Optional[int] = 512\n    \n    # Common settings\n    model_path: Optional[str] = None  # Custom path to model files if not in default location\n    tokenizer_path: Optional[str] = None  # Custom path to tokenizer if different from model path\n    \n    class Config:\n        protected_namespaces = ()\n\nclass VectorStoreQueryRequest(BaseModel):\n    query: str\n    collection: Optional[int] = None\n    collection_name: str\n    user: int\n    api_key: Optional[str] = None\n    top_k: int = 5\n    is_local: Optional[bool] = False\n    local_embedding_model: Optional[str] = \"granite-embedding:278m\"\n    prompt: Optional[str] = None\n    provider: Optional[str] = None\n    model: Optional[str] = None\n    temperature: Optional[float] = 0.5\n    max_completion_tokens: Optional[int] = 2048\n    top_p: Optional[float] = 1\n    frequency_penalty: Optional[float] = 0\n    presence_penalty: Optional[float] = 0\n    is_ooba: Optional[bool] = False\n    character: Optional[str] = None\n    is_ollama: Optional[bool] = False\n\n\nclass YoutubeTranscriptRequest(BaseModel):\n    url: str\n    user_id: int\n    collection_id: int\n    username: str\n    collection_name: str\n    api_key: Optional[str] = None\n    is_local: Optional[bool] = False\n    local_embedding_model: Optional[str] = \"granite-embedding:278m\"\n\n\nclass DeleteCollectionRequest(BaseModel):\n    collection_id: int\n    
collection_name: str\n    is_local: Optional[bool] = False\n    api_key: Optional[str] = None\n\n\nclass WebCrawlRequest(BaseModel):\n    base_url: str\n    max_workers: int\n    collection_name: str\n    collection_id: int\n    user_id: int\n    user_name: str\n    api_key: Optional[str] = None\n    is_local: Optional[bool] = False\n    local_embedding_model: Optional[str] = \"granite-embedding:278m\"\n\n\nclass QueryRequest(BaseModel):\n    input: str\n    prompt: Optional[str] = None\n    provider: Optional[str] = None\n    model: Optional[str] = None\n    collection_name: Optional[str] = None\n    top_k: Optional[int] = 5\n    temperature: Optional[float] = 0.5\n    max_completion_tokens: Optional[int] = 2048\n    top_p: Optional[float] = 1\n    frequency_penalty: Optional[float] = 0\n    presence_penalty: Optional[float] = 0\n    is_local: Optional[bool] = False\n    is_ooba: Optional[bool] = False\n    local_embedding_model: Optional[str] = \"granite-embedding:278m\"\n    character: Optional[str] = None\n    is_ollama: Optional[bool] = False\n\n\nclass Message(BaseModel):\n    \"\"\"A single message in a chat completion request\"\"\"\n    role: Literal[\"system\", \"user\", \"assistant\"]\n    content: str\n    name: Optional[str] = None\n\n\nclass ChatCompletionRequest(BaseModel):\n    \"\"\"Request model for chat completion\"\"\"\n    messages: List[Message]\n    model: str = \"local-model\"\n    temperature: Optional[float] = 0.7\n    top_p: Optional[float] = 0.95\n    top_k: Optional[int] = 50\n    n: Optional[int] = 1\n    max_tokens: Optional[int] = 2048\n    presence_penalty: Optional[float] = 0.1\n    frequency_penalty: Optional[float] = 0.1\n    repetition_penalty: Optional[float] = 1.1\n    stop: Optional[List[str]] = None\n    stream: Optional[bool] = True\n    is_local: Optional[bool] = False\n    is_ooba: Optional[bool] = False\n    is_ollama: Optional[bool] = False\n\nclass GenerateRequest(BaseModel):\n    \"\"\"Request model for raw text 
generation\"\"\"\n    prompt: str\n    max_tokens: Optional[int] = 512\n    temperature: Optional[float] = 0.7\n    top_p: Optional[float] = 0.95\n    top_k: Optional[int] = 50\n    repetition_penalty: Optional[float] = 1.1\n    stop_sequences: Optional[List[str]] = None\n    echo: Optional[bool] = False\n    stream: Optional[bool] = True\n"
  },
  {
    "path": "Backend/src/endpoint/ragQuery.py",
    "content": "from src.endpoint.models import VectorStoreQueryRequest, ChatCompletionRequest\nfrom src.endpoint.vectorQuery import query_vectorstore\nfrom src.llms.llmQuery import llm_query\n\n\nasync def rag_query(data: VectorStoreQueryRequest, collectionInfo):\n    try:\n        results = query_vectorstore(data, data.is_local)\n        data.prompt = f\"The following is the data that the user has provided via their custom data collection: \" + \\\n            f\"\\n\\n{results}\" + \\\n            f\"\\n\\nCollection/Store Name: {collectionInfo.name}\" + \\\n            f\"\\n\\nCollection/Store Files: {collectionInfo.files}\" + \\\n            f\"\\n\\nCollection/Store Description: {collectionInfo.description}\"\n\n        chat_completion_request = ChatCompletionRequest(\n            messages=[\n                {\n                    \"role\": \"system\",\n                    \"content\": data.prompt\n                },\n                {\n                    \"role\": \"user\",\n                    \"content\": data.query\n                }\n            ],\n            model=data.model,\n            temperature=data.temperature,\n            max_tokens=data.max_completion_tokens,\n            top_p=data.top_p,\n            frequency_penalty=data.frequency_penalty,\n            presence_penalty=data.presence_penalty,\n            is_local=data.is_local,\n            is_ooba=data.is_ooba,\n            is_ollama=data.is_ollama\n        )\n        llm_response = await llm_query(chat_completion_request, data.api_key)\n        return llm_response\n    except Exception as e:\n        print(e)\n        raise e\n"
  },
  {
    "path": "Backend/src/endpoint/transcribe.py",
    "content": "from src.voice.voice_to_text import initialize_model\n\nimport os\nimport tempfile\nfrom fastapi import UploadFile, File, HTTPException\n\n# Global variables\nmodel = None\nffmpeg_path = None\n\n\nasync def transcribe_audio(audio_file: UploadFile = File(...), model_name: str = \"base\") -> dict:\n    \"\"\"Transcribe audio using Whisper.\"\"\"\n    temp_file = None\n    try:\n        # Initialize model and verify FFmpeg is available\n        model = initialize_model(model_name)\n        if not model:\n            raise HTTPException(\n                status_code=500, detail=\"FFmpeg not found or not working\")\n\n        # Create temporary file\n        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=\".wav\")\n        content = await audio_file.read()\n        temp_file.write(content)\n        temp_file.flush()\n        temp_file.close()\n\n        result = model.transcribe(temp_file.name)\n\n        return {\n            \"status\": \"success\",\n            \"text\": result[\"text\"],\n            \"language\": result.get(\"language\", \"unknown\"),\n            \"segments\": result.get(\"segments\", [])\n        }\n\n    except Exception as e:\n        print(f\"Error transcribing audio: {str(e)}\")\n        return {\n            \"status\": \"error\",\n            \"error\": str(e)\n        }\n    finally:\n        if temp_file and os.path.exists(temp_file.name):\n            try:\n                os.unlink(temp_file.name)\n                print(f\"Deleted temporary file: {temp_file.name}\")\n            except Exception as e:\n                print(\n                    f\"Warning: Could not delete temporary file {temp_file.name}: {str(e)}\")\n"
  },
  {
    "path": "Backend/src/endpoint/vectorQuery.py",
    "content": "from src.endpoint.models import VectorStoreQueryRequest\nfrom src.vectorstorage.helpers.sanitizeCollectionName import sanitize_collection_name\nfrom src.vectorstorage.vectorstore import get_vectorstore\n\n\ndef query_vectorstore(data: VectorStoreQueryRequest, is_local: bool):\n    try:\n        collection_name = sanitize_collection_name(str(data.collection_name))\n        vectordb = get_vectorstore(\n            data.api_key, collection_name, is_local, data.local_embedding_model)\n        results = vectordb.similarity_search(data.query, k=data.top_k)\n        return {\n            \"status\": \"success\",\n            \"results\": [{\"content\": doc.page_content, \"metadata\": doc.metadata} for doc in results],\n        }\n    except Exception as e:\n        print(f\"Error querying vectorstore: {str(e)}\")\n        return {\"status\": \"error\", \"message\": str(e)}\n"
  },
  {
    "path": "Backend/src/endpoint/webcrawl.py",
    "content": "from src.data.dataIntake.fileTypes.loadX import load_html\nfrom src.data.dataIntake.textSplitting import split_text\nfrom src.data.dataIntake.getHtmlFiles import get_html_files\nfrom src.data.dataFetch.webcrawler import WebCrawler\nfrom src.endpoint.models import WebCrawlRequest\nfrom src.vectorstorage.vectorstore import get_vectorstore\n\nfrom typing import Generator\nimport json\nimport os\nfrom urllib.parse import urlparse\nimport logging\n\n\ndef webcrawl(data: WebCrawlRequest, cancel_event=None) -> Generator[dict, None, None]:\n    try:\n        # Create web crawler instance with all required fields\n        scraper = WebCrawler(\n            data.base_url,\n            data.user_id,\n            data.user_name,\n            data.collection_id,\n            data.collection_name,\n            max_workers=data.max_workers,\n            cancel_event=cancel_event\n        )\n\n        # Yield progress updates during scraping\n        for progress in scraper.scrape():\n            if progress:\n                yield f\"data: {json.dumps(progress)}\"\n\n        # After scraping, process and embed all HTML files\n        root_url_dir = urlparse(\n            data.base_url).netloc.replace(\".\", \"_\") + \"_docs\"\n        collection_path = os.path.join(scraper.output_dir, root_url_dir)\n        vector_store = get_vectorstore(\n            data.api_key, data.collection_name, data.is_local, data.local_embedding_model)\n\n        # Get all HTML files recursively\n        html_files = get_html_files(collection_path)\n        print(f\"Found {len(html_files)} HTML files\")\n\n        # Process files in batches for better performance\n        batch_size = 50\n        total_batches = (len(html_files) + batch_size - 1) // batch_size\n        for i in range(0, len(html_files), batch_size):\n            batch = html_files[i:i + batch_size]\n            batch_docs = []\n\n            for file_path in batch:\n                content = load_html(file_path)\n        
        if content:\n                    split_content = split_text(content, file_path)\n                    batch_docs.extend(split_content)\n\n            if batch_docs:\n                vector_store.add_documents(batch_docs)\n\n            current_batch = i//batch_size + 1\n            progress_data = {\n                \"status\": \"progress\",\n                \"data\": {\n                    \"message\": f\"Part 2 of 2: Processing documents batch {current_batch}/{total_batches}\",\n                    \"chunk\": current_batch,\n                    \"total_chunks\": total_batches,\n                    \"percent_complete\": f\"{(current_batch/total_batches * 100):.1f}%\"\n                }\n            }\n            yield f\"data: {json.dumps(progress_data)}\"\n\n        final_message = f\"Successfully crawled and embedded {len(scraper.visited_urls)} pages from {data.base_url}\"\n        success_data = {\n            \"status\": \"success\",\n            \"data\": {\n                \"message\": final_message\n            }\n        }\n        yield f\"data: {json.dumps(success_data)}\"\n    except Exception as e:\n        error_message = str(e)\n        print(f\"Error during webcrawl: {error_message}\")\n        logging.error(f\"Error during webcrawl: {error_message}\")\n        error_data = {\n            \"status\": \"error\",\n            \"data\": {\n                \"message\": error_message\n            }\n        }\n        yield f\"data: {json.dumps(error_data)}\"\n"
  },
  {
    "path": "Backend/src/llms/llmQuery.py",
    "content": "from src.endpoint.models import ChatCompletionRequest\nfrom src.llms.providers.ooba import ooba_query\nfrom src.llms.providers.openai import openai_query\nfrom src.llms.providers.ollama import ollama_query\nfrom src.llms.providers.local import local_query\nfrom typing import Optional\n\n\nasync def llm_query(data: ChatCompletionRequest, api_key: Optional[str] = None):\n    try:\n        if data.is_ooba:\n            return ooba_query(data, data.messages)\n        elif data.is_ollama:\n            return ollama_query(data, data.messages)\n        elif data.is_local:\n            return await local_query(data)\n        else:\n            return openai_query(data, api_key, data.messages)\n\n    except Exception as e:\n        print(f\"Error in llm_query: {str(e)}\")\n        raise e\n"
  },
  {
    "path": "Backend/src/llms/messages/formMessages.py",
    "content": "from src.endpoint.models import QueryRequest\n\n\ndef form_messages(data: QueryRequest):\n    try:\n        if not data.prompt:\n            raise ValueError(\"System prompt cannot be null\")\n\n        query_content = data.query if hasattr(\n            data, 'query') else data.input\n\n        if not query_content:\n            raise ValueError(\"User query/input cannot be null\")\n\n        messages = [\n            {\"role\": \"system\", \"content\": data.prompt},\n            {\"role\": \"user\", \"content\": query_content}\n        ]\n        return messages\n    except Exception as e:\n        print(f\"Error in form_messages: {str(e)}\")\n        raise e\n"
  },
  {
    "path": "Backend/src/llms/providers/local.py",
    "content": "import asyncio\nimport json\nimport time\nimport logging\nfrom src.endpoint.api import chat_completion_stream\nfrom src.endpoint.models import ChatCompletionRequest, ModelLoadRequest\nfrom src.models.manager import model_manager\nfrom src.models.exceptions import ModelLoadError\n\nlogger = logging.getLogger(__name__)\n\n\nasync def local_query(data: ChatCompletionRequest):\n    try:\n        # Check if model is loaded and load it if necessary\n        if not model_manager.is_model_loaded() or model_manager.model_name != data.model:\n            logger.info(f\"Loading model {data.model} as it is not currently loaded\")\n            # Create model load request\n            load_request = ModelLoadRequest(\n                model_name=data.model,\n                model_type=\"Transformers\",  # Default to Transformers for now\n                device=\"auto\",\n                trust_remote_code=True,\n                use_safetensors=True,\n                compute_dtype=\"float16\"\n            )\n            try:\n                # Load the model\n                model_manager.load_model(load_request)\n                logger.info(f\"Successfully loaded model {data.model}\")\n            except ModelLoadError as e:\n                logger.error(f\"Failed to load model {data.model}: {str(e)}\")\n                raise\n\n        # Get the generator\n        response_gen = chat_completion_stream(data)\n        combined_content = \"\"\n        response_id = None\n        finish_reason = None\n\n        # Process each chunk\n        async for chunk in response_gen:\n            if chunk.startswith(\"data: \"):\n                chunk = chunk[6:]  # Remove \"data: \" prefix\n                if chunk.strip() == \"[DONE]\":\n                    continue\n\n                try:\n                    chunk_data = json.loads(chunk)\n                    if \"choices\" in chunk_data and len(chunk_data[\"choices\"]) > 0:\n                        choice = 
chunk_data[\"choices\"][0]\n                        if \"delta\" in choice:\n                            delta = choice[\"delta\"]\n                            if \"content\" in delta:\n                                combined_content += delta[\"content\"]\n                            if \"finish_reason\" in choice and choice[\"finish_reason\"]:\n                                finish_reason = choice[\"finish_reason\"]\n                        if not response_id:\n                            response_id = chunk_data.get(\"id\")\n                except json.JSONDecodeError as e:\n                    logger.warning(f\"Failed to parse chunk as JSON: {str(e)}\")\n                    continue\n\n        # Create final response structure\n        response = {\n            \"id\": response_id or f\"chatcmpl-{int(time.time())}\",\n            \"object\": \"chat.completion\",\n            \"created\": int(time.time()),\n            \"model\": data.model,\n            \"choices\": [{\n                \"index\": 0,\n                \"message\": {\n                    \"role\": \"assistant\",\n                    \"content\": combined_content\n                },\n                \"finish_reason\": finish_reason or \"stop\"\n            }]\n        }\n\n        return response\n\n    except Exception as e:\n        logger.error(f\"Error in local_query: {str(e)}\", exc_info=True)\n        raise\n"
  },
  {
    "path": "Backend/src/llms/providers/ollama.py",
    "content": "from src.endpoint.models import QueryRequest\nimport requests\nimport json\nimport time\n\n\n\ndef ollama_query(data: QueryRequest, messages: list = None):\n    try:\n        print(\"Local Ollama model enabled\")\n        model_data = {\n            \"model\": data.model,\n            \"messages\": messages,\n            \"stream\": False,  # Disable streaming for now\n            \"keep_alive\": -1,\n            \"max_tokens\": data.max_completion_tokens,\n            \"keep_alive\": -1,\n        }\n        print(f\"Model data: {model_data}\")\n        response = requests.post(\n            \"http://localhost:11434/api/chat\", json=model_data)\n\n        print(f\"Raw response: {response.text}\")\n\n        if response.status_code == 200:\n            try:\n                response_json = response.json()\n                print(f\"Parsed response: {response_json}\")\n                # Extract content from the nested message structure\n                content = response_json.get(\"message\", {}).get(\n                    \"content\", \"No response from model\")\n\n                # Standardized response format\n                return {\n                    \"id\": f\"local-{data.model}-{int(time.time())}\",\n                    \"choices\": [{\n                        \"finish_reason\": \"stop\",\n                        \"index\": 0,\n                        \"message\": {\n                                \"content\": content,\n                                \"role\": \"assistant\"\n                                }\n                    }],\n                    \"created\": int(time.time()),\n                    \"model\": data.model,\n                    \"object\": \"chat.completion\",\n                    \"usage\": {\n                        \"completion_tokens\": -1,  # Token count not available for local models\n                        \"prompt_tokens\": -1,\n                        \"total_tokens\": -1\n                    }\n                
}\n            except json.JSONDecodeError as e:\n                print(f\"JSON decode error: {e}\")\n                raise ValueError(\n                    f\"Failed to parse response from Ollama: {e}\")\n        raise ValueError(\n            f\"Ollama request failed with status {response.status_code}: {response.text}\")\n    except Exception as e:\n        print(f\"Error in ollama_query: {str(e)}\")\n        raise e\n"
  },
  {
    "path": "Backend/src/llms/providers/ooba.py",
    "content": "from src.endpoint.models import QueryRequest\nimport requests\n\n\ndef ooba_query(data: QueryRequest, messages: list = None):\n    try:\n        print(\"Ooba mode enabled\")\n        ooba_data = {\n            \"messages\": messages,\n            \"mode\": \"chat\",\n            \"character\": data.character\n        }\n        response = requests.post(\n            \"http://127.0.0.1:5000/v1/chat/completions\", json=ooba_data)\n        return response.json()\n    except Exception as e:\n        print(f\"Error in ooba_query: {str(e)}\")\n        raise e\n"
  },
  {
    "path": "Backend/src/llms/providers/openai.py",
    "content": "from src.endpoint.models import QueryRequest\nfrom openai import OpenAI\nfrom typing import Optional\n\n\ndef openai_query(data: QueryRequest, api_key: Optional[str] = None, messages: list = None):\n    try:\n        print(f\"API key3: {api_key}\")\n        client = OpenAI(api_key=api_key)\n        response = client.chat.completions.create(\n            model=data.model,\n            messages=messages,\n            response_format={\n                \"type\": \"text\"\n            },\n            temperature=data.temperature,\n            max_completion_tokens=data.max_completion_tokens,\n            top_p=data.top_p,\n            frequency_penalty=data.frequency_penalty,\n            presence_penalty=data.presence_penalty\n        )\n        # Convert OpenAI response to dict for consistent format\n        return response.model_dump()\n    except Exception as e:\n        print(f\"Error in openai_query: {str(e)}\")\n        raise e\n"
  },
  {
    "path": "Backend/src/models/__init__.py",
    "content": ""
  },
  {
    "path": "Backend/src/models/exceptions.py",
    "content": "class ModelLoadError(Exception):\n    \"\"\"Exception raised when there is an error loading a model.\"\"\"\n    pass\n\nclass ModelNotFoundError(Exception):\n    \"\"\"Exception raised when a requested model cannot be found.\"\"\"\n    pass\n\nclass ModelDownloadError(Exception):\n    \"\"\"Exception raised when there is an error downloading a model.\"\"\"\n    pass\n"
  },
  {
    "path": "Backend/src/models/loaders/__init__.py",
    "content": "from .transformers import TransformersLoader\nfrom .llamacpp import LlamaCppLoader\nfrom .llamaccphf import LlamaCppHFLoader\nfrom .exllama import ExLlamaV2Loader, ExLlamaV2HFLoader\nfrom .hqq import HQQLoader\nfrom .tensorrt import TensorRTLoader\n\n__all__ = [\n    'TransformersLoader',\n    'LlamaCppLoader',\n    'LlamaCppHFLoader',\n    'ExLlamaV2Loader',\n    'ExLlamaV2HFLoader',\n    'HQQLoader',\n    'TensorRTLoader',\n] "
  },
  {
    "path": "Backend/src/models/loaders/base.py",
    "content": "from abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple\nimport logging\nfrom dataclasses import asdict\n\nfrom src.endpoint.models import ModelLoadRequest\nfrom src.models.exceptions import ModelLoadError\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseLoader(ABC):\n    \"\"\"\n    Abstract base class for model loaders.\n\n    This class defines the interface that all model loaders must implement\n    and provides some common utility methods.\n\n    Attributes:\n        request (ModelLoadRequest): The request object containing loading parameters\n        manager (Any): Reference to the model manager instance\n        model_path (Path): Path to the model files\n    \"\"\"\n\n    def __init__(self, request: ModelLoadRequest, manager: Any):\n        \"\"\"\n        Initialize the loader with request parameters and manager reference.\n\n        Args:\n            request: ModelLoadRequest object containing all loading parameters\n            manager: Reference to the ModelManager instance\n        \"\"\"\n        self.request = request\n        self.manager = manager\n        self.model_path = self._resolve_model_path()\n\n    @abstractmethod\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"\n        Load the model and tokenizer.\n\n        Returns:\n            Tuple containing (model, tokenizer)\n\n        Raises:\n            ModelLoadError: If there's an error during model loading\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def get_metadata(self) -> Optional[Dict[str, Any]]:\n        \"\"\"\n        Get model metadata without loading the full model.\n\n        Returns:\n            Dictionary containing model metadata or None if not available\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def get_config(self) -> Dict[str, Any]:\n        \"\"\"\n        Get the current model configuration.\n\n        Returns:\n            Dictionary containing model 
configuration\n        \"\"\"\n        pass\n\n    def _resolve_model_path(self) -> Path:\n        \"\"\"\n        Resolve the model path from the request parameters.\n\n        Returns:\n            Path object pointing to the model location\n\n        Raises:\n            ModelLoadError: If the path cannot be resolved\n        \"\"\"\n        try:\n            if self.request.model_path:\n                path = Path(self.request.model_path)\n            else:\n                path = Path(f\"models/{self.request.model_name}\")\n\n            # Create parent directories if they don't exist\n            path.parent.mkdir(parents=True, exist_ok=True)\n\n            return path\n        except Exception as e:\n            raise ModelLoadError(f\"Failed to resolve model path: {str(e)}\")\n\n    def get_request_dict(self) -> Dict[str, Any]:\n        \"\"\"\n        Convert the request object to a dictionary, filtering out None values.\n\n        Returns:\n            Dictionary containing all non-None request parameters\n        \"\"\"\n        return {k: v for k, v in asdict(self.request).items() if v is not None}\n\n    def log_loading_info(self) -> None:\n        \"\"\"Log information about the model being loaded.\"\"\"\n        logger.info(f\"Loading model: {self.request.model_name}\")\n        logger.info(f\"Model type: {self.request.model_type}\")\n        logger.info(f\"Model path: {self.model_path}\")\n        logger.info(f\"Device: {self.request.device}\")\n\n    @staticmethod\n    def cleanup(model: Any) -> None:\n        \"\"\"\n        Clean up model resources.\n\n        Args:\n            model: The model instance to clean up\n        \"\"\"\n        try:\n            if hasattr(model, 'cpu'):\n                model.cpu()\n            del model\n        except Exception as e:\n            logger.warning(f\"Error during model cleanup: {str(e)}\")\n\n    def validate_model_path(self) -> None:\n        \"\"\"\n        Validate that the model path exists and 
is accessible.\n\n        Raises:\n            ModelLoadError: If the model path is invalid or inaccessible\n        \"\"\"\n        if not self.model_path.exists():\n            raise ModelLoadError(\n                f\"Model path does not exist: {self.model_path}\")\n\n    def get_common_metadata(self) -> Dict[str, Any]:\n        \"\"\"\n        Get common metadata that applies to all model types.\n\n        Returns:\n            Dictionary containing common metadata fields\n        \"\"\"\n        return {\n            \"model_name\": self.request.model_name,\n            \"model_type\": self.request.model_type,\n            \"model_path\": str(self.model_path),\n            \"device\": self.request.device,\n            \"file_size\": self.model_path.stat().st_size if self.model_path.exists() else None,\n        }\n\n    def validate_request(self) -> None:\n        \"\"\"\n        Validate the model load request parameters.\n\n        Raises:\n            ModelLoadError: If the request parameters are invalid\n        \"\"\"\n        if not self.request.model_name:\n            raise ModelLoadError(\"Model name is required\")\n\n        if not self.request.model_type:\n            raise ModelLoadError(\"Model type is required\")\n\n    def check_dependencies(self) -> None:\n        \"\"\"\n        Check if all required dependencies are installed.\n\n        Raises:\n            ModelLoadError: If any required dependency is missing\n        \"\"\"\n        pass  # Implement in specific loaders\n\n    def prepare_loading(self) -> None:\n        \"\"\"\n        Prepare for model loading by performing all necessary checks.\n\n        This method combines several validation steps and should be\n        called at the start of the load method in implementing classes.\n\n        Raises:\n            ModelLoadError: If any preparation step fails\n        \"\"\"\n        try:\n            self.validate_request()\n            self.check_dependencies()\n            
self.validate_model_path()\n            self.log_loading_info()\n        except Exception as e:\n            raise ModelLoadError(\n                f\"Failed to prepare for model loading: {str(e)}\")\n\n    def get_device_config(self) -> Dict[str, Any]:\n        \"\"\"\n        Get device-specific configuration.\n\n        Returns:\n            Dictionary containing device configuration\n        \"\"\"\n        import torch\n\n        return {\n            \"device\": self.request.device,\n            \"cuda_available\": torch.cuda.is_available(),\n            \"cuda_device_count\": torch.cuda.device_count() if torch.cuda.is_available() else 0,\n            \"mps_available\": hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available(),\n        }\n\n    def get_memory_info(self) -> Dict[str, Any]:\n        \"\"\"\n        Get system memory information.\n\n        Returns:\n            Dictionary containing memory information\n        \"\"\"\n        try:\n            import psutil\n            vm = psutil.virtual_memory()\n            return {\n                \"total_memory\": vm.total,\n                \"available_memory\": vm.available,\n                \"memory_percent\": vm.percent,\n            }\n        except ImportError:\n            return {}\n\n    def get_system_info(self) -> Dict[str, Any]:\n        \"\"\"\n        Get system information.\n\n        Returns:\n            Dictionary containing system information\n        \"\"\"\n        import platform\n\n        return {\n            \"platform\": platform.system(),\n            \"platform_release\": platform.release(),\n            \"python_version\": platform.python_version(),\n            \"device_config\": self.get_device_config(),\n            \"memory_info\": self.get_memory_info(),\n        }\n\n    def log_error(self, error: Exception, context: str = \"\") -> None:\n        \"\"\"\n        Log an error with context.\n\n        Args:\n            error: The exception that occurred\n  
          context: Additional context about where/why the error occurred\n        \"\"\"\n        error_msg = f\"{context + ': ' if context else ''}{str(error)}\"\n        logger.error(error_msg, exc_info=True)\n\n    def __repr__(self) -> str:\n        \"\"\"\n        Get string representation of the loader.\n\n        Returns:\n            String representation including model name and type\n        \"\"\"\n        return f\"{self.__class__.__name__}(model_name={self.request.model_name}, model_type={self.request.model_type})\"\n"
  },
  {
    "path": "Backend/src/models/loaders/exllama.py",
    "content": "import logging\nfrom typing import Any, Dict, Optional, Tuple\n\nfrom src.models.loaders.base import BaseLoader\nfrom src.models.exceptions import ModelLoadError\nfrom transformers import AutoTokenizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExLlamaV2Loader(BaseLoader):\n    \"\"\"Loader for ExLlamaV2 models.\"\"\"\n\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"Load an ExLlamav2 model.\"\"\"\n        try:\n            from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Tokenizer\n            import torch\n        except ImportError:\n            raise ModelLoadError(\n                \"exllamav2 is not installed. Please install it from the ExLlamaV2 repository\")\n\n        if not self.model_path.exists():\n            raise ModelLoadError(\n                f\"Model path does not exist: {self.model_path}\")\n\n        # Clear CUDA cache\n        if torch.cuda.is_available():\n            torch.cuda.empty_cache()\n            logger.info(f\"CUDA Device: {torch.cuda.get_device_name(0)}\")\n            logger.info(\n                f\"CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**2:.0f}MB\")\n\n        if not torch.cuda.is_available():\n            raise ModelLoadError(\"GPU is required for ExLlama2\")\n\n        # Force CUDA device\n        torch.set_default_device('cuda')\n        torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n        config = ExLlamaV2Config()\n        config.model_dir = str(self.model_path)\n        config.max_seq_len = self.request.max_seq_len or 2048\n        config.compress_pos_emb = self.request.compress_pos_emb\n        config.alpha_value = self.request.alpha_value\n        config.calculate_rotary_embedding_base()  # Important for GPU performance\n\n        logger.info(f\"Loading model with config: {config.__dict__}\")\n        model = ExLlamaV2(config)\n\n        # Force model to GPU\n        model.load()\n        for param in model.parameters():\n            
param.data = param.data.cuda()\n\n        logger.info(\n            f\"Model loaded on GPU. CUDA Memory: {torch.cuda.memory_allocated() / 1024**2:.0f}MB\")\n        logger.info(\n            f\"Device for first parameter: {next(model.parameters()).device}\")\n\n        tokenizer = ExLlamaV2Tokenizer(config)\n        logger.info(\"Model and tokenizer loaded successfully\")\n\n        return model, tokenizer\n\n    def get_metadata(self) -> Optional[Dict[str, Any]]:\n        \"\"\"Get model metadata.\"\"\"\n        if not self.model_path.exists():\n            return None\n        return {\n            \"model_type\": \"ExLlamav2\",\n            \"model_path\": str(self.model_path),\n            \"file_size\": self.model_path.stat().st_size\n        }\n\n    def get_config(self) -> Dict[str, Any]:\n        \"\"\"Get model configuration.\"\"\"\n        return {\n            \"model_type\": \"ExLlamav2\",\n            \"model_name\": self.request.model_name,\n            \"device\": self.request.device,\n            \"max_seq_len\": self.request.max_seq_len,\n            \"compress_pos_emb\": self.request.compress_pos_emb,\n            \"alpha_value\": self.request.alpha_value\n        }\n\n\nclass ExLlamaV2HFLoader(BaseLoader):\n    \"\"\"Loader for ExLlamaV2 models with HuggingFace tokenizer.\"\"\"\n\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"Load an ExLlamav2 model with HF tokenizer.\"\"\"\n        model = ExLlamaV2Loader(self.request, self.manager).load()[0]\n        tokenizer_path = self.request.tokenizer_path or self.model_path\n\n        tokenizer = AutoTokenizer.from_pretrained(\n            tokenizer_path,\n            trust_remote_code=self.request.trust_remote_code,\n            use_fast=self.request.use_fast_tokenizer,\n        )\n\n        return model, tokenizer\n\n    def get_metadata(self) -> Optional[Dict[str, Any]]:\n        \"\"\"Get model metadata.\"\"\"\n        return ExLlamaV2Loader(self.request, self.manager).get_metadata()\n\n    
def get_config(self) -> Dict[str, Any]:\n        \"\"\"Get model configuration.\"\"\"\n        return ExLlamaV2Loader(self.request, self.manager).get_config()\n"
  },
  {
    "path": "Backend/src/models/loaders/hqq.py",
    "content": "import logging\nfrom typing import Any, Dict, Optional, Tuple\nimport requests\nfrom tqdm import tqdm\n\nfrom src.models.loaders.base import BaseLoader\nfrom src.models.exceptions import ModelLoadError, ModelDownloadError\nfrom transformers import AutoTokenizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass HQQLoader(BaseLoader):\n    \"\"\"Loader for HQQ quantized models.\"\"\"\n\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"Load an HQQ model.\"\"\"\n        try:\n            from hqq.core.quantize import HQQBackend, HQQLinear\n            from hqq.models.hf.base import AutoHQQHFModel\n        except ImportError:\n            raise ModelLoadError(\n                \"hqq is not installed. Please install it from the HQQ repository\")\n\n        try:\n            # Create models directory if it doesn't exist\n            self.model_path.parent.mkdir(parents=True, exist_ok=True)\n            logger.info(f\"Using model path: {self.model_path}\")\n\n            # If it's a HuggingFace model ID and doesn't exist locally, try to download it\n            if '/' in self.request.model_name and not self.model_path.exists():\n                self._download_model()\n\n            if not self.model_path.exists():\n                raise ModelLoadError(\n                    f\"Model path does not exist: {self.model_path}\")\n\n            logger.info(f\"Loading HQQ model from {self.model_path}\")\n            model = AutoHQQHFModel.from_quantized(str(self.model_path))\n            logger.info(\"Model loaded successfully\")\n\n            logger.info(f\"Setting HQQ backend to {self.request.hqq_backend}\")\n            HQQLinear.set_backend(\n                getattr(HQQBackend, self.request.hqq_backend))\n            logger.info(\"HQQ backend set successfully\")\n\n            logger.info(\"Loading tokenizer\")\n            tokenizer = AutoTokenizer.from_pretrained(\n                self.request.tokenizer_path or self.model_path,\n                
trust_remote_code=self.request.trust_remote_code,\n                use_fast=self.request.use_fast_tokenizer,\n            )\n            logger.info(\"Tokenizer loaded successfully\")\n\n            return model, tokenizer\n\n        except Exception as e:\n            raise ModelLoadError(f\"Failed to load HQQ model: {str(e)}\")\n\n    def _download_model(self) -> None:\n        \"\"\"Download model from HuggingFace.\"\"\"\n        try:\n            # Ensure the target directory exists before any file is written;\n            # load() only creates the parent, and this method runs precisely\n            # when self.model_path does not exist yet.\n            self.model_path.mkdir(parents=True, exist_ok=True)\n\n            # Get repository contents\n            api_url = f\"https://huggingface.co/api/models/{self.request.model_name}/tree/main\"\n            headers = {\"Accept\": \"application/json\"}\n            if self.request.hf_token:\n                headers[\"Authorization\"] = f\"Bearer {self.request.hf_token}\"\n\n            logger.info(f\"Fetching repository contents from {api_url}\")\n            response = requests.get(api_url, headers=headers)\n            response.raise_for_status()\n            files = response.json()\n            logger.info(f\"Found {len(files)} files in repository\")\n\n            # Required files for HQQ models\n            required_files = ['qmodel.pt', 'config.json',\n                              'tokenizer.model', 'tokenizer_config.json', 'tokenizer.json']\n            logger.info(f\"Required files: {required_files}\")\n\n            # Download each required file\n            for file_name in required_files:\n                file_info = next(\n                    (f for f in files if f['path'] == file_name), None)\n                if not file_info:\n                    logger.error(\n                        f\"Required file {file_name} not found in repository. 
Available files: {[f['path'] for f in files]}\")\n                    raise ModelDownloadError(\n                        f\"Required file {file_name} not found in repository {self.request.model_name}\")\n\n                download_url = f\"https://huggingface.co/{self.request.model_name}/resolve/main/{file_name}\"\n                file_path = self.model_path / file_name\n\n                # Download the file with progress bar\n                logger.info(\n                    f\"Downloading {file_name} ({file_info.get('size', 'unknown size')}) from {download_url}\")\n                response = requests.get(\n                    download_url, stream=True, headers=headers)\n                response.raise_for_status()\n\n                total_size = int(response.headers.get('content-length', 0))\n                block_size = 8192  # 8 KB\n\n                with open(file_path, 'wb') as f, tqdm(\n                    desc=file_name,\n                    total=total_size,\n                    unit='iB',\n                    unit_scale=True,\n                    unit_divisor=1024,\n                ) as pbar:\n                    for data in response.iter_content(block_size):\n                        size = f.write(data)\n                        pbar.update(size)\n\n                logger.info(\n                    f\"Successfully downloaded {file_name} to {file_path}\")\n\n        except Exception as e:\n            logger.error(\n                f\"Failed to download model: {str(e)}\", exc_info=True)\n            # Clean up any partially downloaded files\n            if self.model_path.exists():\n                import shutil\n                shutil.rmtree(self.model_path)\n            raise ModelDownloadError(f\"Failed to download model: {str(e)}\")\n\n    def get_metadata(self) -> Optional[Dict[str, Any]]:\n        \"\"\"Get model metadata.\"\"\"\n        if not self.model_path.exists():\n            return None\n        return {\n            \"model_type\": \"HQQ\",\n 
           \"model_path\": str(self.model_path),\n            \"file_size\": self.model_path.stat().st_size,\n            \"backend\": self.request.hqq_backend\n        }\n\n    def get_config(self) -> Dict[str, Any]:\n        \"\"\"Get model configuration.\"\"\"\n        return {\n            \"model_type\": \"HQQ\",\n            \"model_name\": self.request.model_name,\n            \"device\": self.request.device,\n            \"backend\": self.request.hqq_backend\n        }\n"
  },
  {
    "path": "Backend/src/models/loaders/llamaccphf.py",
    "content": "from typing import Any, Tuple\n\nfrom src.models.loaders.llamacpp import LlamaCppLoader\n\n\nclass LlamaCppHFLoader(LlamaCppLoader):\n    \"\"\"\n    Loader for llama.cpp models with HuggingFace tokenizer.\n    Inherits from LlamaCppLoader but uses a separate HF tokenizer.\n    \"\"\"\n\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"Load model with HuggingFace tokenizer.\"\"\"\n        from transformers import AutoTokenizer\n\n        # Load the base model\n        model, _ = super().load()\n\n        # Load HuggingFace tokenizer\n        tokenizer_path = self.request.tokenizer_path or (\n            self.request.model_path if self.request.model_path else f\"models/{self.request.model_name}\")\n\n        tokenizer = AutoTokenizer.from_pretrained(\n            tokenizer_path,\n            trust_remote_code=self.request.trust_remote_code,\n            use_fast=self.request.use_fast_tokenizer,\n        )\n\n        return model, tokenizer"
  },
  {
    "path": "Backend/src/models/loaders/llamacpp.py",
    "content": "import os\nimport logging\nimport requests\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple\nfrom tqdm import tqdm\nimport sys\n\nfrom src.models.loaders.base import BaseLoader\nfrom src.endpoint.models import ModelLoadRequest\nfrom src.models.exceptions import ModelDownloadError, ModelLoadError\n\nlogger = logging.getLogger(__name__)\n\n\nclass LlamaCppLoader(BaseLoader):\n    \"\"\"\n    Loader for llama.cpp models. Handles both local and remote model loading,\n    with support for GGUF format and various optimizations.\n    \"\"\"\n\n    def __init__(self, request: ModelLoadRequest, manager: Any):\n        super().__init__(request, manager)\n        self.llama = None\n        self.cache = None\n\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"Load a llama.cpp model and return the model and tokenizer.\"\"\"\n        try:\n            import torch\n            from llama_cpp import Llama\n\n            # Force CUDA environment variables before anything else\n            if torch.cuda.is_available():\n                os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n                os.environ['LLAMA_CUDA_FORCE'] = '1'\n\n                # Log CUDA information\n                logger.info(\"CUDA is available\")\n                logger.info(f\"CUDA Device: {torch.cuda.get_device_name(0)}\")\n                logger.info(\n                    f\"Total CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**2:.0f}MB\")\n                torch.cuda.empty_cache()\n\n            # Get model path and ensure it exists\n            model_path = self._get_model_path()\n            if not model_path.exists():\n                raise ModelLoadError(f\"Model file not found: {model_path}\")\n\n            logger.info(f\"Loading model from path: {model_path}\")\n\n            # Simple CUDA parameters that match working Q8 configurations\n            model_params = {\n                \"model_path\": str(model_path),\n               
 \"n_ctx\": int(self.request.n_ctx) if self.request.n_ctx is not None else 2048,\n                \"n_batch\": int(self.request.n_batch) if self.request.n_batch is not None else 512,\n                \"n_gpu_layers\": -1,\n                \"main_gpu\": 0,\n                \"use_mmap\": True,  # Enable memory mapping\n                \"use_mlock\": False,\n                \"verbose\": True\n            }\n\n            # Log parameters\n            logger.info(f\"Loading model with parameters: {model_params}\")\n\n            # Load model\n            model = Llama(**model_params)\n            logger.info(\"Initial model load successful\")\n\n            # Simple CUDA test\n            if torch.cuda.is_available():\n                try:\n                    logger.info(\"Testing model...\")\n                    # Basic tokenization test\n                    tokens = model.tokenize(b\"test\")\n                    logger.info(\"Tokenization successful\")\n\n                    # Log memory usage\n                    allocated = torch.cuda.memory_allocated() / 1024**2\n                    reserved = torch.cuda.memory_reserved() / 1024**2\n                    logger.info(f\"CUDA Memory allocated: {allocated:.2f}MB\")\n                    logger.info(f\"CUDA Memory reserved: {reserved:.2f}MB\")\n\n                except Exception as e:\n                    logger.error(f\"Model test failed: {e}\")\n                    raise ModelLoadError(f\"Failed to initialize model: {e}\")\n\n            logger.info(\"Model loaded successfully\")\n            return model, model\n\n        except Exception as e:\n            logger.error(f\"Error loading model: {str(e)}\", exc_info=True)\n            raise ModelLoadError(f\"Failed to load llama.cpp model: {str(e)}\")\n\n    def _get_model_path(self) -> Path:\n        \"\"\"Get and validate the model path, downloading if necessary.\"\"\"\n        # Handle both direct file paths and model names\n        if self.request.model_path:\n     
       model_path = Path(self.request.model_path)\n        else:\n            # Convert HF style paths to filesystem paths\n            safe_name = self.request.model_name.replace('/', os.path.sep)\n            model_path = Path('models') / safe_name\n\n        model_dir = model_path if model_path.is_dir() else model_path.parent\n        model_dir.mkdir(parents=True, exist_ok=True)\n\n        # Special handling for Ollama paths\n        if '.ollama' in str(model_path):\n            logger.info(\"Detected Ollama model path\")\n\n            # Determine Ollama directory based on OS\n            if sys.platform == 'darwin':  # macOS specific path\n                ollama_dir = Path(os.path.expanduser('~/.ollama'))\n                logger.info(f\"Using macOS Ollama directory: {ollama_dir}\")\n            else:  # Windows and Linux\n                ollama_dir = Path(os.path.expandvars('%USERPROFILE%\\\\.ollama'))\n                if not ollama_dir.exists():\n                    ollama_dir = Path(os.path.expanduser('~/.ollama'))\n\n            if not ollama_dir.exists():\n                raise ModelLoadError(\n                    f\"Ollama directory not found at: {ollama_dir}\")\n\n            # Extract model name from path\n            model_name = self.request.model_name\n            if not model_name and 'registry.ollama.ai/library/' in str(model_path):\n                model_name = str(model_path).split(\n                    'registry.ollama.ai/library/')[-1].split('/')[0]\n            logger.info(f\"Using model name: {model_name}\")\n\n            # First check for the model file in the models directory\n            models_dir = ollama_dir / 'models'\n            logger.info(f\"Checking Ollama models directory: {models_dir}\")\n\n            if models_dir.exists():\n                # First try to find a .gguf file\n                gguf_files = list(models_dir.glob(\"**/*.gguf\"))\n                if gguf_files:\n                    logger.info(f\"Found Ollama GGUF 
file: {gguf_files[0]}\")\n                    return gguf_files[0]\n\n                # Look for manifest\n                manifest_dir = models_dir / 'manifests' / \\\n                    'registry.ollama.ai' / 'library' / model_name\n                manifest_path = manifest_dir / 'latest'\n                logger.info(f\"Looking for manifest at: {manifest_path}\")\n\n                if manifest_path.exists():\n                    with open(manifest_path, 'r') as f:\n                        manifest = f.read()\n                        logger.info(f\"Manifest content: {manifest}\")\n                        import json\n                        try:\n                            manifest_data = json.loads(manifest)\n                            for layer in manifest_data.get('layers', []):\n                                if layer.get('mediaType') == 'application/vnd.ollama.image.model':\n                                    blob_hash = layer.get('digest', '').replace(\n                                        'sha256:', 'sha256-')\n                                    if blob_hash:\n                                        # Check both blobs and models directories for the file\n                                        possible_paths = [\n                                            models_dir / 'blobs' / blob_hash,\n                                            ollama_dir / 'blobs' / blob_hash\n                                        ]\n\n                                        for blob_path in possible_paths:\n                                            logger.info(\n                                                f\"Checking for blob at: {blob_path}\")\n                                            if blob_path.exists():\n                                                logger.info(\n                                                    f\"Found Ollama model blob: {blob_path}\")\n                                                return blob_path\n\n                        except 
json.JSONDecodeError as e:\n                            logger.error(f\"Failed to parse manifest: {e}\")\n                            pass\n\n            logger.warning(f\"No Ollama model files found in: {models_dir}\")\n            raise ModelLoadError(\n                f\"Could not find Ollama model files in {models_dir}\")\n\n        # Check for existing GGUF files in the directory\n        if model_dir.exists():\n            existing_gguf = list(model_dir.glob(\"*.gguf\"))\n            if existing_gguf:\n                logger.info(f\"Found existing GGUF model: {existing_gguf[0]}\")\n                return existing_gguf[0]\n\n        # Only attempt to download if it looks like a HF model ID\n        if '/' in self.request.model_name:\n            return self._download_model(model_dir)\n\n        raise ModelLoadError(f\"No model files found in: {model_dir}\")\n\n    def _download_model(self, model_dir: Path) -> Path:\n        \"\"\"Download model from Hugging Face.\"\"\"\n        logger.info(f\"Attempting to download model: {self.request.model_name}\")\n\n        try:\n            # Setup API request\n            api_url = f\"https://huggingface.co/api/models/{self.request.model_name}/tree/main\"\n            headers = {\"Accept\": \"application/json\"}\n            if self.request.hf_token:\n                headers[\"Authorization\"] = f\"Bearer {self.request.hf_token}\"\n\n            # Get repository contents\n            response = requests.get(api_url, headers=headers)\n            response.raise_for_status()\n            files = response.json()\n\n            # Find GGUF files\n            gguf_files = [f for f in files if f.get(\n                'path', '').endswith('.gguf')]\n            if not gguf_files:\n                raise ModelDownloadError(\n                    f\"No GGUF files found in repository {self.request.model_name}\")\n\n            # Sort by preference (q4_k_m) and size\n            gguf_files.sort(key=lambda x: (\n                0 if 
'q4_k_m' in x['path'].lower() else 1,\n                x.get('size', float('inf'))\n            ))\n\n            # Download the best candidate\n            file_info = gguf_files[0]\n            file_name = file_info['path']\n            download_url = f\"https://huggingface.co/{self.request.model_name}/resolve/main/{file_name}\"\n            model_path = model_dir / file_name\n\n            if not model_path.exists() or model_path.stat().st_size == 0:\n                self._download_file(download_url, model_path, headers)\n\n            return model_path\n\n        except Exception as e:\n            raise ModelDownloadError(f\"Failed to download model: {str(e)}\")\n\n    def _download_file(self, url: str, path: Path, headers: Dict[str, str]) -> None:\n        \"\"\"Download a file with progress bar.\"\"\"\n        response = requests.get(url, stream=True, headers=headers)\n        response.raise_for_status()\n\n        total_size = int(response.headers.get('content-length', 0))\n        block_size = 8192\n\n        with open(path, 'wb') as f, tqdm(\n            desc=path.name,\n            total=total_size,\n            unit='iB',\n            unit_scale=True,\n            unit_divisor=1024,\n        ) as pbar:\n            for data in response.iter_content(block_size):\n                size = f.write(data)\n                pbar.update(size)\n\n    def _get_model_params(self) -> Dict[str, Any]:\n        \"\"\"Configure model parameters based on request and system capabilities.\"\"\"\n        import torch\n\n        # Base parameters\n        params = {\n            \"n_ctx\": int(self.request.n_ctx) if self.request.n_ctx is not None else 2048,\n            \"n_batch\": int(self.request.n_batch) if self.request.n_batch is not None else 512,\n            \"n_threads\": int(self.request.n_threads) if self.request.n_threads is not None else os.cpu_count(),\n            \"verbose\": True,  # Enable verbose output for debugging\n        }\n\n        # Add CUDA 
parameters if available\n        if torch.cuda.is_available():\n            logger.info(\"Configuring CUDA parameters...\")\n\n            # Force CUDA environment variables\n            os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n            os.environ['LLAMA_CUDA_FORCE'] = '1'\n            os.environ['LLAMA_FORCE_GPU'] = '1'  # Force GPU usage\n            os.environ['LLAMA_CPU_DISABLE'] = '1'  # Disable CPU fallback\n\n            # Enhanced CUDA parameters - optimized for GPU usage\n            cuda_params = {\n                \"n_gpu_layers\": -1,    # Use all layers on GPU\n                \"main_gpu\": 0,         # Use the first GPU\n                \"tensor_split\": None,   # No tensor splitting\n                \"use_mmap\": False,     # Disable memory mapping\n                \"use_mlock\": True,     # Lock memory to prevent swapping\n                \"mul_mat_q\": True,     # Enable matrix multiplication\n                \"offload_kqv\": True,   # Keep KQV on GPU\n                \"f16_kv\": True,        # Use float16 for KV cache\n                \"logits_all\": True,    # Compute logits for all tokens\n                \"embedding\": True      # Use GPU for embeddings\n            }\n\n            params.update(cuda_params)\n            logger.info(f\"CUDA parameters configured: {cuda_params}\")\n\n            # Log CUDA device info\n            logger.info(f\"CUDA Device: {torch.cuda.get_device_name(0)}\")\n            logger.info(\n                f\"CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**2:.0f}MB\")\n\n        # Add optional parameters if specified in request\n        optional_params = {\n            \"tensor_split\": self.request.tensor_split,\n            \"split_mode\": self.request.split_mode,\n            \"cache_type\": self.request.cache_type,\n        }\n\n        # Only add optional params if they have non-None values\n        params.update(\n            {k: v for k, v in optional_params.items() if v is not 
None})\n\n        logger.info(f\"Final model parameters: {params}\")\n        return params\n\n    def _configure_gpu_layers(self) -> int:\n        \"\"\"Configure the number of GPU layers based on hardware and request.\"\"\"\n        import torch\n\n        if not torch.cuda.is_available():\n            return 0\n\n        # Force environment variables for CUDA\n        os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n        os.environ['LLAMA_CUDA_FORCE'] = '1'\n\n        # If n_gpu_layers is specified in request, use that\n        if self.request.n_gpu_layers is not None:\n            return self.request.n_gpu_layers\n\n        # Otherwise, use all layers on GPU\n        return -1  # -1 means use all layers on GPU\n\n    def _setup_cache(self, model: Any) -> None:\n        \"\"\"Setup model cache if supported.\"\"\"\n        try:\n            from llama_cpp import LlamaCache\n            if hasattr(model, 'set_cache'):\n                # Convert GB to bytes\n                cache_size = self.request.cache_size * 1024 * 1024 * 1024\n                cache_type = \"fp16\"  # or q8_0 or q4_0 depending on your needs\n                model.set_cache(LlamaCache(capacity_bytes=cache_size))\n                logger.info(\n                    f\"Initialized LLM cache with {self.request.cache_size}GB capacity using {cache_type}\")\n        except Exception as e:\n            logger.warning(f\"Failed to initialize cache: {e}\")\n\n    def get_metadata(self) -> Optional[Dict[str, Any]]:\n        \"\"\"Get model metadata without loading the full model.\"\"\"\n        try:\n            model_path = self._get_model_path()\n            if not model_path.exists():\n                return None\n\n            # Basic metadata\n            metadata = {\n                \"model_type\": \"llama.cpp\",\n                \"model_path\": str(model_path),\n                \"file_size\": model_path.stat().st_size,\n                \"format\": \"GGUF\" if model_path.suffix == '.gguf' else 
\"Unknown\"\n            }\n\n            # Try to get additional metadata from the GGUF file\n            try:\n                from llama_cpp import Llama\n                model = Llama(model_path=str(model_path),\n                              n_ctx=8, n_gpu_layers=0)\n                metadata.update({\n                    \"n_vocab\": model.n_vocab(),\n                    \"n_ctx_train\": model.n_ctx_train(),\n                    \"n_embd\": model.n_embd(),\n                    \"desc\": model.desc(),\n                })\n            except Exception:\n                pass\n\n            return metadata\n        except Exception as e:\n            logger.error(f\"Error getting model metadata: {str(e)}\")\n            return None\n\n    def get_config(self) -> Dict[str, Any]:\n        \"\"\"Get the current model configuration.\"\"\"\n        return {\n            \"model_type\": \"llama.cpp\",\n            \"n_ctx\": self.request.n_ctx,\n            \"n_batch\": self.request.n_batch,\n            \"n_gpu_layers\": self.request.n_gpu_layers,\n            \"device\": self.request.device,\n        }\n\n    @staticmethod\n    def cleanup(model: Any) -> None:\n        \"\"\"Clean up model resources.\"\"\"\n        try:\n            del model\n        except Exception:\n            pass\n"
  },
  {
    "path": "Backend/src/models/loaders/tensorrt.py",
    "content": "import logging\nfrom typing import Any, Dict, Optional, Tuple\n\nfrom src.models.loaders.base import BaseLoader\nfrom src.models.exceptions import ModelLoadError\nfrom transformers import AutoTokenizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass TensorRTLoader(BaseLoader):\n    \"\"\"Loader for TensorRT-LLM models.\"\"\"\n\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"Load a TensorRT-LLM model.\"\"\"\n        try:\n            import tensorrt_llm\n            from tensorrt_llm.runtime import ModelConfig\n        except ImportError:\n            raise ModelLoadError(\n                \"tensorrt-llm is not installed. Please install it from the TensorRT-LLM repository\")\n\n        engine_path = self.request.engine_dir if self.request.engine_dir else self.model_path\n        if not engine_path.exists():\n            raise ModelLoadError(f\"Engine path does not exist: {engine_path}\")\n\n        config = ModelConfig(\n            engine_dir=str(engine_path),\n            max_batch_size=self.request.max_batch_size,\n            max_input_len=self.request.max_input_len,\n            max_output_len=int(\n                self.request.max_output_len) if self.request.max_output_len is not None else None,\n        )\n\n        model = tensorrt_llm.runtime.GenerationSession(config)\n\n        tokenizer = AutoTokenizer.from_pretrained(\n            self.request.tokenizer_path or str(engine_path),\n            trust_remote_code=self.request.trust_remote_code,\n            use_fast=self.request.use_fast_tokenizer,\n        )\n\n        return model, tokenizer\n\n    def get_metadata(self) -> Optional[Dict[str, Any]]:\n        \"\"\"Get model metadata.\"\"\"\n        if not self.model_path.exists():\n            return None\n        return {\n            \"model_type\": \"TensorRT-LLM\",\n            \"model_path\": str(self.model_path),\n            \"file_size\": self.model_path.stat().st_size,\n            \"engine_dir\": 
self.request.engine_dir\n        }\n\n    def get_config(self) -> Dict[str, Any]:\n        \"\"\"Get model configuration.\"\"\"\n        return {\n            \"model_type\": \"TensorRT-LLM\",\n            \"model_name\": self.request.model_name,\n            \"device\": self.request.device,\n            \"engine_dir\": self.request.engine_dir,\n            \"max_batch_size\": self.request.max_batch_size,\n            \"max_input_len\": self.request.max_input_len,\n            \"max_output_len\": self.request.max_output_len\n        }\n"
  },
  {
    "path": "Backend/src/models/loaders/transformers.py",
    "content": "import logging\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple\nimport torch\nfrom transformers import (\n    BitsAndBytesConfig,\n    PreTrainedModel,\n)\n\nfrom src.models.loaders.base import BaseLoader\nfrom src.models.exceptions import ModelLoadError\n\nlogger = logging.getLogger(__name__)\n\n\nclass TransformersLoader(BaseLoader):\n    \"\"\"\n    Loader for Hugging Face Transformers models.\n    Handles both local and remote model loading with various optimizations.\n    \"\"\"\n\n    def load(self) -> Tuple[Any, Any]:\n        \"\"\"Load a transformers model and return the model and tokenizer.\"\"\"\n        try:\n            from transformers import AutoModelForCausalLM, AutoTokenizer\n\n            logger.info(f\"Loading model: {self.request.model_name}\")\n            logger.info(f\"Model type: {self.request.model_type}\")\n            logger.info(f\"Model path: {self.request.model_path}\")\n            logger.info(f\"Device: {self.request.device}\")\n\n            # Configure model loading parameters\n            model_kwargs = self._get_model_kwargs()\n\n            # If we have a local path, use it directly\n            if self.request.model_path and Path(self.request.model_path).exists():\n                logger.info(\n                    f\"Loading model from local path: {self.request.model_path}\")\n                try:\n                    # Try to load tokenizer from local path first\n                    tokenizer = AutoTokenizer.from_pretrained(\n                        self.request.model_path,\n                        trust_remote_code=self.request.trust_remote_code,\n                        use_fast=self.request.use_fast_tokenizer,\n                        padding_side=self.request.padding_side\n                    )\n                    logger.info(\"Loaded tokenizer from local path\")\n\n                    # Load model from local path\n                    model = 
AutoModelForCausalLM.from_pretrained(\n                        self.request.model_path,\n                        **model_kwargs\n                    )\n                    logger.info(\"Loaded model from local path\")\n\n                    # Ensure model is on the correct device if not using device_map\n                    if model_kwargs.get(\"device_map\") is None and hasattr(model, \"to\"):\n                        # Handle device placement\n                        if self.request.device == \"auto\":\n                            device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n                        else:\n                            device = self.request.device\n\n                        model = model.to(device)\n                        logger.info(f\"Moved model to device: {device}\")\n\n                    return model, tokenizer\n                except Exception as e:\n                    logger.warning(f\"Failed to load from local path: {e}\")\n                    raise ModelLoadError(\n                        f\"Failed to load model from local path: {str(e)}\")\n            else:\n                # Download from HuggingFace\n                logger.info(\n                    \"Attempting to download from HuggingFace: \" + self.request.model_name)\n\n                try:\n                    # Download and save tokenizer\n                    tokenizer = AutoTokenizer.from_pretrained(\n                        self.request.model_name,\n                        trust_remote_code=self.request.trust_remote_code,\n                        use_fast=self.request.use_fast_tokenizer,\n                        padding_side=self.request.padding_side\n                    )\n                    if self.request.model_path:\n                        tokenizer.save_pretrained(self.request.model_path)\n                        logger.info(\n                            f\"Tokenizer downloaded and saved to {self.request.model_path}\")\n\n                    # Download 
and save config\n                    if self.request.model_path:\n                        from transformers import AutoConfig\n                        config = AutoConfig.from_pretrained(\n                            self.request.model_name,\n                            trust_remote_code=self.request.trust_remote_code\n                        )\n                        config.save_pretrained(self.request.model_path)\n                        logger.info(\n                            f\"Config downloaded and saved to {self.request.model_path}\")\n\n                    # Download model weights\n                    logger.info(\n                        \"Downloading model weights (this may take a while)...\")\n                    model = AutoModelForCausalLM.from_pretrained(\n                        self.request.model_name,\n                        **model_kwargs\n                    )\n\n                    # Save the model if we have a path\n                    if self.request.model_path:\n                        model.save_pretrained(self.request.model_path)\n                        logger.info(\n                            f\"Model weights saved to {self.request.model_path}\")\n\n                    return model, tokenizer\n                except Exception as e:\n                    raise ModelLoadError(f\"Failed to download model: {str(e)}\")\n\n        except Exception as e:\n            raise ModelLoadError(\n                f\"Failed to load transformers model: {str(e)}\")\n\n    def _get_model_kwargs(self) -> Dict[str, Any]:\n        \"\"\"Get model loading parameters.\"\"\"\n        # Get the compute dtype\n        compute_dtype = torch.bfloat16 if self.request.compute_dtype == \"bfloat16\" else torch.float16\n\n        # Determine device map\n        device_map = None\n        if self.request.device == \"cuda\":\n            if torch.cuda.is_available():\n                device_map = \"auto\"\n            else:\n                logger.warning(\n              
      \"CUDA requested but not available, falling back to CPU\")\n                self.request.device = \"cpu\"\n\n        # Base parameters without gradient checkpointing\n        load_params = {\n            \"low_cpu_mem_usage\": True,\n            \"torch_dtype\": compute_dtype,\n            \"trust_remote_code\": self.request.trust_remote_code,\n            \"use_flash_attention_2\": self.request.use_flash_attention,\n            \"device_map\": device_map,\n            \"revision\": self.request.revision,\n        }\n\n        # Only add gradient checkpointing for explicitly supported models\n        model_name_lower = self.request.model_name.lower()\n        if (\"llama\" in model_name_lower or\n            \"mistral\" in model_name_lower or\n                \"mpt\" in model_name_lower):\n            load_params[\"use_gradient_checkpointing\"] = True\n\n        # Configure quantization\n        if self.request.load_in_8bit or self.request.load_in_4bit:\n            load_params[\"quantization_config\"] = self._get_quantization_config()\n\n        # Add optional parameters\n        if self.request.max_memory is not None and self.request.device == \"cuda\":\n            load_params[\"max_memory\"] = self.request.max_memory\n\n        if self.request.rope_scaling is not None:\n            load_params[\"rope_scaling\"] = self.request.rope_scaling\n\n        if self.request.use_cache is False:\n            load_params[\"use_cache\"] = False\n\n        # For model loading, return the original params with torch.dtype\n        if not hasattr(self, '_serializing_for_response'):\n            return load_params\n\n        # For JSON response, convert torch.dtype to string\n        response_params = load_params.copy()\n        response_params[\"torch_dtype\"] = str(compute_dtype)\n        return response_params  # Return string version for JSON serialization\n\n    def _get_quantization_config(self) -> BitsAndBytesConfig:\n        \"\"\"Get quantization 
configuration.\"\"\"\n        return BitsAndBytesConfig(\n            load_in_8bit=self.request.load_in_8bit,\n            load_in_4bit=self.request.load_in_4bit,\n            bnb_4bit_compute_dtype=getattr(torch, self.request.compute_dtype),\n            llm_int8_enable_fp32_cpu_offload=True,\n            bnb_4bit_use_double_quant=True\n        )\n\n    def get_metadata(self) -> Optional[Dict[str, Any]]:\n        \"\"\"Get model metadata without loading the full model.\"\"\"\n        try:\n            if '/' in self.request.model_name and not self.model_path.exists():\n                config = self._load_config(self.request.model_name)\n                metadata = self._make_json_serializable(config.to_dict())\n                metadata['model_type'] = 'Transformers'\n                return metadata\n\n            if self.model_path.exists():\n                config = self._load_config(self.model_path)\n                metadata = self._make_json_serializable(config.to_dict())\n                metadata['model_type'] = 'Transformers'\n                return metadata\n\n            return None\n        except Exception as e:\n            logger.error(f\"Error getting model metadata: {str(e)}\")\n            return None\n\n    def get_config(self) -> Dict[str, Any]:\n        \"\"\"Get the current model configuration.\"\"\"\n        # Set flag to get JSON serializable params\n        self._serializing_for_response = True\n        load_params = self._get_model_kwargs()\n        delattr(self, '_serializing_for_response')\n\n        config = {\n            \"model_type\": \"Transformers\",\n            \"model_name\": self.request.model_name,\n            \"device\": self.request.device,\n            \"load_params\": load_params\n        }\n\n        if self.model_path.exists():\n            try:\n                model_config = self._load_config(self.model_path)\n                config[\"model_config\"] = model_config.to_dict()\n            except Exception as e:\n
                logger.warning(f\"Could not load model config: {str(e)}\")\n\n        return self._make_json_serializable(config)\n\n    def _make_json_serializable(self, obj: Any) -> Any:\n        \"\"\"Convert a dictionary with torch dtypes to JSON serializable format.\"\"\"\n        if isinstance(obj, dict):\n            return {k: self._make_json_serializable(v) for k, v in obj.items()}\n        elif isinstance(obj, list):\n            return [self._make_json_serializable(v) for v in obj]\n        elif hasattr(obj, 'dtype'):  # Handle torch dtypes\n            return str(obj)\n        return obj\n\n    @staticmethod\n    def cleanup(model: PreTrainedModel) -> None:\n        \"\"\"Clean up model resources.\"\"\"\n        try:\n            if hasattr(model, 'cpu'):\n                model.cpu()\n            del model\n        except Exception as e:\n            logger.warning(f\"Error during model cleanup: {str(e)}\")\n"
  },
  {
    "path": "Backend/src/models/manager.py",
    "content": "import logging\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Any, Dict, Union\n\nfrom src.endpoint.models import ModelLoadRequest\nfrom src.models.utils.device import get_device\nfrom src.models.utils.platform import check_platform_compatibility\nfrom src.models.utils.detect_type import detect_model_type\nfrom src.models.exceptions import ModelLoadError, ModelNotFoundError\nfrom src.models.loaders import (\n    TransformersLoader,\n    LlamaCppLoader,\n    LlamaCppHFLoader,\n    ExLlamaV2Loader,\n    ExLlamaV2HFLoader,\n    HQQLoader,\n    TensorRTLoader\n)\n\nlogger = logging.getLogger(__name__)\n\nclass ModelManager:\n    \"\"\"\n    Manages the loading, unloading, and switching of different AI models.\n    Supports multiple model types and handles resource management.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize the model manager with empty state.\"\"\"\n        self.current_model: Optional[Any] = None\n        self.current_tokenizer: Optional[Any] = None\n        self.model_type: Optional[str] = None\n        self.device: Optional[str] = None\n        self.model_name: Optional[str] = None\n        self._is_loading: bool = False\n        self.model_config: Optional[Dict[str, Any]] = None\n\n        # Map model types to their respective loaders\n        self.loader_mapping = {\n            'Transformers': TransformersLoader,\n            'llama.cpp': LlamaCppLoader,\n            'llamacpp_HF': LlamaCppHFLoader,\n            'ExLlamav2': ExLlamaV2Loader,\n            'ExLlamav2_HF': ExLlamaV2HFLoader,\n            'HQQ': HQQLoader,\n            'TensorRT-LLM': TensorRTLoader\n        }\n\n    def check_platform_compatibility(self, model_type: str) -> Tuple[bool, str]:\n        \"\"\"Check if the current platform is compatible with the specified model type.\"\"\"\n        return check_platform_compatibility(model_type)\n\n    def get_model_metadata(self, request: ModelLoadRequest) -> Optional[Dict[str, Any]]:\n   
     \"\"\"\n        Get model metadata without loading the full model.\n        \n        Args:\n            request: Model load request containing model information\n            \n        Returns:\n            Dictionary containing model metadata or None if not found\n        \"\"\"\n        try:\n            model_path = Path(request.model_path) if request.model_path else Path(\n                f\"models/{request.model_name}\")\n\n            # Get the appropriate loader\n            loader_class = self.loader_mapping.get(request.model_type)\n            if loader_class:\n                loader = loader_class(request, self)\n                return loader.get_metadata()\n            \n            return None\n        except Exception as e:\n            logger.error(f\"Error getting model metadata: {str(e)}\")\n            return None\n\n    def is_model_loaded(self) -> bool:\n        \"\"\"Check if a model is currently loaded.\"\"\"\n        return self.current_model is not None\n\n    def get_model_info(self) -> Dict[str, Any]:\n        \"\"\"\n        Get information about the currently loaded model.\n        \n        Returns:\n            Dictionary containing model information\n        \"\"\"\n        info = {\n            \"model_name\": self.model_name,\n            \"model_type\": self.model_type,\n            \"device\": self.device,\n            \"is_loaded\": self.is_model_loaded(),\n            \"is_loading\": self._is_loading,\n        }\n        if self.model_config:\n            info[\"config\"] = self._make_json_serializable(self.model_config)\n        return self._make_json_serializable(info)\n\n    def clear_model(self) -> None:\n        \"\"\"Unload the current model and clear CUDA cache.\"\"\"\n        try:\n            if self.current_model is not None:\n                # Let the specific loader handle cleanup if method exists\n                loader_class = self.loader_mapping.get(self.model_type)\n                if loader_class and 
hasattr(loader_class, 'cleanup'):\n                    loader_class.cleanup(self.current_model)\n                else:\n                    # Default cleanup\n                    if hasattr(self.current_model, 'cpu'):\n                        self.current_model.cpu()\n                    del self.current_model\n\n            if self.current_tokenizer is not None:\n                del self.current_tokenizer\n\n            # Reset all attributes\n            self.current_model = None\n            self.current_tokenizer = None\n            self.model_type = None\n            self.device = None\n            self.model_name = None\n            self.model_config = None\n\n            # Clear CUDA cache if available\n            import torch\n            import gc\n            gc.collect()\n            if torch.cuda.is_available():\n                torch.cuda.empty_cache()\n\n        except Exception as e:\n            logger.error(f\"Error clearing model: {str(e)}\")\n            raise\n\n    def _make_json_serializable(self, obj: Any) -> Any:\n        \"\"\"Convert objects to JSON serializable format.\"\"\"\n        if isinstance(obj, dict):\n            return {k: self._make_json_serializable(v) for k, v in obj.items()}\n        elif isinstance(obj, list):\n            return [self._make_json_serializable(v) for v in obj]\n        elif hasattr(obj, 'dtype'):  # Handle torch dtypes\n            return str(obj)\n        return obj\n\n    def load_model(self, request: ModelLoadRequest) -> Tuple[Any, Any]:\n        \"\"\"\n        Load a model based on the request configuration.\n        \n        Args:\n            request: Model load request containing all necessary parameters\n            \n        Returns:\n            Tuple of (model, tokenizer)\n            \n        Raises:\n            ModelLoadError: If there's an error during model loading\n            ModelNotFoundError: If the requested model is not found\n        \"\"\"\n        if self._is_loading:\n          
  raise ModelLoadError(\"A model is already being loaded\")\n\n        try:\n            self._is_loading = True\n            self.clear_model()  # Clear any existing model\n\n            # Set device using imported get_device function\n            self.device = get_device(request)\n            self.model_name = request.model_name\n\n            # Handle Ollama models first - convert to llama.cpp\n            if request.model_type == 'ollama':\n                try:\n                    # Read the manifest to get the blob SHA\n                    manifest_path = Path(request.model_path) / 'latest'\n                    logger.info(f\"Looking for manifest at: {manifest_path}\")\n                    if not manifest_path.exists():\n                        raise ModelLoadError(f\"Manifest file not found at: {manifest_path}\")\n\n                    import json\n                    with open(manifest_path) as f:\n                        manifest = json.load(f)\n                    logger.info(f\"Manifest content: {json.dumps(manifest, indent=2)}\")\n                    \n                    # Get the model layer (first layer with mediaType 'application/vnd.ollama.image.model')\n                    try:\n                        model_layer = next(layer for layer in manifest['layers'] \n                                        if layer['mediaType'] == 'application/vnd.ollama.image.model')\n                    except StopIteration:\n                        raise ModelLoadError(\"No model layer found in manifest\")\n\n                    # Extract SHA and construct blob path\n                    sha = model_layer['digest'].split(':')[1]\n                    # Ollama stores the files directly in the blobs directory with a sha256- prefix\n                    blob_path = Path(request.model_path).parent.parent.parent.parent / 'blobs' / f'sha256-{sha}'\n                    logger.info(f\"Looking for blob at: {blob_path}\")\n                    \n                    if not 
blob_path.exists():\n                        raise ModelLoadError(f\"Model file not found at: {blob_path}\")\n\n                    # Update the request to use the actual model file\n                    request.model_path = str(blob_path)\n                    request.model_type = \"llama.cpp\"\n                    logger.info(f\"Converting Ollama model to llama.cpp with path: {request.model_path}\")\n\n                except Exception as e:\n                    logger.error(f\"Error processing Ollama model: {str(e)}\")\n                    raise ModelLoadError(f\"Failed to process Ollama model: {str(e)}\")\n\n            # Check if model exists locally first\n            model_path = Path(request.model_path) if request.model_path else Path(f\"models/{request.model_name}\")\n            if model_path.exists():\n                logger.info(f\"Found local model at: {model_path}\")\n                # Auto-detect model type if not specified\n                if not request.model_type or request.model_type == \"auto\":\n                    request.model_type = self._detect_model_type(request)\n                    logger.info(f\"Detected model type: {request.model_type}\")\n            else:\n                # Only attempt to download if it looks like a HF model ID\n                if '/' in request.model_name:\n                    logger.info(f\"Model not found locally, will attempt to download from HuggingFace\")\n                else:\n                    raise ModelNotFoundError(f\"Model not found at: {model_path}\")\n\n            # Check platform compatibility\n            is_compatible, message = check_platform_compatibility(request.model_type)\n            if not is_compatible:\n                raise ModelLoadError(message)\n            logger.info(message)\n\n            # Get the appropriate loader\n            loader_class = self.loader_mapping.get(request.model_type)\n            logger.info(f\"Model type: {request.model_type}\")\n            
logger.info(f\"Available loaders: {list(self.loader_mapping.keys())}\")\n            if not loader_class:\n                raise ModelLoadError(f\"Unsupported model type: {request.model_type}\")\n\n            # Initialize and use the loader\n            loader = loader_class(request, self)\n            model, tokenizer = loader.load()\n\n            # Store the results\n            self.current_model = model\n            self.current_tokenizer = tokenizer\n            self.model_type = request.model_type\n            # Make config JSON serializable before storing\n            self.model_config = self._make_json_serializable(loader.get_config())\n\n            return model, tokenizer\n\n        except Exception as e:\n            logger.error(f\"Error loading model: {str(e)}\", exc_info=True)\n            self.clear_model()  # Cleanup on failure\n            if isinstance(e, (ModelLoadError, ModelNotFoundError)):\n                raise\n            raise ModelLoadError(str(e))\n\n        finally:\n            self._is_loading = False\n\n    def _detect_model_type(self, request: ModelLoadRequest) -> str:\n        \"\"\"\n        Detect the type of model based on the model path and name.\n        \n        Args:\n            request: Model load request\n            \n        Returns:\n            String indicating the detected model type\n        \"\"\"\n        model_path = Path(request.model_path) if request.model_path else Path(\n            f\"models/{request.model_name}\")\n\n        if model_path.exists():\n            return detect_model_type(model_path)\n        \n        # Default to Transformers for HF models\n        if '/' in request.model_name:\n            return \"Transformers\"\n        \n        raise ModelNotFoundError(\n            f\"Could not detect model type: {request.model_name}\")\n\n\n# Global model manager instance\nmodel_manager = ModelManager()"
  },
  {
    "path": "Backend/src/models/streamer.py",
    "content": "import traceback\nfrom queue import Queue\nfrom threading import Thread\nfrom typing import Optional, Callable, Any, List, Union, AsyncIterator, Iterator, Dict\nimport torch\nimport time\nimport asyncio\nimport json\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass StopNowException(Exception):\n    pass\n\n\nclass StreamingStoppingCriteria:\n    \"\"\"Base class for stopping criteria during text generation\"\"\"\n\n    def __init__(self):\n        pass\n\n    def __call__(self, input_ids, scores) -> bool:\n        return False\n\n\nclass StopOnInterrupt(StreamingStoppingCriteria):\n    \"\"\"Stopping criteria that checks for interruption signals\"\"\"\n\n    def __init__(self, stop_signal=None):\n        super().__init__()\n        self.stop_signal = stop_signal or (lambda: False)\n\n    def __call__(self, input_ids, scores) -> bool:\n        return self.stop_signal()\n\n\nclass StreamIterator(AsyncIterator[str], Iterator[str]):\n    \"\"\"Iterator that streams tokens as they are generated.\"\"\"\n\n    def __init__(self, func: Callable, callback: Optional[Callable] = None):\n        self.func = func\n        self.callback = callback\n        self.queue = Queue()\n        self.async_queue = asyncio.Queue()\n        self.sentinel = object()\n        self.stop_now = False\n        self.thread = None\n\n    def _queue_callback(self, data):\n        \"\"\"Callback that puts data into both queues\"\"\"\n        if self.stop_now:\n            raise StopNowException\n\n        if data is None:\n            self.queue.put(self.sentinel)\n            self.async_queue.put_nowait(None)\n            return\n\n        if self.callback:\n            self.callback(data)\n\n        formatted_data = f\"data: {json.dumps(data)}\\n\\n\"\n        self.queue.put(formatted_data)\n        self.async_queue.put_nowait(formatted_data)\n\n    def _start_generation(self):\n        if not self.thread:\n            def task():\n                try:\n             
       self.func(self._queue_callback)\n                except StopNowException:\n                    pass\n                except Exception:\n                    traceback.print_exc()\n                finally:\n                    self._queue_callback(None)\n\n            self.thread = Thread(target=task)\n            self.thread.start()\n\n    def __iter__(self) -> Iterator[str]:\n        self._start_generation()\n        return self\n\n    def __next__(self) -> str:\n        if not self.thread:\n            self._start_generation()\n\n        item = self.queue.get()\n        if item is self.sentinel:\n            raise StopIteration\n        return item\n\n    def __aiter__(self):\n        self._start_generation()\n        return self\n\n    async def __anext__(self) -> str:\n        if not self.thread:\n            self._start_generation()\n\n        try:\n            item = await self.async_queue.get()\n            if item is None:\n                raise StopAsyncIteration\n            return item\n        except Exception as e:\n            if isinstance(e, StopAsyncIteration):\n                raise\n            raise StopAsyncIteration from e\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.stop_now = True\n\n\nclass TextGenerator:\n    \"\"\"A text generator that streams tokens as they are generated.\"\"\"\n\n    def __init__(self, model, tokenizer, device: str = \"cpu\"):\n        self.model = model\n        self.tokenizer = tokenizer\n        self.device = device\n        self.stop_signal = False\n        self._log_cuda_status()\n\n    def _log_cuda_status(self):\n        \"\"\"Log CUDA status if available\"\"\"\n        if hasattr(torch.cuda, 'is_available') and torch.cuda.is_available():\n            logger.info(\"CUDA is available in TextGenerator\")\n            logger.info(\n                f\"Model GPU layers: {getattr(self.model, 'n_gpu_layers', 'unknown')}\")\n            
logger.info(\n                f\"CUDA Memory allocated: {torch.cuda.memory_allocated() / 1024**2:.2f}MB\")\n            logger.info(\n                f\"CUDA Memory reserved: {torch.cuda.memory_reserved() / 1024**2:.2f}MB\")\n\n    def _create_stream_response(self, text: str, generated_text: str, is_final: bool = False) -> Dict:\n        \"\"\"Create a standardized streaming response\"\"\"\n        response = {\n            \"id\": \"chatcmpl-\" + str(hash(generated_text))[-12:],\n            \"object\": \"chat.completion.chunk\",\n            \"created\": int(time.time()),\n            \"model\": \"local-model\",\n            \"choices\": [{\n                \"index\": 0,\n                \"delta\": {} if is_final else {\"content\": text},\n                \"finish_reason\": \"stop\" if is_final else None\n            }]\n        }\n        return response\n\n    def _stream_tokens(self, callback: Callable, generator, decode_func: Callable) -> str:\n        \"\"\"Generic token streaming implementation\"\"\"\n        generated_text = \"\"\n        for output in generator:\n            text = decode_func(output)\n            generated_text += text\n            callback(self._create_stream_response(text, generated_text))\n\n        # Send final message\n        callback(self._create_stream_response(\n            \"\", generated_text, is_final=True))\n        callback(None)\n        return generated_text\n\n    def generate(self,\n                 prompt: str,\n                 max_new_tokens: int = 100,\n                 temperature: float = 0.7,\n                 top_p: float = 0.95,\n                 top_k: int = 50,\n                 repetition_penalty: float = 1.1,\n                 stopping_criteria: Optional[List[StreamingStoppingCriteria]] = None,\n                 callback: Optional[Callable[[dict], Any]] = None,\n                 stream: bool = True) -> Union[str, Any]:\n        \"\"\"Generate text from a prompt, optionally streaming the output.\"\"\"\n\n    
    if hasattr(self.model, 'create_completion'):\n            # llama.cpp model\n            completion_args = {\n                \"prompt\": prompt,\n                \"max_tokens\": max_new_tokens,\n                \"temperature\": temperature,\n                \"top_p\": top_p,\n                \"top_k\": top_k,\n                \"repeat_penalty\": repetition_penalty,\n                \"stream\": stream\n            }\n\n            if stream:\n                def _stream(callback):\n                    completion = self.model.create_completion(\n                        **completion_args)\n                    return self._stream_tokens(\n                        callback,\n                        completion,\n                        lambda x: x[\"choices\"][0][\"text\"]\n                    )\n                return StreamIterator(_stream, callback=callback)\n            else:\n                completion = self.model.create_completion(**completion_args)\n                return completion[\"choices\"][0][\"text\"]\n        else:\n            # Other models (transformers)\n            inputs = self.tokenizer(\n                prompt, return_tensors=\"pt\", padding=True).to(self.device)\n            gen_config = {\n                \"max_new_tokens\": max_new_tokens,\n                \"temperature\": max(temperature, 1e-2),\n                \"top_p\": min(max(top_p, 0.1), 0.95),\n                \"top_k\": top_k,\n                \"repetition_penalty\": repetition_penalty,\n                \"do_sample\": True,\n                \"pad_token_id\": self.tokenizer.pad_token_id,\n                \"eos_token_id\": self.tokenizer.eos_token_id,\n                \"use_cache\": True\n            }\n\n            if stream:\n                def _stream(callback):\n                    with torch.no_grad():\n                        generator = self.model.generate(\n                            **inputs,\n                            **gen_config,\n                            
stopping_criteria=stopping_criteria,\n                            return_dict_in_generate=True,\n                            output_scores=True\n                        )\n                        return self._stream_tokens(\n                            callback,\n                            generator,\n                            lambda x: self.tokenizer.decode(\n                                [x.sequences[0, -1].item() if not isinstance(x,\n                                                                             torch.Tensor) else x.item()],\n                                skip_special_tokens=True\n                            )\n                        )\n                return StreamIterator(_stream, callback=callback)\n            else:\n                with torch.no_grad():\n                    output = self.model.generate(\n                        **inputs,\n                        **gen_config,\n                        stopping_criteria=stopping_criteria,\n                        return_dict_in_generate=True,\n                        output_scores=True\n                    )\n                    return self.tokenizer.decode(output.sequences[0], skip_special_tokens=True)\n\n# End of TextGenerator class - everything after this line should be removed\n"
  },
  {
    "path": "Backend/src/models/utils/__init__.py",
    "content": ""
  },
  {
    "path": "Backend/src/models/utils/detect_type.py",
    "content": "import json\nfrom pathlib import Path\nfrom typing import Union\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef detect_model_type(model_path: Union[str, Path]) -> str:\n    \"\"\"\n    Detect the model type from the model files and metadata\n    Returns one of: 'ollama', 'Transformers', 'llama.cpp', 'llamacpp_HF', 'ExLlamav2', 'ExLlamav2_HF', 'HQQ', 'TensorRT-LLM'\n    \"\"\"\n    model_path = Path(model_path)\n    if not model_path.exists():\n        raise ValueError(f\"Model path does not exist: {model_path}\")\n\n    # Check for model metadata\n    metadata_path = model_path / \"metadata.json\"\n    if metadata_path.exists():\n        try:\n            with open(metadata_path, 'r') as f:\n                metadata = json.load(f)\n                if \"model_type\" in metadata:\n                    return metadata[\"model_type\"]\n        except:\n            logger.warning(f\"Could not read metadata from {metadata_path}\")\n\n    # Check for specific file patterns\n    files = list(model_path.glob(\"*\"))\n    file_names = [f.name for f in files]\n\n    # TensorRT-LLM check\n    if any(f.endswith('.engine') for f in file_names) or any(f.endswith('.plan') for f in file_names):\n        return 'TensorRT-LLM'\n\n    # llama.cpp check\n    if any(f.endswith('.gguf') for f in file_names):\n        # Check if there's a HF tokenizer\n        if any(f == 'tokenizer_config.json' for f in file_names):\n            return 'llamacpp_HF'\n        return 'llama.cpp'\n\n    # HQQ check\n    if any(f.endswith('.hqq') for f in file_names):\n        return 'HQQ'\n\n    # Default to Transformers for standard HF models\n    if any(f in file_names for f in ['config.json', 'pytorch_model.bin', 'model.safetensors']):\n        # Only check for ExLlamav2 if we find specific ExLlamav2 files\n        if (model_path / 'tokenizer.model').exists():\n            config_path = model_path / 'config.json'\n            try:\n
                with open(config_path, 'r') as f:\n                    config = json.load(f)\n                    if config.get('model_type', '').lower() in ['llama', 'mistral']:\n                        return 'ExLlamav2'\n            except:\n                pass\n        return 'Transformers'\n\n    raise ValueError(\n        f\"Could not determine model type from files in {model_path}\")\n"
  },
  {
    "path": "Backend/src/models/utils/device.py",
    "content": "import torch\nfrom src.endpoint.models import ModelLoadRequest\n\n\ndef get_device(request: ModelLoadRequest) -> str:\n    if request.device != \"auto\":\n        return request.device\n\n    if torch.cuda.is_available():\n        print(\"CUDA is available\")\n        return \"cuda\"\n    elif torch.backends.mps.is_available():\n        print(\"MPS is available\")\n        return \"mps\"\n    else:\n        print(\"No GPU available\")\n        return \"cpu\"\n"
  },
  {
    "path": "Backend/src/models/utils/download.py",
    "content": "import os\nimport logging\nimport requests\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom typing import List, Dict, Optional\n\nlogger = logging.getLogger(__name__)\n\ndef download_file_with_progress(url: str, file_path: Path, headers: Optional[Dict[str, str]] = None) -> None:\n    \"\"\"Download a file with progress bar\"\"\"\n    try:\n        response = requests.get(url, stream=True, headers=headers or {})\n        response.raise_for_status()\n\n        total_size = int(response.headers.get('content-length', 0))\n        block_size = 8192  # 8 KB\n\n        with open(file_path, 'wb') as f, tqdm(\n            desc=file_path.name,\n            total=total_size,\n            unit='iB',\n            unit_scale=True,\n            unit_divisor=1024,\n        ) as pbar:\n            for data in response.iter_content(block_size):\n                size = f.write(data)\n                pbar.update(size)\n\n        logger.info(f\"Successfully downloaded {file_path.name}\")\n    except Exception as e:\n        if file_path.exists() and file_path.stat().st_size == 0:\n            file_path.unlink()  # Remove empty/partial file\n        raise ValueError(f\"Failed to download file {file_path.name}: {str(e)}\")\n\ndef get_hf_repo_files(repo_id: str, hf_token: Optional[str] = None) -> List[Dict]:\n    \"\"\"Get list of files in a HuggingFace repository\"\"\"\n    api_url = f\"https://huggingface.co/api/models/{repo_id}/tree/main\"\n    headers = {\"Accept\": \"application/json\"}\n    if hf_token:\n        headers[\"Authorization\"] = f\"Bearer {hf_token}\"\n        logger.info(\"Using provided HuggingFace token\")\n\n    logger.info(f\"Fetching repository contents from {api_url}\")\n    response = requests.get(api_url, headers=headers)\n    response.raise_for_status()\n    return response.json()\n\ndef download_hf_model_files(repo_id: str, model_path: Path, required_files: List[str], hf_token: Optional[str] = None) -> None:\n    \"\"\"Download required 
files from a HuggingFace repository\"\"\"\n    try:\n        files = get_hf_repo_files(repo_id, hf_token)\n        logger.info(f\"Found {len(files)} files in repository\")\n        logger.info(f\"Required files: {required_files}\")\n\n        headers = {}\n        if hf_token:\n            headers[\"Authorization\"] = f\"Bearer {hf_token}\"\n\n        for file_name in required_files:\n            file_info = next((f for f in files if f['path'] == file_name), None)\n            if not file_info:\n                logger.error(f\"Required file {file_name} not found in repository. Available files: {[f['path'] for f in files]}\")\n                raise ValueError(f\"Required file {file_name} not found in repository {repo_id}\")\n\n            download_url = f\"https://huggingface.co/{repo_id}/resolve/main/{file_name}\"\n            file_path = model_path / file_name\n\n            logger.info(f\"Downloading {file_name} ({file_info.get('size', 'unknown size')}) from {download_url}\")\n            download_file_with_progress(download_url, file_path, headers)\n\n    except Exception as e:\n        logger.error(f\"Failed to download model: {str(e)}\", exc_info=True)\n        # Clean up any partially downloaded files\n        if model_path.exists():\n            import shutil\n            shutil.rmtree(model_path)\n        raise ValueError(f\"Failed to download model: {str(e)}\")\n\ndef find_best_gguf_file(files: List[Dict]) -> Optional[Dict]:\n    \"\"\"Find the best GGUF file from a list of files, preferring q4_k_m files and sorting by size\"\"\"\n    gguf_files = [f for f in files if f.get('path', '').endswith('.gguf')]\n    if not gguf_files:\n        return None\n\n    # Sort by preference for q4_k_m files and then by size\n    gguf_files.sort(key=lambda x: (\n        0 if 'q4_k_m' in x['path'].lower() else 1,\n        x.get('size', float('inf'))\n    ))\n    return gguf_files[0]\n\ndef download_gguf_model(repo_id: str, model_path: Path, hf_token: Optional[str] = None) 
 -> Path:\n    \"\"\"Download a GGUF model from HuggingFace\"\"\"\n    file_path = None\n    try:\n        files = get_hf_repo_files(repo_id, hf_token)\n        file_info = find_best_gguf_file(files)\n        if not file_info:\n            raise ValueError(f\"No GGUF files found in repository {repo_id}\")\n\n        file_name = file_info['path']\n        download_url = f\"https://huggingface.co/{repo_id}/resolve/main/{file_name}\"\n        file_path = model_path / file_name\n\n        # Only download if file doesn't exist or is empty\n        if not file_path.exists() or file_path.stat().st_size == 0:\n            headers = {\"Authorization\": f\"Bearer {hf_token}\"} if hf_token else {}\n            download_file_with_progress(download_url, file_path, headers)\n\n        return file_path\n    except Exception as e:\n        # Only remove a partially downloaded (empty) file, never the model directory\n        if file_path is not None and file_path.exists() and file_path.stat().st_size == 0:\n            file_path.unlink()\n        raise ValueError(f\"Failed to download GGUF model: {str(e)}\")\n"
  },
  {
    "path": "Backend/src/models/utils/platform.py",
    "content": "import platform\nfrom typing import Tuple\n\n\ndef check_platform_compatibility(model_type: str) -> Tuple[bool, str]:\n    \"\"\"\n    Check if the model type is compatible with the current platform\n    Returns (is_compatible, message)\n    \"\"\"\n    current_platform = platform.system().lower()\n\n    platform_compatibility = {\n        'TensorRT-LLM': ['linux'],  # TensorRT only works on Linux\n        # ExLlama works on Windows and Linux\n        'ExLlamav2': ['windows', 'linux'],\n        'ExLlamav2_HF': ['windows', 'linux'],\n        # HQQ works on all platforms\n        'HQQ': ['linux', 'windows', 'darwin'],\n        # llama.cpp works on all platforms\n        'llama.cpp': ['linux', 'windows', 'darwin'],\n        'llamacpp_HF': ['linux', 'windows', 'darwin'],\n        # Transformers works on all platforms\n        'Transformers': ['linux', 'windows', 'darwin'],\n        'ollama': ['linux', 'windows', 'darwin']\n    }\n\n    compatible_platforms = platform_compatibility.get(model_type, [])\n    is_compatible = current_platform in compatible_platforms\n\n    if not is_compatible:\n        message = f\"Model type '{model_type}' is not compatible with {platform.system()}. Compatible platforms: {', '.join(compatible_platforms)}\"\n    else:\n        message = f\"Model type '{model_type}' is compatible with {platform.system()}\"\n\n    return is_compatible, message\n"
  },
  {
    "path": "Backend/src/vectorstorage/embeddings.py",
    "content": "import time\n\n\ndef chunk_list(lst, n):\n    \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n    for i in range(0, len(lst), n):\n        yield lst[i:i + n]\n\n\ndef embed_chunk(args):\n    \"\"\"Embed a chunk of documents.\"\"\"\n    vectordb, chunk, chunk_num, total_chunks, start_time, time_history = args\n    try:\n        vectordb.add_documents(chunk)\n\n        # Calculate time taken for this chunk\n        current_time = time.time()\n        chunk_time = current_time - start_time\n        time_history.append(chunk_time)\n\n        # Keep only last 5 times\n        if len(time_history) > 5:\n            time_history.popleft()\n\n        # Basic stats to return for all chunks\n        result = {\n            \"chunk\": chunk_num,\n            \"total_chunks\": total_chunks,\n            \"docs_in_chunk\": len(chunk),\n            \"percent_complete\": round((chunk_num / total_chunks * 100), 2),\n            \"elapsed_time\": current_time - start_time,\n        }\n\n        # Only add time estimates after 20 chunks and if we have enough data points\n        if chunk_num >= 20 and len(time_history) >= 3:\n            current_avg_time = sum(time_history) / len(time_history)\n\n            # Store the lowest average time seen so far\n            if not hasattr(embed_chunk, 'lowest_avg_time') or current_avg_time < embed_chunk.lowest_avg_time:\n                embed_chunk.lowest_avg_time = current_avg_time\n\n            remaining_chunks = total_chunks - chunk_num\n            est_remaining_time = remaining_chunks * embed_chunk.lowest_avg_time\n            est_finish_time = time.strftime(\n                '%H:%M:%S', time.localtime(current_time + est_remaining_time))\n            est_remaining_time_formatted = time.strftime(\n                '%H:%M:%S', time.gmtime(est_remaining_time))\n\n            result.update({\n                \"est_finish_time\": est_finish_time,\n                \"time_per_chunk\": embed_chunk.lowest_avg_time,\n        
        \"remaining_chunks\": remaining_chunks,\n                \"est_remaining_time\": est_remaining_time_formatted\n            })\n        else:\n            result.update({\n                \"est_finish_time\": \"calculating...\",\n                \"time_per_chunk\": \"calculating...\",\n                \"remaining_chunks\": total_chunks - chunk_num,\n                \"est_remaining_time\": \"calculating...\"\n            })\n\n        return result\n    except Exception as e:\n        raise Exception(\n            f\"Error embedding chunk {chunk_num}/{total_chunks}: {str(e)}\")\n"
  },
  {
    "path": "Backend/src/vectorstorage/helpers/sanitizeCollectionName.py",
    "content": "import re\n\n\ndef sanitize_collection_name(name):\n    try:\n        # Collapse runs of consecutive dots first; the next substitution\n        # rewrites any remaining invalid characters (including single dots) to '_'\n        sanitized = re.sub(r'\\.{2,}', '_', name)\n        sanitized = re.sub(r'[^\\w\\-]', '_', sanitized)\n        sanitized = re.sub(r'^[^\\w]|[^\\w]$', '', sanitized)\n\n        if len(sanitized) < 3:\n            sanitized = sanitized.ljust(3, \"_\")\n        elif len(sanitized) > 63:\n            sanitized = sanitized[:63]\n        return sanitized\n    except Exception as e:\n        print(f\"Error sanitizing collection name: {str(e)}\")\n        return None\n"
  },
  {
    "path": "Backend/src/vectorstorage/init_store.py",
    "content": "from langchain_huggingface import HuggingFaceEmbeddings\nimport logging\nimport torch\nimport os\nfrom pathlib import Path\n\nlogger = logging.getLogger(__name__)\n\ndef get_models_dir():\n    if os.name == 'posix':\n        # For Linux, use ~/.local/share/Notate/embeddings_models\n        if os.uname().sysname == 'Linux':\n            base_dir = os.path.expanduser('~/.local/share/Notate')\n        # For macOS, use ~/Library/Application Support/Notate/embeddings_models\n        else:\n            base_dir = os.path.expanduser('~/Library/Application Support/Notate')\n    else:\n        # For Windows, use ~/.notate/embeddings_models\n        base_dir = os.path.expanduser('~/.notate')\n    \n    models_dir = os.path.join(base_dir, 'embeddings_models')\n    os.makedirs(models_dir, exist_ok=True)\n    return models_dir\n\nasync def init_store(model_name: str = \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\"):\n    logger.info(\"Initializing HuggingFace embeddings\")\n\n    # Determine the appropriate device\n    if torch.cuda.is_available():\n        device = \"cuda\"\n    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():\n        device = \"mps\"\n    else:\n        device = \"cpu\"\n        \n    logger.info(f\"Using device: {device}\")\n    models_dir = get_models_dir()\n    logger.info(f\"Using models directory: {models_dir}\")\n\n    model_kwargs = {\n        \"device\": device\n    }\n\n    encode_kwargs = {\n        \"device\": device,\n        \"normalize_embeddings\": True,\n        \"max_seq_length\": 512\n    }\n\n    try:\n        embeddings = HuggingFaceEmbeddings(\n            model_name=model_name,\n            model_kwargs=model_kwargs,\n            encode_kwargs=encode_kwargs,\n            cache_folder=models_dir\n        )\n        return embeddings\n    except Exception as e:\n        logger.error(f\"Error initializing embeddings: {str(e)}\")\n        # Fallback to CPU if there's an error with the device\n        if device != 
\"cpu\":\n            logger.info(\"Falling back to CPU\")\n            model_kwargs[\"device\"] = \"cpu\"\n            encode_kwargs[\"device\"] = \"cpu\"\n            return HuggingFaceEmbeddings(\n                model_name=model_name,\n                model_kwargs=model_kwargs,\n                encode_kwargs=encode_kwargs,\n                cache_folder=models_dir\n            )\n        raise\n"
  },
  {
    "path": "Backend/src/vectorstorage/vectorstore.py",
    "content": "from src.vectorstorage.init_store import get_models_dir\nfrom langchain_huggingface import HuggingFaceEmbeddings\nfrom langchain_chroma import Chroma\nfrom langchain_openai import OpenAIEmbeddings\nimport torch\nimport os\nimport logging\nimport platform\n\nlogger = logging.getLogger(__name__)\n\ndef get_app_data_dir():\n    home_dir = os.path.expanduser(\"~\")\n    if platform.system() == \"Darwin\":  # macOS\n        app_data_dir = os.path.join(home_dir, \"Library/Application Support/Notate\")\n    elif platform.system() == \"Linux\":  # Linux\n        app_data_dir = os.path.join(home_dir, \".local/share/Notate\")\n    else:  # Windows and others\n        app_data_dir = os.path.join(home_dir, \".notate\")\n\n    os.makedirs(app_data_dir, exist_ok=True)\n    return app_data_dir\n\nchroma_db_path = os.path.join(get_app_data_dir(), \"chroma_db\")\nlogger.info(f\"Using Chroma DB path: {chroma_db_path}\")\n\ndef get_vectorstore(api_key: str, collection_name: str, use_local_embeddings: bool = False, local_embedding_model: str = \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\"):\n    try:\n        # Get embeddings\n        if use_local_embeddings or api_key is None:\n            logger.info(f\"Using local embedding model: {local_embedding_model}\")\n            \n            # Determine the appropriate device\n            if torch.cuda.is_available():\n                device = \"cuda\"\n            elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():\n                device = \"mps\"\n            else:\n                device = \"cpu\"\n                \n            logger.info(f\"Using device: {device}\")\n            models_dir = get_models_dir()\n            logger.info(f\"Using models directory: {models_dir}\")\n\n            model_kwargs = {\"device\": device}\n            encode_kwargs = {\n                \"device\": device,\n                \"normalize_embeddings\": True,\n                \"max_seq_length\": 512\n  
          }\n\n            try:\n                embeddings = HuggingFaceEmbeddings(\n                    model_name=local_embedding_model,\n                    model_kwargs=model_kwargs,\n                    encode_kwargs=encode_kwargs,\n                    cache_folder=models_dir\n                )\n            except Exception as e:\n                logger.error(f\"Error initializing embeddings with {device}: {str(e)}\")\n                if device != \"cpu\":\n                    logger.info(\"Falling back to CPU\")\n                    model_kwargs[\"device\"] = \"cpu\"\n                    encode_kwargs[\"device\"] = \"cpu\"\n                    embeddings = HuggingFaceEmbeddings(\n                        model_name=local_embedding_model,\n                        model_kwargs=model_kwargs,\n                        encode_kwargs=encode_kwargs,\n                        cache_folder=models_dir\n                    )\n                else:\n                    raise\n        else:\n            logger.info(\"Using OpenAI embedding model\")\n            embeddings = OpenAIEmbeddings(api_key=api_key)\n\n        # Try to create vectorstore with specific settings\n        try:\n            from chromadb.config import Settings\n            import chromadb\n\n            # Use in-memory store if persistent store fails\n            try:\n                chroma_client = chromadb.PersistentClient(\n                    path=chroma_db_path,\n                    settings=Settings(\n                        anonymized_telemetry=False,\n                        allow_reset=True,\n                        is_persistent=True\n                    )\n                )\n            except Exception as e:\n                logger.warning(f\"Failed to create persistent client: {str(e)}, falling back to in-memory\")\n                chroma_client = chromadb.Client(\n                    settings=Settings(\n                        anonymized_telemetry=False,\n                        
allow_reset=True,\n                        is_persistent=False\n                    )\n                )\n\n            vectorstore = Chroma(\n                client=chroma_client,\n                embedding_function=embeddings,\n                collection_name=collection_name,\n            )\n            logger.info(f\"Successfully initialized vectorstore for collection: {collection_name}\")\n            return vectorstore\n        except Exception as e:\n            logger.error(f\"Error creating Chroma instance: {str(e)}\")\n            # Try one more time with in-memory store\n            try:\n                chroma_client = chromadb.Client(\n                    settings=Settings(\n                        anonymized_telemetry=False,\n                        allow_reset=True,\n                        is_persistent=False\n                    )\n                )\n                vectorstore = Chroma(\n                    client=chroma_client,\n                    embedding_function=embeddings,\n                    collection_name=collection_name,\n                )\n                return vectorstore\n            except Exception as e2:\n                logger.error(f\"Error creating in-memory Chroma instance: {str(e2)}\")\n                return None\n\n    except Exception as e:\n        logger.error(f\"Error getting vectorstore: {str(e)}\")\n        return None\n"
  },
  {
    "path": "Backend/src/voice/voice_to_text.py",
    "content": "import whisper\nimport os\nimport warnings\nimport torch\nimport shutil\nimport subprocess\n\n# Suppress specific warnings\nwarnings.filterwarnings(\n    \"ignore\", message=\".*weights_only=False.*\", category=FutureWarning)\nwarnings.filterwarnings(\n    \"ignore\", message=\"FP16 is not supported on CPU; using FP32 instead\")\nwarnings.filterwarnings(\"ignore\", category=FutureWarning,\n                        module=\"torch.serialization\")\n\n# Global variables\nmodel = None\nffmpeg_path = None\n\n\ndef initialize_model(model_name: str = \"base\"):\n    \"\"\"Initialize the Whisper model with optimal device and precision settings.\"\"\"\n    global model, ffmpeg_path\n    if model is None:\n        # Get FFmpeg path from environment variable\n        ffmpeg_path = os.environ.get('FFMPEG_PATH')\n        if ffmpeg_path:\n            try:\n                # Verify FFmpeg works\n                subprocess.run([ffmpeg_path, \"-version\"],\n                               capture_output=True, check=True)\n                print(f\"FFmpeg verified at: {ffmpeg_path}\")\n                # Set environment variables for Whisper\n                os.environ[\"PATH\"] = os.pathsep.join(\n                    [os.path.dirname(ffmpeg_path), os.environ.get('PATH', '')])\n                os.environ[\"FFMPEG_BINARY\"] = ffmpeg_path\n            except Exception as e:\n                print(f\"Warning: Error verifying FFmpeg at {ffmpeg_path}: {e}\")\n                ffmpeg_path = None\n\n        if not ffmpeg_path:\n            # Try to find system FFmpeg\n            ffmpeg_system = shutil.which('ffmpeg')\n            if ffmpeg_system:\n                ffmpeg_path = ffmpeg_system\n                os.environ[\"FFMPEG_BINARY\"] = ffmpeg_path\n                print(f\"Using system FFmpeg from: {ffmpeg_path}\")\n            else:\n                print(\"FFmpeg not found or not working\")\n                return None\n\n        # Initialize Whisper model\n        device 
= \"cuda\" if torch.cuda.is_available() else \"cpu\"\n        fp16 = device == \"cuda\"\n\n        print(f\"Loading Whisper model '{model_name}' on {device}...\")\n        model = whisper.load_model(model_name)\n        model.to(device)\n\n        if device == \"cuda\" and fp16:\n            model = model.half()\n            print(f\"Using GPU with FP16={fp16}\")\n        else:\n            print(\"Using CPU with FP32\")\n\n    return model\n"
  },
  {
    "path": "Backend/tests/testApi.py",
    "content": "import pytest\nfrom fastapi.testclient import TestClient\nfrom main import app\nfrom src.endpoint.models import EmbeddingRequest, QueryRequest, YoutubeTranscriptRequest\n\nclient = TestClient(app)\n\ndef test_embed_endpoint():\n    # Test successful embedding\n    data = EmbeddingRequest(\n        file_path=\"test_file.txt\",\n        api_key=\"test_api_key\",\n        collection=1,\n        collection_name=\"test_collection\",\n        user=1,\n        metadata={\"title\": \"Test Document\"}\n    )\n    response = client.post(\"/embed\", json=data.dict())\n    assert response.status_code == 200\n    assert \"text/event-stream\" in response.headers[\"content-type\"]\n\ndef test_concurrent_embedding():\n    # Test that only one embedding process can run at a time\n    data = EmbeddingRequest(\n        file_path=\"test_file.txt\",\n        api_key=\"test_api_key\",\n        collection=1,\n        collection_name=\"test_collection\",\n        user=1,\n        metadata={\"title\": \"Test Document\"}\n    )\n    # Start first embedding\n    response1 = client.post(\"/embed\", json=data.dict())\n    assert response1.status_code == 200\n    \n    # Try to start second embedding\n    response2 = client.post(\"/embed\", json=data.dict())\n    assert response2.status_code == 200\n    response_data = response2.json()\n    assert response_data[\"status\"] == \"error\"\n    assert response_data[\"message\"] == \"An embedding process is already running\"\n\ndef test_youtube_ingest():\n    data = YoutubeTranscriptRequest(\n        url=\"https://www.youtube.com/watch?v=test_id\",\n        user_id=1,\n        collection_id=1,\n        username=\"test_user\",\n        collection_name=\"test_collection\",\n        api_key=\"test_api_key\"\n    )\n    response = client.post(\"/youtube-ingest\", json=data.dict())\n    assert response.status_code == 200\n    assert \"text/event-stream\" in response.headers[\"content-type\"]\n\ndef test_cancel_embedding():\n    # Test 
cancelling when no embedding is running\n    response = client.post(\"/cancel-embed\")\n    assert response.status_code == 200\n    response_data = response.json()\n    assert response_data[\"status\"] == \"error\"\n    assert response_data[\"message\"] == \"No embedding process running\"\n\n    # Start an embedding process\n    embed_data = EmbeddingRequest(\n        file_path=\"test_file.txt\",\n        api_key=\"test_api_key\",\n        collection=1,\n        collection_name=\"test_collection\",\n        user=1,\n        metadata={\"title\": \"Test Document\"}\n    )\n    embed_response = client.post(\"/embed\", json=embed_data.dict())\n    assert embed_response.status_code == 200\n\n    # Cancel the embedding process\n    cancel_response = client.post(\"/cancel-embed\")\n    assert cancel_response.status_code == 200\n    cancel_data = cancel_response.json()\n    assert cancel_data[\"status\"] == \"success\"\n    assert cancel_data[\"message\"] == \"Embedding process cancelled\"\n\ndef test_query():\n    data = QueryRequest(\n        query=\"test query\",\n        collection=1,\n        collection_name=\"test_collection\",\n        user=1,\n        api_key=\"test_api_key\",\n        top_k=5\n    )\n    response = client.post(\"/vector-query\", json=data.dict())\n    assert response.status_code == 200\n\n    # Test error handling\n    invalid_data = QueryRequest(\n        query=\"\",  # Empty query should raise an error\n        collection=1,\n        collection_name=\"test_collection\",\n        user=1,\n        api_key=\"test_api_key\",\n        top_k=5\n    )\n    response = client.post(\"/vector-query\", json=invalid_data.dict())\n    assert response.status_code == 200  # FastAPI still returns 200 but with error message\n    response_data = response.json()\n    assert response_data[\"status\"] == \"error\"\n\n# Note: We don't test the restart-server endpoint directly as it would terminate our test process\n\n"
  },
  {
    "path": "Backend/tests/test_voice.py",
    "content": "import pytest\nfrom fastapi.testclient import TestClient\nfrom main import app\nimport os\nimport tempfile\nimport wave\nimport numpy as np\nimport sounddevice as sd\n\n\nclient = TestClient(app)\n\ndef create_test_wav(duration=3.0, frequency=440.0, sample_rate=16000):\n    \"\"\"Create a test WAV file with a sine wave.\"\"\"\n    # Generate time array\n    t = np.linspace(0, duration, int(sample_rate * duration), False)\n    \n    # Generate sine wave\n    note = np.sin(2 * np.pi * frequency * t)\n    \n    # Normalize to 16-bit range and convert to integers\n    audio = note * 32767\n    audio = audio.astype(np.int16)\n    \n    # Create a temporary file\n    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')\n    \n    # Write WAV file\n    with wave.open(temp_file.name, 'wb') as wav_file:\n        wav_file.setnchannels(1)  # Mono\n        wav_file.setsampwidth(2)  # 2 bytes per sample (16-bit)\n        wav_file.setframerate(sample_rate)\n        wav_file.writeframes(audio.tobytes())\n    \n    return temp_file.name\n\ndef test_voice_to_text_basic():\n    \"\"\"Test basic voice-to-text functionality with a generated WAV file.\"\"\"\n    # Create a test WAV file\n    test_file = create_test_wav()\n    \n    try:\n        with open(test_file, 'rb') as f:\n            files = {'audio_file': ('test.wav', f, 'audio/wav')}\n            response = client.post(\"/voice-to-text\", files=files)\n            \n        assert response.status_code == 200\n        result = response.json()\n        assert \"status\" in result\n        assert \"text\" in result\n        assert \"language\" in result\n        assert \"segments\" in result\n        \n    finally:\n        # Clean up the test file\n        os.unlink(test_file)\n\ndef test_voice_to_text_models():\n    \"\"\"Test voice-to-text with different Whisper models.\"\"\"\n    test_file = create_test_wav()\n    \n    try:\n        models = ['tiny', 'base', 'small']  # We'll test with smaller 
models for speed\n        \n        for model in models:\n            with open(test_file, 'rb') as f:\n                files = {'audio_file': ('test.wav', f, 'audio/wav')}\n                response = client.post(\"/voice-to-text\", \n                                    files=files,\n                                    data={'model_name': model})\n                \n            assert response.status_code == 200\n            result = response.json()\n            assert result[\"status\"] == \"success\"\n            \n    finally:\n        os.unlink(test_file)\n\ndef test_voice_to_text_invalid_audio():\n    \"\"\"Test voice-to-text with invalid audio data.\"\"\"\n    # Create an invalid audio file\n    with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_file:\n        temp_file.write(b'This is not valid audio data')\n    \n    try:\n        with open(temp_file.name, 'rb') as f:\n            files = {'audio_file': ('invalid.wav', f, 'audio/wav')}\n            response = client.post(\"/voice-to-text\", files=files)\n        \n        assert response.status_code == 200\n        result = response.json()\n        assert result[\"status\"] == \"error\"\n        assert \"error\" in result\n        \n    finally:\n        os.unlink(temp_file.name)\n\ndef test_voice_to_text_missing_file():\n    \"\"\"Test voice-to-text without providing an audio file.\"\"\"\n    response = client.post(\"/voice-to-text\")\n    assert response.status_code == 422  # FastAPI validation error\n\ndef test_voice_to_text_long_audio():\n    \"\"\"Test voice-to-text with a longer audio file.\"\"\"\n    test_file = create_test_wav(duration=10.0)  # 10 seconds\n    \n    try:\n        with open(test_file, 'rb') as f:\n            files = {'audio_file': ('long.wav', f, 'audio/wav')}\n            response = client.post(\"/voice-to-text\", files=files)\n            \n        assert response.status_code == 200\n        result = response.json()\n        assert result[\"status\"] == 
\"success\"\n        assert \"text\" in result\n        assert \"language\" in result\n        assert \"segments\" in result\n        \n    finally:\n        os.unlink(test_file)\n\ndef test_voice_to_text_different_frequencies():\n    \"\"\"Test voice-to-text with different audio frequencies.\"\"\"\n    frequencies = [440.0, 880.0, 1760.0]  # A4, A5, A6 notes\n    \n    for freq in frequencies:\n        test_file = create_test_wav(frequency=freq)\n        \n        try:\n            with open(test_file, 'rb') as f:\n                files = {'audio_file': (f'freq_{freq}.wav', f, 'audio/wav')}\n                response = client.post(\"/voice-to-text\", files=files)\n                \n            assert response.status_code == 200\n            result = response.json()\n            assert result[\"status\"] == \"success\"\n            \n        finally:\n            os.unlink(test_file)\n\ndef record_audio(duration=5, sample_rate=16000):\n    \"\"\"Record audio from the microphone.\"\"\"\n    print(f\"Recording for {duration} seconds...\")\n    audio_data = sd.rec(int(duration * sample_rate),\n                       samplerate=sample_rate,\n                       channels=1,\n                       dtype=np.int16)\n    sd.wait()  # Wait until recording is finished\n    return audio_data\n\ndef test_live_voice_to_text(capsys):\n    \"\"\"Test voice-to-text with live microphone input.\"\"\"\n    # Record audio\n    sample_rate = 16000\n    duration = 5  # 5 seconds of recording\n    \n    with capsys.disabled():\n        print(\"\\n=== Live Voice-to-Text Test ===\")\n        print(\"Please speak into your microphone...\")\n        audio_data = record_audio(duration, sample_rate)\n    \n    # Create a temporary WAV file\n    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')\n    \n    try:\n        # Save the recorded audio to WAV file\n        with wave.open(temp_file.name, 'wb') as wav_file:\n            wav_file.setnchannels(1)  # Mono\n            
wav_file.setsampwidth(2)  # 16-bit\n            wav_file.setframerate(sample_rate)\n            wav_file.writeframes(audio_data.tobytes())\n        \n        # Send the recorded audio for transcription\n        with open(temp_file.name, 'rb') as f:\n            files = {'audio_file': ('recording.wav', f, 'audio/wav')}\n            response = client.post(\"/voice-to-text\", files=files)\n        \n        assert response.status_code == 200\n        result = response.json()\n        assert result[\"status\"] == \"success\"\n        assert \"text\" in result\n        \n        with capsys.disabled():\n            print(f\"\\nTranscribed text: {result['text']}\")\n            print(\"================================\\n\")\n        \n    finally:\n        os.unlink(temp_file.name)\n\nif __name__ == \"__main__\":\n    pytest.main([__file__, \"-v\"]) "
  },
  {
    "path": "Frontend/.gitignore",
    "content": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\npnpm-debug.log*\nlerna-debug.log*\n\nnode_modules\ndist\ndist-react\ndist-ssr\ndist-electron\n*.local\n\n# Editor directories and files\n.vscode/*\n!.vscode/extensions.json\n.idea\n.DS_Store\n*.suo\n*.ntvs*\n*.njsproj\n*.sln\n*.sw?\n/test-results/\n/playwright-report/\n/blob-report/\n/playwright/.cache/\n"
  },
  {
    "path": "Frontend/components.json",
    "content": "{\n  \"$schema\": \"https://ui.shadcn.com/schema.json\",\n  \"style\": \"new-york\",\n  \"rsc\": false,\n  \"tsx\": true,\n  \"tailwind\": {\n    \"config\": \"tailwind.config.js\",\n    \"css\": \"src/app/index.css\",\n    \"baseColor\": \"neutral\",\n    \"cssVariables\": true,\n    \"prefix\": \"\"\n  },\n  \"aliases\": {\n    \"components\": \"@/components\",\n    \"utils\": \"@/lib/utils\",\n    \"ui\": \"@/components/ui\",\n    \"lib\": \"@/lib\",\n    \"hooks\": \"@/hooks\"\n  },\n  \"iconLibrary\": \"lucide\"\n}"
  },
  {
    "path": "Frontend/e2e/app.spec.ts",
    "content": "import {\n  test,\n  expect,\n  _electron,\n  Page,\n  ElectronApplication,\n} from \"@playwright/test\";\n\nlet electronApp: ElectronApplication;\nlet loadingWindow: Page;\nlet mainWindow: Page;\n\n// Increase timeout for the entire test file\ntest.setTimeout(160000);\n\nasync function waitForMainWindow(timeout = 45000): Promise<Page> {\n  const startTime = Date.now();\n  while (Date.now() - startTime < timeout) {\n    const windows = await electronApp.windows();\n    // Find the window that's not the loading window\n    const mainWin = windows.find((win) => win !== loadingWindow);\n    if (mainWin) {\n      return mainWin;\n    }\n    await new Promise((resolve) => setTimeout(resolve, 100));\n  }\n  throw new Error(\"Main window did not appear within timeout\");\n}\n\nasync function waitForPreloadScript(page: Page): Promise<unknown> {\n  const timeout = 30000;\n  const startTime = Date.now();\n\n  return new Promise((resolve, reject) => {\n    const interval = setInterval(async () => {\n      try {\n        if (Date.now() - startTime > timeout) {\n          clearInterval(interval);\n          reject(new Error(\"Timeout waiting for preload script\"));\n          return;\n        }\n\n        const electronBridge = await page.evaluate(() => {\n          return (window as { electron?: unknown }).electron;\n        });\n\n        if (electronBridge) {\n          clearInterval(interval);\n          resolve(electronBridge);\n        }\n      } catch (error) {\n        clearInterval(interval);\n        reject(error);\n      }\n    }, 100);\n  });\n}\n\ntest.beforeEach(async () => {\n  // Launch the app with increased timeout\n  electronApp = await _electron.launch({\n    args: [\".\"],\n    env: { NODE_ENV: \"development\" },\n    timeout: 45000,\n  });\n\n  // Get the loading window (first window)\n  loadingWindow = await electronApp.firstWindow();\n\n  // Wait for loading window to be ready and verify its existence\n  await 
loadingWindow.waitForLoadState(\"domcontentloaded\");\n\n  try {\n    // Verify loading window content before it potentially closes\n    const loadingContent = await loadingWindow.textContent(\"body\");\n    expect(loadingContent).toBeTruthy();\n  } catch (error) {\n    console.log(\"Loading window content check failed:\", error);\n  }\n\n  // Wait for Python server to start and main window to appear\n  mainWindow = await waitForMainWindow();\n  await mainWindow.waitForLoadState(\"domcontentloaded\");\n  await waitForPreloadScript(mainWindow);\n});\n\ntest.afterEach(async () => {\n  if (electronApp) {\n    await electronApp.close();\n  }\n});\n\ntest(\"application startup sequence\", async () => {\n  // Verify main window appears and is loaded\n  await mainWindow.waitForLoadState(\"domcontentloaded\");\n\n  // Verify main window has expected title\n  const title = await mainWindow.title();\n  expect(title).toBe(\"Notate\");\n\n  // Verify window count\n  const windows = await electronApp.windows();\n  expect(windows.length).toBeGreaterThanOrEqual(1);\n});\n\ntest(\"main window functionality after startup\", async () => {\n  // Wait for main window to be ready\n  await mainWindow.waitForLoadState(\"domcontentloaded\");\n\n  // Get all windows and verify main window state\n  const isMinimized = await electronApp.evaluate(({ BrowserWindow }) => {\n    const wins = BrowserWindow.getAllWindows();\n    // Find the window that's not minimized (should be our main window)\n    const mainWin = wins.find((win) => !win.isMinimized());\n    return mainWin ? 
mainWin.isMinimized() : null;\n  });\n\n  expect(isMinimized).toBe(false);\n});\n\ntest(\"menu structure verification\", async () => {\n  // Get the application menu\n  interface MenuItem {\n    label: string;\n    submenuLabels: string[];\n  }\n\n  const menu = await electronApp.evaluate(({ Menu }) => {\n    const appMenu = Menu.getApplicationMenu();\n    if (!appMenu) return null;\n\n    return appMenu.items.map((item) => ({\n      label: item.label,\n      submenuLabels: item.submenu?.items.map((subItem) => subItem.label) || [],\n    }));\n  });\n\n  // Verify menu exists\n  expect(menu).toBeTruthy();\n  expect(Array.isArray(menu)).toBe(true);\n\n  // Verify File menu\n  const fileMenu = menu?.find((item) => item.label === \"File\") as MenuItem;\n  expect(fileMenu).toBeTruthy();\n  expect(fileMenu.label).toBe(\"File\");\n  expect(fileMenu.submenuLabels).toContain(\"Change User\");\n  expect(fileMenu.submenuLabels).toContain(\"Quit\");\n\n  // Verify Edit menu\n  const editMenu = menu?.find((item) => item.label === \"Edit\") as MenuItem;\n  expect(editMenu).toBeTruthy();\n  expect(editMenu.label).toBe(\"Edit\");\n  expect(editMenu.submenuLabels).toContain(\"Undo\");\n  expect(editMenu.submenuLabels).toContain(\"Redo\");\n  expect(editMenu.submenuLabels).toContain(\"Cut\");\n  expect(editMenu.submenuLabels).toContain(\"Copy\");\n  expect(editMenu.submenuLabels).toContain(\"Paste\");\n  expect(editMenu.submenuLabels).toContain(\"Delete\");\n  expect(editMenu.submenuLabels).toContain(\"Select All\");\n\n  // Verify View menu\n  const viewMenu = menu?.find((item) => item.label === \"View\") as MenuItem;\n  expect(viewMenu).toBeTruthy();\n  expect(viewMenu.label).toBe(\"View\");\n  expect(viewMenu.submenuLabels).toContain(\"Chat\");\n  expect(viewMenu.submenuLabels).toContain(\"History\");\n  expect(viewMenu.submenuLabels).toContain(\"Temp DevTools\");\n});\n\ntest(\"menu DevTools functionality\", async () => {\n  // Test menu functionality - Toggle DevTools\n  const 
devToolsVisible = await electronApp.evaluate(({ BrowserWindow }) => {\n    const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n    return win?.webContents.isDevToolsOpened() || false;\n  });\n  expect(devToolsVisible).toBe(false);\n\n  // Toggle DevTools through menu\n  await electronApp.evaluate(async ({ Menu, BrowserWindow }) => {\n    const appMenu = Menu.getApplicationMenu();\n    if (!appMenu) return;\n\n    const viewMenu = appMenu.items.find((item) => item.label === \"View\");\n    if (!viewMenu?.submenu) return;\n\n    const devToolsItem = viewMenu.submenu.items.find(\n      (item) => item.label === \"Temp DevTools\"\n    );\n    if (devToolsItem) {\n      const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n      if (win) {\n        win.webContents.toggleDevTools();\n        // Add a longer wait time for DevTools to open\n        await new Promise(resolve => setTimeout(resolve, 2000));\n      }\n    }\n  });\n\n  // Verify DevTools is now open\n  const devToolsNowVisible = await electronApp.evaluate(({ BrowserWindow }) => {\n    const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n    return win?.webContents.isDevToolsOpened() || false;\n  });\n  expect(devToolsNowVisible).toBe(true);\n});\n\ntest(\"menu View functionality\", async () => {\n  // Wait for initial load\n  await mainWindow.waitForLoadState(\"domcontentloaded\");\n\n  // Test View menu functionality - Chat view\n  const chatClicked = await electronApp.evaluate(async ({ Menu }) => {\n    try {\n      const appMenu = Menu.getApplicationMenu();\n      if (!appMenu) return false;\n\n      const viewMenu = appMenu.items.find((item) => item.label === \"View\");\n      if (!viewMenu?.submenu) return false;\n\n      const chatItem = viewMenu.submenu.items.find(\n        (item) => item.label === \"Chat\"\n      );\n      if (!chatItem) return false;\n\n      await chatItem.click();\n      return true;\n    } catch (error) {\n      
console.error(\"Error clicking Chat menu item:\", error);\n      return false;\n    }\n  });\n\n  expect(chatClicked).toBe(true);\n\n  // Add a small delay to allow for view change\n  await new Promise((resolve) => setTimeout(resolve, 1000));\n\n  // Verify the view changed to Chat\n  const isChatView = await mainWindow.evaluate(() => {\n    // Try multiple possible selectors\n    return Boolean(\n      document.querySelector('[data-view=\"Chat\"]') ||\n        document.querySelector(\".chat-view\") ||\n        document.querySelector(\"#chat-view\") ||\n        // Look for any heading element containing the \"Notate\" app title\n        Array.from(document.querySelectorAll(\"h1,h2,h3,h4,h5,h6\")).some((el) =>\n          el.textContent?.includes(\"Notate\")\n        )\n    );\n  });\n  expect(isChatView).toBe(true);\n\n  // Additional verification - try to find chat-related elements\n  const hasChatElements = await mainWindow.evaluate(() => {\n    return Boolean(\n      document.querySelector('input[type=\"text\"]') || // Chat input\n        document.querySelector(\"textarea\") || // Chat input\n        document.querySelector(\".message\") || // Chat messages\n        document.querySelector(\".chat-container\") // Chat container\n    );\n  });\n  expect(hasChatElements).toBe(true);\n});\n\ntest(\"menu Change User functionality\", async () => {\n  // Test menu functionality - Change User\n  // Note: This will close the app, so it should be the last test\n  const changeUserClicked = await electronApp.evaluate(async ({ Menu }) => {\n    try {\n      const appMenu = Menu.getApplicationMenu();\n      if (!appMenu) return false;\n\n      const fileMenu = appMenu.items.find((item) => item.label === \"File\");\n      if (!fileMenu?.submenu) return false;\n\n      const changeUserItem = fileMenu.submenu.items.find(\n        (item) => item.label === \"Change User\"\n      );\n      if (!changeUserItem) return false;\n\n      await changeUserItem.click();\n      return true;\n    } 
catch (error) {\n      console.error(\"Error clicking Change User menu item:\", error);\n      return false;\n    }\n  });\n\n  expect(changeUserClicked).toBe(true);\n});\n\ntest(\"keyboard shortcuts and DevTools functionality\", async () => {\n  // Test common keyboard shortcuts\n  await mainWindow.keyboard.press(\"Control+Z\"); // Test Undo\n  await mainWindow.keyboard.press(\"Control+Y\"); // Test Redo\n  await mainWindow.keyboard.press(\"Control+A\"); // Test Select All\n\n  // Test DevTools using Electron API directly\n  await electronApp.evaluate(({ BrowserWindow }) => {\n    const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n    if (win && !win.webContents.isDevToolsOpened()) {\n      win.webContents.openDevTools();\n    }\n  });\n\n  // Add a small delay to allow DevTools to open\n  await new Promise((resolve) => setTimeout(resolve, 1000));\n\n  const devToolsOpen = await electronApp.evaluate(({ BrowserWindow }) => {\n    const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n    return win?.webContents.isDevToolsOpened() || false;\n  });\n  expect(devToolsOpen).toBe(true);\n\n  // Close DevTools\n  await electronApp.evaluate(({ BrowserWindow }) => {\n    const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n    if (win && win.webContents.isDevToolsOpened()) {\n      win.webContents.closeDevTools();\n    }\n  });\n\n  // Verify DevTools is closed\n  const devToolsClosed = await electronApp.evaluate(({ BrowserWindow }) => {\n    const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n    return !win?.webContents.isDevToolsOpened();\n  });\n  expect(devToolsClosed).toBe(true);\n});\n\ntest(\"window state management\", async () => {\n  // Test minimize with retry logic\n  let retries = 3;\n  let isMinimized = false;\n  \n  while (retries > 0 && !isMinimized) {\n    await electronApp.evaluate(({ BrowserWindow }) => {\n      const win = BrowserWindow.getAllWindows().find((w) => 
!w.isDestroyed());\n      if (win && !win.isMinimized()) {\n        win.minimize();\n      }\n    });\n    \n    // Wait longer for the window state to change\n    await new Promise((resolve) => setTimeout(resolve, 2000));\n    \n    isMinimized = await electronApp.evaluate(({ BrowserWindow }) => {\n      const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n      return win?.isMinimized() || false;\n    });\n    \n    retries--;\n  }\n  \n  expect(isMinimized).toBe(true);\n\n  // Test restore\n  await electronApp.evaluate(({ BrowserWindow }) => {\n    const win = BrowserWindow.getAllWindows().find((w) => !w.isDestroyed());\n    win?.restore();\n  });\n});\n\ntest(\"chat interaction flow\", async () => {\n  // Set up response mocking\n  await mainWindow.route(\"**/chat\", async (route) => {\n    await route.fulfill({\n      status: 200,\n      contentType: \"application/json\",\n      body: JSON.stringify({\n        id: 1,\n        messages: [\n          {\n            role: \"user\",\n            content: \"Test message\",\n            timestamp: new Date().toISOString(),\n          },\n          {\n            role: \"assistant\",\n            content: \"This is a mock AI response\",\n            timestamp: new Date().toISOString(),\n          },\n        ],\n        title: \"Test Conversation\",\n      }),\n    });\n  });\n\n  // Navigate to chat view and wait for it to be ready\n  const chatClicked = await electronApp.evaluate(async ({ Menu }) => {\n    try {\n      const appMenu = Menu.getApplicationMenu();\n      const viewMenu = appMenu?.items.find((item) => item.label === \"View\");\n      const chatItem = viewMenu?.submenu?.items.find(\n        (item) => item.label === \"Chat\"\n      );\n      if (!chatItem) return false;\n      await chatItem.click();\n      return true;\n    } catch (error) {\n      console.error(\"Error clicking Chat menu item:\", error);\n      return false;\n    }\n  });\n\n  expect(chatClicked).toBe(true);\n\n  
// Wait for chat interface to load\n  const chatInput = await mainWindow.waitForSelector(\n    '[data-testid=\"chat-input\"]',\n    {\n      timeout: 10000,\n      state: \"visible\",\n    }\n  );\n  expect(chatInput).toBeTruthy();\n\n  // Type the message\n  await chatInput.type(\"Test message\");\n\n  // Click the send button instead of pressing Enter\n  const sendButton = await mainWindow.waitForSelector(\n    '[data-testid=\"chat-submit\"]',\n    {\n      timeout: 5000,\n      state: \"visible\",\n    }\n  );\n  expect(sendButton).toBeTruthy();\n  await sendButton.click();\n\n  // Add debug logging\n  console.log(\"Waiting for user message to appear...\");\n\n  // Wait for user message to appear with increased timeout\n  const userMessage = await mainWindow.waitForSelector(\n    [\n      '[data-testid=\"chat-message-user\"]',\n      '[data-testid=\"message-content-user\"]',\n      \".user-message\",\n      '.message:has-text(\"Test message\")',\n    ].join(\",\"),\n    {\n      timeout: 20000,\n      state: \"visible\",\n    }\n  );\n\n  // Add more debug logging\n  console.log(\"User message found, checking content...\");\n\n  expect(userMessage).toBeTruthy();\n\n  // Get all text content to debug\n  const pageContent = await mainWindow.textContent(\"body\");\n  console.log(\"Page content:\", pageContent);\n\n  // Verify the message content\n  const messageText = await userMessage.textContent();\n  console.log(\"Message text:\", messageText);\n  expect(messageText).toContain(\"Test message\");\n\n  // Clean up route\n  await mainWindow.unroute(\"**/chat\");\n});\n\ntest(\"history view functionality\", async () => {\n  // Navigate to history view\n  const historyClicked = await electronApp.evaluate(async ({ Menu }) => {\n    try {\n      const appMenu = Menu.getApplicationMenu();\n      const viewMenu = appMenu?.items.find((item) => item.label === \"View\");\n      const historyItem = viewMenu?.submenu?.items.find(\n        (item) => item.label === 
\"History\"\n      );\n      if (!historyItem) return false;\n      await historyItem.click();\n      return true;\n    } catch (error) {\n      console.error(\"Error clicking History menu item:\", error);\n      return false;\n    }\n  });\n\n  expect(historyClicked).toBe(true);\n\n  // Wait for history view to be visible\n  const historyView = await mainWindow.waitForSelector(\n    '[data-testid=\"history-view\"]',\n    {\n      timeout: 10000,\n      state: \"visible\",\n    }\n  );\n  expect(historyView).toBeTruthy();\n\n  // Wait for the header to be visible\n  const header = await mainWindow.waitForSelector(\n    'h1:has-text(\"Chat History\")',\n    {\n      timeout: 5000,\n      state: \"visible\",\n    }\n  );\n  expect(header).toBeTruthy();\n\n  // Wait for the search input to be visible\n  const searchInput = await mainWindow.waitForSelector(\n    'input[type=\"text\"][placeholder=\"Search conversations...\"]',\n    {\n      timeout: 5000,\n      state: \"visible\",\n    }\n  );\n  expect(searchInput).toBeTruthy();\n\n  // Verify the scroll area exists using multiple possible selectors\n  const scrollArea = await mainWindow.waitForSelector(\n    [\n      '[data-testid=\"history-scroll-area\"]',\n      \".scroll-area\",\n      \".scrollarea\",\n      '[role=\"scrollarea\"]',\n      \".overflow-auto\",\n    ].join(\",\"),\n    {\n      timeout: 10000,\n      state: \"visible\",\n    }\n  );\n  expect(scrollArea).toBeTruthy();\n\n  // Test search functionality\n  await searchInput.type(\"test\");\n  await new Promise((resolve) => setTimeout(resolve, 500)); // Wait for search to update\n\n  // Get the entire history view content\n  const historyContent = await historyView.textContent();\n  expect(historyContent).toContain(\"Chat History\");\n});\n"
  },
  {
    "path": "Frontend/electron-builder.json",
    "content": "{\n    \"appId\": \"com.electron.notate\",\n    \"productName\": \"Notate\",\n    \"extraResources\": [\n        \"dist-electron/preload.cjs\",\n        {\n            \"from\": \"src/assets\",\n            \"to\": \"assets\"\n        },\n        {\n            \"from\": \"../Backend\",\n            \"to\": \"Backend\",\n            \"filter\": [\n                \"**/*\",\n                \"!**/__pycache__\",\n                \"!**/*.pyc\"\n            ]\n        },\n        {\n            \"from\": \"node_modules/ffmpeg-static/ffmpeg\",\n            \"to\": \"ffmpeg\"\n        },\n        {\n            \"from\": \"node_modules/ffmpeg-static/ffmpeg.exe\",\n            \"to\": \"ffmpeg.exe\"\n        }\n    ],\n    \"asarUnpack\": [\n        \"Backend\",\n        \"ffmpeg\",\n        \"ffmpeg.exe\"\n    ],\n    \"files\": [\n        \"dist-electron\",\n        \"dist-react\",\n        \"src/assets/**/*\",\n        \"build/icons/*\"\n    ],\n    \"icon\": \"./build/icons/icon.icns\",\n    \"mac\": {\n        \"icon\": \"./build/icons/icon.icns\",\n        \"target\": \"dmg\"\n    },\n    \"win\": {\n        \"icon\": \"./build/icons/icon.ico\",\n        \"target\": [\n            \"portable\",\n            {\n                \"target\": \"nsis\",\n                \"arch\": [\"x64\"]\n            }\n        ]\n    },\n    \"linux\": {\n        \"target\": [\n            \"AppImage\",\n            {\n                \"target\": \"deb\",\n                \"arch\": [\n                    \"x64\"\n                ]\n            },\n            {\n                \"target\": \"rpm\",\n                \"arch\": [\n                    \"x64\"\n                ]\n            }\n        ],\n        \"icon\": \"build/icons/icon.png\",\n        \"category\": \"Utility\",\n        \"executableName\": \"notate\",\n        \"desktop\": {\n            \"Name\": \"Notate\",\n            \"Comment\": \"Notate Application\",\n            \"Categories\": 
\"Utility;\",\n            \"Type\": \"Application\",\n            \"StartupWMClass\": \"Notate\",\n            \"Icon\": \"notate\",\n            \"Terminal\": \"false\"\n        }\n    }\n}"
  },
  {
    "path": "Frontend/eslint.config.js",
    "content": "import js from '@eslint/js'\nimport globals from 'globals'\nimport reactHooks from 'eslint-plugin-react-hooks'\nimport reactRefresh from 'eslint-plugin-react-refresh'\nimport tseslint from 'typescript-eslint'\n\nexport default tseslint.config(\n  { ignores: ['dist'] },\n  {\n    extends: [js.configs.recommended, ...tseslint.configs.recommended],\n    files: ['**/*.{ts,tsx}'],\n    languageOptions: {\n      ecmaVersion: 2020,\n      globals: globals.browser,\n    },\n    plugins: {\n      'react-hooks': reactHooks,\n      'react-refresh': reactRefresh,\n    },\n    rules: {\n      ...reactHooks.configs.recommended.rules,\n      'react-refresh/only-export-components': [\n        'warn',\n        { allowConstantExport: true },\n      ],\n    },\n  },\n)\n"
  },
  {
    "path": "Frontend/index.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n  <head>\n    <meta charset=\"UTF-8\" />\n    <link rel=\"icon\" type=\"image/svg+xml\" href=\"/vite.svg\" />\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n    <title> Notate </title>\n    <meta\n      http-equiv=\"Content-Security-Policy\"\n      content=\"default-src 'self'; img-src 'self' data: blob:; style-src 'self' 'unsafe-inline'; script-src 'self' 'wasm-unsafe-eval'\"\n    />\n  </head>\n  <body>\n    <div id=\"root\"></div>\n    <script type=\"module\" src=\"/src/app/main.tsx\"></script>\n  </body>\n</html>\n"
  },
  {
    "path": "Frontend/package.json",
    "content": "{\n  \"name\": \"notate\",\n  \"description\": \"Notate is a cross-platform chatbot that can help assist in your research\",\n  \"author\": \"Hairetsu <Hairetsu@hairetsu.com>\",\n  \"license\": \"MIT\",\n  \"homepage\": \"https://github.com/CNTRLAI/notate\",\n  \"private\": true,\n  \"version\": \"1.1.31\",\n  \"type\": \"module\",\n  \"main\": \"dist-electron/main.js\",\n  \"scripts\": {\n    \"test:unit\": \"vitest src\",\n    \"test:e2e\": \"playwright test\",\n    \"dev:mac\": \"npm-run-all --parallel dev:react dev:electron-mac\",\n    \"dev:win\": \"npm-run-all --parallel dev:react dev:electron-win\",\n    \"dev:linux\": \"npm-run-all --parallel dev:react dev:electron-linux\",\n    \"dev:react\": \"vite\",\n    \"dev:electron-mac\": \"npm run transpile:electron && NODE_ENV=development electron .\",\n    \"dev:electron-win\": \"npm run transpile:electron && cross-env NODE_ENV=development electron .\",\n    \"dev:electron-linux\": \"npm run transpile:electron && NODE_ENV=development electron .\",\n    \"build\": \"tsc -b && vite build\",\n    \"lint\": \"eslint .\",\n    \"preview\": \"vite preview\",\n    \"transpile:electron\": \"tsc --project src/electron/tsconfig.json\",\n    \"dist:mac\": \"npm run transpile:electron && npm run build && electron-builder --mac --arm64\",\n    \"dist:win\": \"npm run transpile:electron && npm run build && electron-builder --win --x64 --publish never\",\n    \"dist:linux\": \"npm run transpile:electron && npm run build && electron-builder --linux --x64\"\n  },\n  \"dependencies\": {\n    \"@anthropic-ai/sdk\": \"^0.32.1\",\n    \"@dqbd/tiktoken\": \"^1.0.18\",\n    \"@google/generative-ai\": \"^0.21.0\",\n    \"@hookform/resolvers\": \"^3.9.1\",\n    \"@radix-ui/react-avatar\": \"^1.1.1\",\n    \"@radix-ui/react-dialog\": \"^1.1.5\",\n    \"@radix-ui/react-label\": \"^2.1.0\",\n    \"@radix-ui/react-menubar\": \"^1.1.4\",\n    \"@radix-ui/react-popover\": \"^1.1.2\",\n    \"@radix-ui/react-progress\": 
\"^1.1.1\",\n    \"@radix-ui/react-radio-group\": \"^1.2.2\",\n    \"@radix-ui/react-scroll-area\": \"^1.2.1\",\n    \"@radix-ui/react-select\": \"^2.1.2\",\n    \"@radix-ui/react-separator\": \"^1.1.0\",\n    \"@radix-ui/react-slider\": \"^1.2.1\",\n    \"@radix-ui/react-slot\": \"^1.1.0\",\n    \"@radix-ui/react-switch\": \"^1.1.1\",\n    \"@radix-ui/react-tabs\": \"^1.1.1\",\n    \"@radix-ui/react-toast\": \"^1.2.2\",\n    \"@radix-ui/react-tooltip\": \"^1.1.6\",\n    \"better-sqlite3\": \"^11.7.0\",\n    \"class-variance-authority\": \"^0.7.1\",\n    \"clsx\": \"^2.1.1\",\n    \"cmdk\": \"1.0.0\",\n    \"date-fns\": \"^4.1.0\",\n    \"dotenv\": \"^16.4.7\",\n    \"electron-log\": \"^5.2.4\",\n    \"ffmpeg-static\": \"^5.2.0\",\n    \"framer-motion\": \"^11.15.0\",\n    \"jsonwebtoken\": \"^9.0.2\",\n    \"lucide-react\": \"^0.462.0\",\n    \"next-mdx-remote\": \"^5.0.0\",\n    \"ollama\": \"^0.5.12\",\n    \"openai\": \"^4.82.0\",\n    \"os-utils\": \"^0.0.14\",\n    \"playwright\": \"^1.50.1\",\n    \"react\": \"^18.3.1\",\n    \"react-dom\": \"^18.3.1\",\n    \"react-dropzone\": \"^14.3.5\",\n    \"react-hook-form\": \"^7.53.2\",\n    \"react-markdown\": \"^9.0.3\",\n    \"rehype-format\": \"^5.0.1\",\n    \"rehype-raw\": \"^7.0.0\",\n    \"rehype-sanitize\": \"^6.0.0\",\n    \"rehype-stringify\": \"^10.0.1\",\n    \"remark-frontmatter\": \"^5.0.0\",\n    \"remark-gfm\": \"^4.0.0\",\n    \"remark-math\": \"^6.0.0\",\n    \"remark-parse\": \"^11.0.0\",\n    \"remark-rehype\": \"^11.1.1\",\n    \"tailwind-merge\": \"^2.5.5\",\n    \"tailwindcss-animate\": \"^1.0.7\",\n    \"unified\": \"^11.0.5\",\n    \"use-clipboard-copy\": \"^0.2.0\",\n    \"zod\": \"^3.23.8\",\n    \"zod-to-json-schema\": \"^3.24.1\"\n  },\n  \"devDependencies\": {\n    \"@electron/rebuild\": \"^3.7.1\",\n    \"@eslint/js\": \"^9.15.0\",\n    \"@playwright/test\": \"^1.49.0\",\n    \"@tailwindcss/typography\": \"^0.5.16\",\n    \"@types/better-sqlite3\": \"^7.6.12\",\n    \"@types/hast\": 
\"^3.0.4\",\n    \"@types/jsonwebtoken\": \"^9.0.7\",\n    \"@types/node\": \"^22.10.1\",\n    \"@types/os-utils\": \"^0.0.4\",\n    \"@types/react\": \"^18.3.12\",\n    \"@types/react-dom\": \"^18.3.1\",\n    \"@types/unist\": \"^3.0.3\",\n    \"@vitejs/plugin-react\": \"^4.3.4\",\n    \"autoprefixer\": \"^10.4.20\",\n    \"cross-env\": \"^7.0.3\",\n    \"electron\": \"^33.2.1\",\n    \"electron-builder\": \"^25.1.8\",\n    \"electron-rebuild\": \"^3.2.9\",\n    \"eslint\": \"^9.15.0\",\n    \"eslint-import-resolver-typescript\": \"^3.6.3\",\n    \"eslint-plugin-import\": \"^2.31.0\",\n    \"eslint-plugin-react-hooks\": \"^5.0.0\",\n    \"eslint-plugin-react-refresh\": \"^0.4.14\",\n    \"globals\": \"^15.12.0\",\n    \"npm-run-all\": \"^4.1.5\",\n    \"postcss\": \"^8.4.49\",\n    \"shiki\": \"^1.24.0\",\n    \"tailwindcss\": \"^3.4.15\",\n    \"typescript\": \"~5.6.2\",\n    \"typescript-eslint\": \"^8.15.0\",\n    \"unist-util-visit\": \"^5.0.0\",\n    \"vite\": \"^6.0.11\",\n    \"vitest\": \"^3.0.5\"\n  }\n}"
  },
  {
    "path": "Frontend/playwright.config.ts",
    "content": "import { defineConfig, devices } from \"@playwright/test\";\n\n/**\n * Read environment variables from file.\n * https://github.com/motdotla/dotenv\n */\n// import dotenv from 'dotenv';\n// import path from 'path';\n// dotenv.config({ path: path.resolve(__dirname, '.env') });\n\n/**\n * See https://playwright.dev/docs/test-configuration.\n */\nexport default defineConfig({\n  testDir: \"./e2e\",\n  /* Run tests in files in parallel */\n  fullyParallel: true,\n  /* Fail the build on CI if you accidentally left test.only in the source code. */\n  forbidOnly: !!process.env.CI,\n  /* Retry on CI only */\n  retries: process.env.CI ? 2 : 0,\n  /* Opt out of parallel tests on CI. */\n  workers: process.env.CI ? 1 : undefined,\n  /* Reporter to use. See https://playwright.dev/docs/test-reporters */\n  reporter: \"html\",\n  /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */\n  use: {\n    /* Base URL to use in actions like `await page.goto('/')`. */\n    // baseURL: 'http://127.0.0.1:3000',\n\n    /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */\n    trace: \"on-first-retry\",\n  },\n\n  /* Configure projects for major browsers */\n  projects: [\n    {\n      name: \"chromium\",\n      use: { ...devices[\"Desktop Chrome\"] },\n    },\n    /* \n    {\n      name: 'firefox',\n      use: { ...devices['Desktop Firefox'] },\n    },\n\n    {\n      name: 'webkit',\n      use: { ...devices['Desktop Safari'] },\n    }, */\n\n    /* Test against mobile viewports. */\n    // {\n    //   name: 'Mobile Chrome',\n    //   use: { ...devices['Pixel 5'] },\n    // },\n    // {\n    //   name: 'Mobile Safari',\n    //   use: { ...devices['iPhone 12'] },\n    // },\n\n    /* Test against branded browsers. 
*/\n    // {\n    //   name: 'Microsoft Edge',\n    //   use: { ...devices['Desktop Edge'], channel: 'msedge' },\n    // },\n    // {\n    //   name: 'Google Chrome',\n    //   use: { ...devices['Desktop Chrome'], channel: 'chrome' },\n    // },\n  ],\n\n  /* Run your local dev server before starting the tests */\n  webServer: {\n    command: \"pnpm run dev:react\",\n    url: \"http://localhost:5131\",\n    reuseExistingServer: !process.env.CI,\n  },\n});\n"
  },
  {
    "path": "Frontend/postcss.config.js",
    "content": "export default {\n  plugins: {\n    tailwindcss: {},\n    autoprefixer: {},\n  },\n}\n"
  },
  {
    "path": "Frontend/src/app/App.tsx",
    "content": "import { useMemo } from \"react\";\nimport Chat from \"@/components/Chat/Chat\";\nimport { Toaster } from \"@/components/ui/toaster\";\nimport { Header } from \"@/components/Header/Header\";\nimport { useView } from \"@/context/useView\";\nimport CreateAccount from \"@/components/Authentication/CreateAccount\";\nimport SelectAccount from \"@/components/Authentication/SelectAccount\";\nimport History from \"@/components/History/History\";\nimport SettingsAlert from \"@/components/AppAlert/SettingsAlert\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useAppInitialization } from \"@/hooks/useAppInitialization\";\nimport FileExplorer from \"@/components/FileExplorer/FileExplorer\";\n\nfunction App() {\n  const { activeView } = useView();\n  const { users } = useSysSettings();\n\n  useAppInitialization();\n\n  const activeUsages = useMemo(() => {\n    switch (activeView) {\n      case \"Chat\":\n        return <Chat />;\n      case \"History\":\n        return <History />;\n      case \"Signup\":\n        return <CreateAccount />;\n      case \"SelectAccount\":\n        return <SelectAccount users={users} />;\n      case \"FileExplorer\":\n        return <FileExplorer />;\n      default:\n        return null;\n    }\n  }, [activeView, users]);\n\n  return (\n    <div className=\"flex flex-col h-[calc(100vh-1px)] overflow-hidden\">\n      <Toaster />\n      <Header />\n      <SettingsAlert />\n      <div className=\"flex-1 overflow-hidden pt-4\">{activeUsages}</div>\n    </div>\n  );\n}\n\nexport default App;\n"
  },
  {
    "path": "Frontend/src/app/index.css",
    "content": "@tailwind base;\n@tailwind components;\n@tailwind utilities;\n\n@layer base {\n  :root {\n    --gradient: #4ecdc4;\n\n    --background: 187 36.400000000000006% 4.48%;\n    --foreground: 187 5.6000000000000005% 97.8%;\n\n    --muted: 187 28.000000000000004% 16.8%;\n    --muted-foreground: 187 5.6000000000000005% 55.6%;\n\n    --popover: 187 53.8% 7.280000000000001%;\n    --popover-foreground: 187 5.6000000000000005% 97.8%;\n\n    --card: 187 53.8% 7.280000000000001%;\n    --card-foreground: 187 5.6000000000000005% 97.8%;\n\n    --border: 187 28.000000000000004% 16.8%;\n    --input: 187 28.000000000000004% 16.8%;\n\n    --primary: 187 56% 56%;\n    --primary-foreground: 187 5.6000000000000005% 5.6000000000000005%;\n\n    --secondary: 187 28.000000000000004% 16.8%;\n    --secondary-foreground: 187 5.6000000000000005% 97.8%;\n\n    --accent: 187 28.000000000000004% 16.8%;\n    --accent-foreground: 187 5.6000000000000005% 97.8%;\n\n    --destructive: 0 62.8% 30.6%;\n    --destructive-foreground: 187 5.6000000000000005% 97.8%;\n\n    --ring: 187 56% 56%;\n\n    --chart-1: 220 70% 50%;\n    --chart-2: 160 60% 45%;\n    --chart-3: 30 80% 55%;\n    --chart-4: 280 65% 60%;\n    --chart-5: 340 75% 55%;\n  }\n}\n\n@layer base {\n  * {\n    @apply border-border;\n  }\n  body {\n    @apply font-sans antialiased bg-background text-foreground;\n  }\n}\n\n/* Enhanced Button Styles */\n.btn-provider {\n  @apply transition-all duration-200 hover:scale-[1.02] active:scale-[0.98];\n}\n\n.btn-provider.selected {\n  @apply ring-2 ring-primary/80 ring-offset-2 ring-offset-background shadow-lg;\n}\n\n/* Enhanced Input Fields */\n.input-field {\n  @apply bg-secondary/5 border border-border/50 rounded-[8px] px-4 py-2.5 \n         focus:ring-2 focus:ring-primary/30 focus:border-primary/50 \n         transition-all duration-200;\n}\n\n/* Card and Container Styles */\n.settings-card {\n  @apply bg-secondary/10 backdrop-blur-sm rounded-[8px] p-6 \n         border border-border/50 
shadow-lg \n         hover:shadow-primary/5 transition-all duration-300;\n}\n\n.provider-section {\n  @apply space-y-4 bg-background/50 rounded-[8px] p-4 \n         border border-border/50 shadow-inner;\n}\n\n/* Animation Effects */\n@keyframes subtle-pulse {\n  0% {\n    box-shadow: 0 0 0 0 rgba(var(--primary), 0.4);\n  }\n  70% {\n    box-shadow: 0 0 0 6px rgba(var(--primary), 0);\n  }\n  100% {\n    box-shadow: 0 0 0 0 rgba(var(--primary), 0);\n  }\n}\n\n@keyframes dot {\n  0%,\n  20% {\n    opacity: 0;\n  }\n  50% {\n    opacity: 1;\n  }\n  100% {\n    opacity: 0;\n  }\n}\n\n.animate-pulse-subtle {\n  animation: subtle-pulse 2s infinite;\n}\n\n/* Glassmorphism Effects */\n.glass-panel {\n  @apply bg-background/80 backdrop-blur-md border border-white/10 \n         shadow-xl rounded-[8px];\n}\n\n.glass-input {\n  @apply bg-white/5 backdrop-blur-sm border border-white/10 \n         focus:border-primary/50 focus:bg-white/10 transition-all duration-200;\n}\n\n/* Existing Window Control Styles */\n#close {\n  background-color: #4a4a4a;\n}\n\n.focus-within #close {\n  background-color: #ff6057;\n}\n\n#minimize {\n  background-color: #4a4a4a;\n}\n\n.focus-within #minimize {\n  background-color: #ffbd2e;\n}\n\n#maximize {\n  background-color: #4a4a4a;\n}\n\n.focus-within #maximize {\n  background-color: #27c93f;\n}\n\n#unmaximize {\n  background-color: #4a4a4a;\n}\n\n.focus-within #unmaximize {\n  background-color: #27c93f;\n}\n\n/* Existing Header Styles */\n.win-header-button {\n  -webkit-app-region: no-drag;\n  display: inline-flex;\n  justify-content: center;\n  align-items: center;\n  width: 46px;\n  height: 32px;\n  background: transparent;\n  border: none;\n  outline: none;\n  color: #fff;\n  font-family: \"Segoe MDL2 Assets\", \"Segoe UI\", sans-serif;\n  font-size: 10px;\n}\n\n.win-header-button:hover {\n  background: rgba(255, 255, 255, 0.1);\n}\n\n.win-header-button.win-close:hover {\n  background: #e81123;\n}\n\n.win-header-button span {\n  font-size: 
16px;\n  line-height: 1;\n}\n\n.win-header-button.win-maximize {\n  background: #0078d4;\n}\n\n.win-header-button.win-restore {\n  background: #0078d4;\n}\n\nheader {\n  position: fixed;\n  top: 0;\n  left: 0;\n  width: 100%;\n  text-align: left;\n  padding-inline: 2px;\n  box-sizing: border-box;\n  background-color: #181818;\n  -webkit-app-region: drag;\n  z-index: 100;\n}\n\n.header-button {\n  all: unset;\n  border-radius: 50%;\n  width: 0.75rem;\n  height: 0.75rem;\n  margin: 0 0.25rem;\n  -webkit-app-region: no-drag;\n}\n\n.clickable-header-section {\n  cursor: pointer;\n  -webkit-app-region: no-drag;\n}\n\n.header-button:hover {\n  opacity: 0.8;\n}\n\n.window-controls:hover button span {\n  display: block;\n}\n\n/* Code Styles */\ncode {\n  white-space: pre-wrap !important;\n}\n\n/* Scrollbar Styles */\n::-webkit-scrollbar {\n  width: 8px;\n  height: 8px;\n}\n\n::-webkit-scrollbar-track {\n  @apply bg-secondary/20 rounded-full;\n}\n\n::-webkit-scrollbar-thumb {\n  @apply bg-secondary/60 rounded-full hover:bg-secondary/80 transition-colors;\n}\n\n::-webkit-scrollbar-corner {\n  @apply bg-transparent;\n}\n\n/* Markdown Styles */\n.contentMarkdown {\n  @apply text-foreground space-y-3;\n}\n\n.contentMarkdown h1 {\n  @apply text-3xl font-bold mb-4 text-foreground/90;\n}\n\n.contentMarkdown h2 {\n  @apply text-2xl font-bold mb-3 text-foreground/90;\n}\n\n.contentMarkdown h3 {\n  @apply text-xl font-semibold mb-2.5 mt-4 text-foreground/90;\n}\n\n.contentMarkdown p {\n  @apply mb-3 leading-relaxed text-foreground/80;\n}\n\n.contentMarkdown ul {\n  @apply mb-3 space-y-1.5 list-none ml-4;\n}\n\n.contentMarkdown ol {\n  @apply mb-3 space-y-4 list-none;\n}\n\n.contentMarkdown li {\n  @apply relative pl-8 leading-relaxed text-foreground/80;\n}\n\n.contentMarkdown ul > li::before {\n  @apply absolute left-0 top-[0.7em] w-2 h-2 rounded-full bg-primary/70 -translate-y-1/2;\n  content: \"\";\n}\n\n.contentMarkdown ol {\n  counter-reset: item;\n}\n\n.contentMarkdown ol > li 
{\n  counter-increment: item;\n  @apply pl-0;\n}\n\n.contentMarkdown ol > li::before {\n  @apply hidden;\n}\n\n/* Nested lists */\n.contentMarkdown li > ul {\n  @apply mt-2 mb-0 ml-4;\n}\n\n.contentMarkdown li > ol {\n  @apply mt-2 mb-0;\n}\n\n/* Nested list items should have smaller bullets */\n.contentMarkdown li > ul > li::before {\n  @apply w-1.5 h-1.5 bg-primary/50;\n}\n\n.contentMarkdown li > ul > li {\n  @apply pl-6;\n}\n\n.contentMarkdown code {\n  @apply px-2 py-0.5 rounded-md bg-secondary/40 text-primary/90 font-mono text-[13px] border border-secondary/50;\n}\n\n.contentMarkdown strong {\n  @apply font-semibold text-primary/90;\n}\n\n.contentMarkdown a {\n  @apply text-primary/90 hover:text-primary hover:underline decoration-primary/30 transition-colors duration-200;\n}\n\n.contentMarkdown blockquote {\n  @apply border-l-4 border-primary/40 pl-4 italic my-4 text-foreground/70 bg-secondary/20 py-2 pr-3 rounded-r-lg;\n}\n\n.contentMarkdown pre {\n  @apply p-4 rounded-lg bg-secondary/30 overflow-x-auto my-4 border border-secondary/50;\n}\n\n.contentMarkdown pre code {\n  @apply bg-transparent p-0 text-foreground/90 border-0;\n}\n\n.contentMarkdown ul ul,\n.contentMarkdown ol ol,\n.contentMarkdown ul ol,\n.contentMarkdown ol ul {\n  @apply mt-2 mb-0;\n}\n\n.contentMarkdown li > p {\n  @apply inline;\n}\n"
  },
  {
    "path": "Frontend/src/app/main.tsx",
    "content": "import { StrictMode } from \"react\";\nimport { createRoot } from \"react-dom/client\";\nimport \"./index.css\";\nimport App from \"./App\";\nimport UserClientProviders from \"@/context/UserClientProviders\";\n\ncreateRoot(document.getElementById(\"root\")!).render(\n  <StrictMode>\n    <UserClientProviders>\n      <App />\n    </UserClientProviders>\n  </StrictMode>\n);\n"
  },
  {
    "path": "Frontend/src/app/vite-env.d.ts",
    "content": "/// <reference types=\"vite/client\" />\n"
  },
  {
    "path": "Frontend/src/components/AppAlert/SettingsAlert.tsx",
    "content": "import {\n  Dialog,\n  DialogContent,\n  DialogTitle,\n  DialogDescription,\n} from \"@/components/ui/dialog\";\nimport { useUser } from \"@/context/useUser\";\nimport LLMPanel from \"@/components/SettingsModal/SettingsComponents/LLMPanel\";\n\nexport default function SettingsAlert() {\n  const { alertForUser, setAlertForUser } = useUser();\n\n  return (\n    <Dialog open={alertForUser} onOpenChange={setAlertForUser}>\n      <DialogContent>\n        <DialogTitle>LLM Settings</DialogTitle>\n        <DialogDescription>\n          Please add an API key or Select Local Model Deployment\n          <br />\n          *Local Model Deployment requires Ollama to be installed and running\n        </DialogDescription>\n        <LLMPanel />\n      </DialogContent>\n    </Dialog>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Authentication/CreateAccount.tsx",
    "content": "import { Label } from \"@/components/ui/label\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n  Card,\n  CardContent,\n  CardHeader,\n  CardTitle,\n  CardDescription,\n  CardFooter,\n} from \"@/components/ui/card\";\nimport { Input } from \"@/components/ui/input\";\nimport { useView } from \"@/context/useView\";\nimport { useState, useEffect } from \"react\";\nimport { motion, AnimatePresence } from \"framer-motion\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useUser } from \"@/context/useUser\";\n\nexport default function CreateAccount() {\n  const { setActiveView } = useView();\n  const { users, setUsers } = useSysSettings();\n  const { setActiveUser } = useUser();\n  const [accountName, setAccountName] = useState(\"\");\n  const [error, setError] = useState(\"\");\n  const [currentStep, setCurrentStep] = useState(0);\n\n  const steps = [\n    { title: \"Welcome to Notate\", subtitle: null },\n    { title: \"Your Research Hack Tool\", subtitle: null },\n    {\n      title: \"Create Account\",\n      subtitle: \"Enter your name to create an account\",\n    },\n  ];\n\n  useEffect(() => {\n    const timer = setTimeout(() => {\n      if (currentStep < steps.length - 1) {\n        setCurrentStep((prevStep) => prevStep + 1);\n      }\n    }, 1500);\n\n    return () => clearTimeout(timer);\n  }, [currentStep, steps.length]);\n\n  const handleCreateAccount = async () => {\n    if (!accountName.trim()) {\n      setError(\"Please enter a name\");\n      return;\n    }\n\n    try {\n      const user = await window.electron.addUser(accountName);\n      if (user.error) {\n        setError(user.error);\n        return;\n      }\n      const allUsers = (await window.electron.getUsers()).users;\n      const activeUser = allUsers.find((u) => u.name === user.name);\n      if (activeUser) {\n        setActiveUser(activeUser);\n        setUsers(allUsers);\n        setActiveView(\"Chat\");\n      } else {\n        
setError(\"Failed to create account. Please try again.\");\n      }\n    } catch (err) {\n      setError(\"Failed to create account. Please try again.\");\n      console.error(err);\n    }\n  };\n\n  const handleBack = () => setActiveView(\"SelectAccount\");\n\n  const fadeInUp = {\n    initial: { opacity: 0, y: 20 },\n    animate: { opacity: 1, y: 0 },\n    exit: { opacity: 0, y: -20 },\n    transition: { duration: 0.5 },\n  };\n\n  return (\n    <div className=\"h-full flex flex-col items-center justify-center bg-gradient-to-b from-gray-900 to-gray-800 text-white p-8\">\n      <AnimatePresence mode=\"wait\">\n        <motion.div key={currentStep} {...fadeInUp} className=\"text-center\">\n          {currentStep < 2 ? (\n            <>\n              <h1 className=\"text-5xl font-bold mb-6\">\n                {steps[currentStep].title}\n              </h1>\n              {steps[currentStep].subtitle && (\n                <p className=\"text-3xl mb-8\">{steps[currentStep].subtitle}</p>\n              )}\n            </>\n          ) : (\n            <Card className=\"w-full max-w-md\">\n              <CardHeader className=\"space-y-1\">\n                <CardTitle className=\"text-2xl font-bold text-center\">\n                  {steps[currentStep].title}\n                </CardTitle>\n                <CardDescription className=\"text-center\">\n                  {steps[currentStep].subtitle}\n                </CardDescription>\n              </CardHeader>\n              <CardContent className=\"space-y-4\">\n                <div className=\"space-y-2\">\n                  <Label htmlFor=\"name\">Name</Label>\n                  <Input\n                    id=\"name\"\n                    type=\"text\"\n                    placeholder=\"John Doe\"\n                    required\n                    value={accountName}\n                    onChange={(e) => setAccountName(e.target.value)}\n                  />\n                </div>\n                {error && <p 
className=\"text-red-500\">{error}</p>}\n                <Button\n                  className=\"w-full\"\n                  type=\"submit\"\n                  onClick={handleCreateAccount}\n                >\n                  Create Account\n                </Button>\n              </CardContent>\n              {users.length > 0 && (\n                <CardFooter>\n                  <Button\n                    variant=\"outline\"\n                    className=\"w-full\"\n                    onClick={handleBack}\n                  >\n                    Back to Select Account\n                  </Button>\n                </CardFooter>\n              )}\n            </Card>\n          )}\n        </motion.div>\n      </AnimatePresence>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Authentication/SelectAccount.tsx",
    "content": "import { Avatar, AvatarFallback } from \"@/components/ui/avatar\";\nimport { Card, CardContent } from \"@/components/ui/card\";\nimport { useView } from \"@/context/useView\";\nimport { Button } from \"@/components/ui/button\";\nimport { useUser } from \"@/context/useUser\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { motion } from \"framer-motion\";\nimport { Plus } from \"lucide-react\";\nimport { ScrollArea } from \"@/components/ui/scroll-area\";\n\nconst containerVariants = {\n  hidden: { opacity: 0 },\n  visible: { opacity: 1, transition: { duration: 0.5, staggerChildren: 0.1 } },\n};\n\nconst itemVariants = {\n  hidden: { opacity: 0, y: 20 },\n  visible: { opacity: 1, y: 0 },\n};\n\nconst MotionAvatar = motion.create(Avatar);\n\nexport default function SelectAccount({ users }: { users: User[] }) {\n  const { setActiveView } = useView();\n  const { activeUser, setActiveUser } = useUser();\n  const { setSettings } = useSysSettings();\n\n  const fetchSettings = async () => {\n    if (activeUser) {\n      const userSettings = await window.electron.getUserSettings(activeUser.id);\n      setSettings(userSettings);\n    }\n  };\n\n  const handleSelectAccount = (user: User) => {\n    setActiveUser(user);\n    fetchSettings();\n    setActiveView(\"Chat\");\n  };\n\n  return (\n    <div className=\"min-h-screen flex items-center justify-center bg-gradient-to-br from-background via-muted/30 to-muted/50\">\n      <motion.div\n        className=\"w-full max-w-md mx-auto pt-12 flex flex-col h-screen pb-4\"\n        variants={containerVariants}\n        initial=\"hidden\"\n        animate=\"visible\"\n      >\n        <div className=\"flex-none space-y-3 mb-8 px-6\">\n          <div className=\"flex items-center justify-between mb-5\">\n            <motion.h1\n              variants={itemVariants}\n              className=\"text-3xl font-bold tracking-tight bg-clip-text text-transparent bg-gradient-to-r from-primary 
to-primary/80\"\n            >\n              Select Account\n            </motion.h1>\n            <motion.div variants={itemVariants}>\n              <Button\n                onClick={() => setActiveView(\"Signup\")}\n                className=\"group px-4 h-10 shadow-md hover:shadow-xl transition-all duration-300 bg-gradient-to-br from-primary to-primary/80 hover:from-primary hover:to-primary hover:scale-[1.02]\"\n              >\n                <Plus className=\"h-4 w-4 mr-2 group-hover:rotate-90 transition-transform duration-300\" />\n                New Account\n              </Button>\n            </motion.div>\n          </div>\n          <motion.p\n            variants={itemVariants}\n            className=\"text-sm text-muted-foreground\"\n          >\n            Choose your account to access your workspace\n          </motion.p>\n        </div>\n\n        <motion.div\n          className=\"relative flex-1 px-4 min-h-0\"\n          variants={itemVariants}\n        >\n          <div className=\"absolute inset-0 pb-12\">\n            <div className=\"h-full rounded-[10px] bg-background/50 backdrop-blur-sm border border-border/50 shadow-sm\">\n              <ScrollArea className=\"h-full px-4 py-4\">\n                <div className=\"flex flex-col h-full\">\n                  <div className=\"grid auto-rows-min grid-cols-1 gap-3 py-1 px-1 grow\">\n                    {users.map((user) => (\n                      <motion.div\n                        key={user.name}\n                        variants={itemVariants}\n                        whileHover={{ scale: 1.02 }}\n                        whileTap={{ scale: 0.98 }}\n                        className=\"py-0.5\"\n                      >\n                        <Card\n                          className=\"group transition-all duration-300 cursor-pointer border border-border/50 bg-card/95 hover:shadow-lg hover:border-primary/20\"\n                          onClick={() => handleSelectAccount(user)}\n          
              >\n                          <CardContent className=\"flex items-center p-5\">\n                            <MotionAvatar\n                              className=\"h-10 w-10 ring-2 ring-primary/10 group-hover:ring-primary/30 transition-all duration-300\"\n                              initial=\"hidden\"\n                              animate=\"visible\"\n                            >\n                              <AvatarFallback className=\"bg-primary/10 text-primary font-semibold text-sm group-hover:bg-primary/20 transition-colors duration-300\">\n                                {user.name.charAt(0).toUpperCase()}\n                              </AvatarFallback>\n                            </MotionAvatar>\n                            <div className=\"ml-4 min-w-0 flex-1\">\n                              <h3 className=\"font-medium text-sm text-foreground truncate group-hover:text-primary transition-colors duration-300\">\n                                {user.name}\n                              </h3>\n                              <p className=\"text-xs text-muted-foreground truncate mt-0.5\">\n                                Click to access workspace\n                              </p>\n                            </div>\n                          </CardContent>\n                        </Card>\n                      </motion.div>\n                    ))}\n                  </div>\n                </div>\n              </ScrollArea>\n            </div>\n          </div>\n        </motion.div>\n      </motion.div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/Chat.tsx",
    "content": "import { ArrowDown, Loader2 } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport { useUser } from \"@/context/useUser\";\nimport { useChatInput } from \"@/context/useChatInput\";\nimport { useChatLogic } from \"@/hooks/useChatLogic\";\nimport { ChatMessagesArea } from \"./ChatComponents/ChatMessagesArea\";\nimport { ChatInput } from \"./ChatComponents/ChatInput\";\nimport { LoadingIndicator } from \"./ChatComponents/LoadingIndicator\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { IngestProgress } from \"../CollectionModals/CollectionComponents/IngestProgress\";\n\nexport default function Chat() {\n  const {\n    scrollAreaRef,\n    resetCounter,\n    bottomRef,\n    showScrollButton,\n    scrollToBottom,\n  } = useChatLogic();\n  const { localModalLoading } = useSysSettings();\n  const { streamingMessage, streamingMessageReasoning, messages, error } =\n    useUser();\n\n  const { isLoading } = useChatInput();\n\n  return (\n    <div className=\"pt-5 h-[calc(100vh-1rem)] flex flex-col\">\n      <div className={`flex flex-col h-full overflow-hidden relative`}>\n        <ChatMessagesArea\n          scrollAreaRef={scrollAreaRef}\n          messages={messages}\n          streamingMessage={streamingMessage}\n          streamingMessageReasoning={streamingMessageReasoning}\n          error={error}\n          resetCounter={resetCounter}\n          bottomRef={bottomRef}\n        />\n\n        {showScrollButton && (\n          <Button\n            size=\"icon\"\n            variant=\"secondary\"\n            className=\"absolute bottom-32 right-8 rounded-full shadow-lg hover:shadow-xl transition-all\"\n            onClick={() => scrollToBottom()}\n          >\n            <ArrowDown className=\"h-4 w-4\" />\n          </Button>\n        )}\n        <div className=\"flex-1 flex justify-center\">\n          {localModalLoading && (\n            <div className=\"flex items-center gap-2\">\n              
<Loader2 className=\"animate-spin h-4 w-4\" />\n              <span>Loading local model...</span>\n            </div>\n          )}\n          <IngestProgress truncate={true} />\n        </div>\n        {isLoading && (\n          <div className=\"flex justify-center\">\n            <LoadingIndicator />\n          </div>\n        )}\n\n        <div className=\"\">\n          <ChatInput />\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatHeader.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport { PlusCircle } from \"lucide-react\";\nimport { Loader2 } from \"lucide-react\";\nimport { IngestProgress } from \"@/components/CollectionModals/CollectionComponents/IngestProgress\";\nimport logo from \"@/assets/icon.png\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useChatLogic } from \"@/hooks/useChatLogic\";\n\nexport function ChatHeader() {\n  const { localModalLoading } = useSysSettings();\n  const { handleResetChat } = useChatLogic();\n\n  return (\n    <div className=\"p-2 bg-card border-b border-secondary flex items-center\">\n      <div className=\"flex items-center flex-1\">\n        <img src={logo} alt=\"logo\" className=\"h-6 w-6 mr-2\" />\n\n        <h1 className=\"text-2xl font-bold\">Notate</h1>\n      </div>\n      <div className=\"flex-1 flex justify-center\">\n        {localModalLoading && (\n          <div className=\"flex items-center gap-2\">\n            <Loader2 className=\"animate-spin h-4 w-4\" />\n            <span>Loading local model...</span>\n          </div>\n        )}\n        <IngestProgress truncate={true} />\n      </div>\n      <div className=\"flex-1 flex justify-end\">\n        <Button\n          variant=\"secondary\"\n          onClick={() => {\n            handleResetChat();\n          }}\n        >\n          <PlusCircle className=\"mr-2\" /> New Chat\n        </Button>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatInput.tsx",
    "content": "import { LibraryModal } from \"@/components/CollectionModals/LibraryModal\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n  Sheet,\n  SheetTitle,\n  SheetHeader,\n  SheetContent,\n  SheetDescription,\n  SheetTrigger,\n} from \"@/components/ui/sheet\";\nimport { Textarea } from \"@/components/ui/textarea\";\nimport { Library, Send, X, Mic, Loader2, Globe } from \"lucide-react\";\nimport { useState, useEffect, useMemo, useCallback, memo } from \"react\";\nimport { useUser } from \"@/context/useUser\";\nimport { useChatInput } from \"@/context/useChatInput\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\nimport { WebAudioRecorder } from \"@/utils/webAudioRecorder\";\nimport { useLibrary } from \"@/context/useLibrary\";\nexport const ChatInput = memo(function ChatInput() {\n  const { activeUser, toggleTool, userTools, activeConversation } = useUser();\n  const {\n    handleChatRequest,\n    cancelRequest,\n    input,\n    setInput,\n    isLoading,\n    setIsLoading,\n  } = useChatInput();\n  const { openLibrary, setOpenLibrary } = useLibrary();\n  const [isRecording, setIsRecording] = useState(false);\n  const [transcriptionLoading, setTranscriptionLoading] = useState(false);\n  const [loadingDots, setLoadingDots] = useState(\"\");\n  const { isFFMPEGInstalled } = useSysSettings();\n  const audioRecorder = useMemo(() => new WebAudioRecorder(), []);\n  const { selectedCollection } = useLibrary();\n\n  // Memoize the loading dots animation interval\n  useEffect(() => {\n    if (!transcriptionLoading) {\n      setLoadingDots(\"\");\n      return;\n    }\n\n    const interval = setInterval(() => {\n      setLoadingDots((prev: string) => (prev === \"...\" ? 
\"\" : prev + \".\"));\n    }, 500);\n    return () => clearInterval(interval);\n  }, [transcriptionLoading]);\n\n  // Memoize the recording handler\n  const handleRecording = useCallback(async () => {\n    try {\n      if (!isRecording) {\n        await audioRecorder.startRecording();\n        setIsRecording(true);\n      } else {\n        setTranscriptionLoading(true);\n        const audioData = await audioRecorder.stopRecording();\n        setIsRecording(false);\n        if (!activeUser?.id) {\n          console.error(\"No active user ID found\");\n          setTranscriptionLoading(false);\n          return;\n        }\n        const result = await window.electron.transcribeAudio(\n          audioData,\n          activeUser.id\n        );\n\n        if (!result.success) {\n          console.error(\"Failed to transcribe audio:\", result.error);\n          setTranscriptionLoading(false);\n          return;\n        }\n\n        if (result.transcription) {\n          setInput((prev) => {\n            const newInput = prev + (prev ? 
\" \" : \"\") + result.transcription;\n            return newInput;\n          });\n        } else {\n          console.warn(\"No transcription in result:\", result);\n        }\n\n        setTranscriptionLoading(false);\n      }\n    } catch (error) {\n      console.error(\"Error handling recording:\", error);\n      setIsRecording(false);\n      setTranscriptionLoading(false);\n    }\n  }, [isRecording, audioRecorder, activeUser?.id, setInput]);\n\n  // Memoize the tooltip content\n  const tooltipContent = useMemo(() => {\n    if (!isFFMPEGInstalled) return \"Please install FFMPEG to use voice-to-text\";\n    if (transcriptionLoading) return \"Transcribing your audio...\";\n    if (isRecording) return \"Click to stop recording\";\n    return \"Click to start voice recording\";\n  }, [isFFMPEGInstalled, transcriptionLoading, isRecording]);\n\n  // Memoize the form submit handler\n  const handleSubmit = useCallback(\n    (e: React.FormEvent) => {\n      e.preventDefault();\n      if (input.trim()) {\n        handleChatRequest(selectedCollection?.id || undefined);\n      }\n    },\n    [input, handleChatRequest, selectedCollection?.id]\n  );\n\n  // Memoize the send button handler\n  const handleSendClick = useCallback(async () => {\n    if (isLoading) {\n      cancelRequest();\n      setIsLoading(false);\n    } else if (input.trim()) {\n      await handleChatRequest(selectedCollection?.id || undefined);\n    }\n  }, [\n    isLoading,\n    input,\n    cancelRequest,\n    handleChatRequest,\n    selectedCollection?.id,\n    setIsLoading,\n  ]);\n\n  return (\n    <div className=\"p-4 bg-card border-t border-secondary\">\n      <form onSubmit={handleSubmit} className=\"flex w-full items-center\">\n        <div className=\"flex flex-col items-center\">\n          <Sheet open={openLibrary} onOpenChange={setOpenLibrary}>\n            <SheetTrigger asChild>\n              <Button\n                type=\"button\"\n                size=\"icon\"\n                
variant=\"outline\"\n                className=\"flex-shrink-0 rounded-none rounded-tl-[6px]\"\n              >\n                <Library className=\"h-5 w-5\" />\n                <span className=\"sr-only\">Library</span>\n              </Button>\n            </SheetTrigger>\n            <SheetContent\n              className=\"max-h-[100vh] mt-8 overflow-y-auto p-6\"\n              side=\"left\"\n            >\n              <SheetHeader>\n                <SheetTitle>Data Store Library</SheetTitle>\n                <SheetDescription />\n              </SheetHeader>\n              <LibraryModal />\n            </SheetContent>\n          </Sheet>\n          <TooltipProvider>\n            <Tooltip>\n              <TooltipTrigger asChild>\n                <Button\n                  type=\"button\"\n                  size=\"icon\"\n                  disabled={transcriptionLoading}\n                  variant={isRecording ? \"destructive\" : \"outline\"}\n                  onClick={handleRecording}\n                  className=\"flex-shrink-0 rounded-none rounded-bl-[6px] relative\"\n                >\n                  {transcriptionLoading ? (\n                    <Loader2 className=\"h-5 w-5 animate-spin\" />\n                  ) : (\n                    <Mic\n                      className={`h-5 w-5 ${\n                        isRecording ? \"animate-pulse\" : \"\"\n                      } ${!isFFMPEGInstalled ? \"opacity-50\" : \"\"}`}\n                    />\n                  )}\n                  {isRecording && (\n                    <span className=\"absolute -top-1 -right-1 h-2 w-2 rounded-full bg-red-500 z-20 animate-pulse\" />\n                  )}\n                  <span className=\"sr-only\">\n                    {transcriptionLoading\n                      ? \"Transcribing...\"\n                      : isRecording\n                      ? 
\"Stop Recording\"\n                      : \"Start Recording\"}\n                  </span>\n                </Button>\n              </TooltipTrigger>\n              <TooltipContent>\n                <p>{tooltipContent}</p>\n              </TooltipContent>\n            </Tooltip>\n          </TooltipProvider>\n        </div>\n        <div className=\"absolute right-14 bottom-12 z-50\">\n          {userTools.map((tool) => (\n            <Button\n              key={tool.id}\n              size=\"icon\"\n              variant={tool.enabled === 1 ? \"secondary\" : \"outline\"}\n              onClick={() =>\n                toggleTool({\n                  id: tool.id,\n                  name: tool.name,\n                  enabled: tool.enabled,\n                  docked: tool.docked,\n                })\n              }\n              className={`${tool.enabled === 1 ? \"opacity-100\" : \"opacity-40\"}`}\n            >\n              {tool.name === \"Web Search\" ? <Globe /> : <></>}\n            </Button>\n          ))}\n        </div>\n        <Textarea\n          placeholder=\"Type your message here...\"\n          value={transcriptionLoading ? `Transcribing${loadingDots}` : input}\n          onChange={(e) => setInput(e.target.value)}\n          onKeyDown={(e) => {\n            if (e.key === \"Enter\" && !e.shiftKey) {\n              e.preventDefault();\n              if (input.trim()) {\n                handleChatRequest(\n                  selectedCollection?.id || undefined,\n                  undefined,\n                  activeConversation || undefined\n                );\n              }\n            }\n          }}\n          disabled={transcriptionLoading}\n          data-testid=\"chat-input\"\n          className={`z-10 max-h-[72px] min-h-[72px] flex-grow bg-background text-foreground placeholder-muted-foreground border-secondary rounded-none transition-opacity duration-200 [overflow-wrap:anywhere] ${\n            transcriptionLoading ? 
\"opacity-50\" : \"opacity-100\"\n          }`}\n        />\n        <Button\n          type=\"button\"\n          size=\"icon\"\n          variant={isLoading ? \"destructive\" : \"outline\"}\n          onClick={handleSendClick}\n          data-testid=\"chat-submit\"\n          className=\"flex-shrink-0 h-[72px] w-[36px] rounded-none rounded-r-[6px]\"\n        >\n          {isLoading ? <X className=\"h-5 w-5\" /> : <Send className=\"h-5 w-5\" />}\n          <span className=\"sr-only\">{isLoading ? \"Cancel\" : \"Send\"}</span>\n        </Button>\n      </form>\n    </div>\n  );\n});\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatMessage.tsx",
    "content": "import { Avatar, AvatarImage } from \"@/components/ui/avatar\";\nimport {\n  lazy,\n  Suspense,\n  useRef,\n  useEffect,\n  useState,\n  useMemo,\n  memo,\n} from \"react\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n  NotebookPenIcon,\n  ExternalLink,\n  ChevronUp,\n  ChevronDown,\n} from \"lucide-react\";\nimport { getYouTubeLink, formatTimestamp, getFileName } from \"@/lib/utils\";\nimport { providerIcons } from \"@/components/SettingsModal/SettingsComponents/providers/providerIcons\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport rehypeStringify from \"rehype-stringify\";\nimport remarkParse from \"remark-parse\";\nimport remarkRehype from \"remark-rehype\";\nimport { unified } from \"unified\";\nimport remarkGfm from \"remark-gfm\";\nimport remarkFrontmatter from \"remark-frontmatter\";\nimport type { Root, Element } from \"hast\";\nimport { visit } from \"unist-util-visit\";\nimport { ReasoningMessage } from \"./ReasoningMessage\";\n\n// Lazy load the syntax highlighter\nconst SyntaxHighlightedCode = lazy(() =>\n  import(\"@/components/Chat/ChatComponents/SyntaxHightlightedCode\").then(\n    (module) => ({ default: module.SyntaxHighlightedCode })\n  )\n);\n\nexport const ChatMessage = memo(function ChatMessage({\n  message,\n  formatDate,\n}: {\n  message: Message;\n  formatDate: (date: Date) => string;\n}) {\n  const isUser = message?.role === \"user\";\n  const isRetrieval = message?.isRetrieval;\n  const [isDataContentExpanded, setIsDataContentExpanded] = useState(false);\n  const { settings } = useSysSettings();\n  const [renderedContent, setRenderedContent] = useState<\n    (string | JSX.Element)[]\n  >([]);\n  const parsedDataContent = useMemo(() => {\n    if (!message.data_content) return null;\n    try {\n      return JSON.parse(message.data_content);\n    } catch {\n      return null;\n    }\n  }, [message.data_content]);\n\n  const renderDataContent = useMemo(() => {\n    if 
(!parsedDataContent) return null;\n    const topk = parsedDataContent.top_k;\n\n    return (\n      <div className=\"flex flex-col divide-y divide-border\">\n        {parsedDataContent.results.map(\n          (\n            result: {\n              content: string;\n              metadata: {\n                chunk_start?: number;\n                chunk_end?: number;\n                source: string;\n                title?: string;\n              };\n            },\n            index: number\n          ) => (\n            <div\n              key={index}\n              className=\"flex flex-col gap-3 p-4 hover:bg-muted/30 transition-colors duration-200\"\n            >\n              <div className=\"flex items-center gap-2\">\n                <div className=\"flex items-center justify-center w-5 h-5 rounded-[6px] bg-emerald-500/10 text-[10px] font-medium text-emerald-600 dark:text-emerald-400\">\n                  {index + 1}\n                </div>\n                <div className=\"text-xs font-medium text-muted-foreground\">\n                  Source {index + 1} of {topk}\n                </div>\n              </div>\n\n              <div className=\"flex flex-col gap-2.5\">\n                <div className=\"flex flex-col gap-1.5\">\n                  <div className=\"w-full flex items-center gap-2 text-xs bg-background/80 rounded-[8px] border shadow-sm\">\n                    <div className=\"flex-1 flex items-center gap-2 px-3 py-2 min-w-0\">\n                      <span className=\"flex-shrink-0 font-medium text-muted-foreground\">\n                        Source:\n                      </span>\n                      <span className=\"text-emerald-600 dark:text-emerald-400 truncate\">\n                        {result.metadata.chunk_start ? 
(\n                          <a\n                            href={getYouTubeLink(\n                              result.metadata.source,\n                              result.metadata.chunk_start\n                            )}\n                            target=\"_blank\"\n                            rel=\"noopener noreferrer\"\n                            className=\"hover:underline\"\n                          >\n                            {result.metadata.title ||\n                              getFileName(result.metadata.source)}\n                          </a>\n                        ) : result.metadata.source.startsWith(\"http\") ? (\n                          <a\n                            href={result.metadata.source}\n                            target=\"_blank\"\n                            rel=\"noopener noreferrer\"\n                            className=\"hover:underline\"\n                          >\n                            {result.metadata.title ||\n                              getFileName(result.metadata.source)}\n                          </a>\n                        ) : (\n                          <span>\n                            {result.metadata.title ||\n                              getFileName(result.metadata.source)}\n                          </span>\n                        )}\n                      </span>\n                    </div>\n                    <div className=\"flex-shrink-0 px-2 py-2 border-l\">\n                      <Button\n                        variant=\"ghost\"\n                        size=\"icon\"\n                        className=\"h-5 w-5\"\n                        onClick={() => {\n                          const source = result.metadata.source;\n                          if (\n                            source.includes(\"youtube.com\") ||\n                            source.includes(\"youtu.be\")\n                          ) {\n                            window.open(\n                             
 getYouTubeLink(\n                                source,\n                                result.metadata.chunk_start\n                              ),\n                              \"_blank\"\n                            );\n                          } else if (source.startsWith(\"http\")) {\n                            window.open(source, \"_blank\");\n                          } else {\n                            window.electron.openCollectionFolder(source);\n                          }\n                        }}\n                      >\n                        <ExternalLink className=\"w-3.5 h-3.5 text-muted-foreground\" />\n                      </Button>\n                    </div>\n                  </div>\n                  {result.metadata.chunk_start && result.metadata.chunk_end && (\n                    <div className=\"flex items-center gap-2\">\n                      <div className=\"w-1.5 h-1.5 rounded-full bg-emerald-500/30\" />\n                      <span className=\"text-[10px] font-medium text-muted-foreground\">\n                        {formatTimestamp(result.metadata.chunk_start)} -{\" \"}\n                        {formatTimestamp(result.metadata.chunk_end)}\n                      </span>\n                    </div>\n                  )}\n                </div>\n                <div className=\"text-sm text-foreground/90 bg-muted/30 px-3 py-2 rounded-[8px]\">\n                  {result.content}\n                </div>\n              </div>\n            </div>\n          )\n        )}\n      </div>\n    );\n  }, [parsedDataContent]);\n\n  const renderContent = async (content: string) => {\n    if (isUser) {\n      return [\n        <div key=\"user-message\" className=\"text-sm \">\n          {content}\n        </div>,\n      ];\n    }\n\n    const codeBlockRegex = /```(\\w+)?\\n([\\s\\S]*?)```/g;\n    const parts = [];\n    let lastIndex = 0;\n    let match;\n\n    while ((match = codeBlockRegex.exec(content)) !== null) {\n      if 
(match.index > lastIndex) {\n        const textContent = content.slice(lastIndex, match.index).trim();\n        if (textContent) {\n          const result = await unified()\n            .use(remarkParse)\n            .use(remarkFrontmatter)\n            .use(remarkGfm)\n            .use(remarkRehype)\n            .use(() => (tree: Root) => {\n              // Transform links to open in new window\n              visit(tree, \"element\", (node: Element) => {\n                if (node.tagName === \"a\" && node.properties?.href) {\n                  node.properties.onclick = `(function(e){e.preventDefault();window.electron.openExternal(\"${node.properties.href}\")})`;\n                  node.properties.href = \"#\";\n                }\n              });\n              return tree;\n            })\n            .use(rehypeStringify)\n            .process(textContent);\n          parts.push(\n            <div\n              key={`text-${lastIndex}`}\n              className=\"contentMarkdown\"\n              dangerouslySetInnerHTML={{ __html: result }}\n            />\n          );\n        }\n      }\n\n      const language = match[1] || \"text\";\n      const code = match[2].trim();\n      parts.push(\n        <div\n          key={`code-${match.index}`}\n          className=\"w-full overflow-x-auto my-2\"\n        >\n          <Suspense fallback={<div>Loading...</div>}>\n            <SyntaxHighlightedCode code={code} language={language} />\n          </Suspense>\n        </div>\n      );\n\n      lastIndex = match.index + match[0].length;\n    }\n\n    if (lastIndex < content.length) {\n      const textContent = content.slice(lastIndex).trim();\n      if (textContent) {\n        const result = await unified()\n          .use(remarkParse)\n          .use(remarkFrontmatter)\n          .use(remarkGfm)\n          .use(remarkRehype)\n          .use(() => (tree: Root) => {\n            // Transform links to open in new window\n            visit(tree, \"element\", (node: 
Element) => {\n              if (node.tagName === \"a\" && node.properties?.href) {\n                node.properties.onclick = `(function(e){e.preventDefault();window.electron.openExternal(\"${node.properties.href}\")})`;\n                node.properties.href = \"#\";\n              }\n            });\n            return tree;\n          })\n          .use(rehypeStringify)\n          .process(textContent);\n\n        parts.push(\n          <div\n            key={`text-${lastIndex}`}\n            className={`contentMarkdown`}\n            dangerouslySetInnerHTML={{ __html: String(result) }}\n          />\n        );\n      }\n    }\n\n    return parts;\n  };\n\n  const textareaRef = useRef<HTMLTextAreaElement>(null);\n\n  useEffect(() => {\n    if (textareaRef.current) {\n      textareaRef.current.value = message?.content || \"\";\n    }\n  }, [message?.content]);\n\n  useEffect(() => {\n    renderContent(message?.content || \"\").then(setRenderedContent);\n  }, [message?.content]);\n\n  const getProviderIcon = () => {\n    if (isUser) {\n      return { type: \"image\", src: \"/src/assets/avatars/user-avatar.svg\" };\n    }\n    if (isRetrieval) {\n      return { type: \"image\", src: \"/src/assets/avatars/database-avatar.svg\" };\n    }\n    if (settings.provider) {\n      const icon = providerIcons[settings.provider.toLowerCase()];\n\n      if (icon) {\n        return {\n          type: \"component\",\n          component: icon,\n        };\n      }\n    }\n    return { type: \"image\", src: \"/src/assets/avatars/ai-avatar.png\" };\n  };\n\n  const icon = getProviderIcon();\n\n  return (\n    <>\n      {!isUser && message.reasoning_content && (\n        <ReasoningMessage content={message.reasoning_content} />\n      )}\n      <div\n        className={`flex ${\n          isUser ? \"justify-end\" : \"justify-start\"\n        } animate-in fade-in duration-300 mx-4 my-3`}\n      >\n        <div\n          className={`flex ${\n            isUser ? 
\"flex-row-reverse\" : \"flex-row\"\n          } items-end max-w-[85%] group gap-3`}\n        >\n          <Avatar\n            className={`w-9 h-9 border-2 shadow-sm transition-all duration-300 ${\n              isUser\n                ? \"border-primary/50\"\n                : isRetrieval\n                ? \"border-emerald-500/50\"\n                : \"border-secondary/50\"\n            } ${\n              isUser\n                ? \"ring-2 ring-primary ring-offset-2\"\n                : isRetrieval\n                ? \"ring-2 ring-emerald-500 ring-offset-2\"\n                : \"ring-2 ring-secondary ring-offset-2\"\n            } overflow-hidden`}\n          >\n            {icon.type === \"image\" ? (\n              <AvatarImage\n                className=\"object-cover w-full h-full scale-125\"\n                src={icon.src}\n              />\n            ) : (\n              <div className=\"h-full w-full flex items-center justify-center scale-150\">\n                {icon.component}\n              </div>\n            )}\n          </Avatar>\n\n          <div\n            className={`relative px-4 py-3 rounded-[16px] break-words ${\n              isUser\n                ? \"bg-primary/90 text-primary-foreground rounded-br-[4px] shadow-lg shadow-primary/20\"\n                : isRetrieval\n                ? 
\"bg-emerald-50/95 dark:bg-emerald-950/30 text-emerald-900 dark:text-emerald-50 rounded-bl-[4px] border border-emerald-200/70 dark:border-emerald-800/70 shadow-lg shadow-emerald-900/5\"\n                : \"bg-secondary/95 text-secondary-foreground rounded-bl-[4px] shadow-lg shadow-secondary/10\"\n            } hover:shadow-xl transition-all duration-300 ease-in-out w-full backdrop-blur-sm`}\n          >\n            {message.data_content && (\n              <div\n                className=\"flex flex-col gap-1.5 mb-4 select-none\"\n                onClick={() => setIsDataContentExpanded(!isDataContentExpanded)}\n              >\n                <div className=\"flex items-center justify-between px-2 py-1.5 rounded-[8px] bg-muted/40 hover:bg-muted/60 cursor-pointer transition-colors duration-200\">\n                  <div className=\"flex items-center gap-2\">\n                    <div className=\"flex items-center justify-center w-5 h-5 rounded-[6px] bg-primary/10\">\n                      <NotebookPenIcon className=\"w-3.5 h-3.5 text-primary/70\" />\n                    </div>\n                    <span className=\"text-xs font-medium text-foreground/80\">\n                      Reference Notes\n                    </span>\n                    <span className=\"px-1.5 py-0.5 text-[10px] font-medium bg-primary/10 text-primary/70 rounded-[6px]\">\n                      {parsedDataContent?.results.length} sources\n                    </span>\n                  </div>\n                  <div className=\"flex items-center gap-1.5\">\n                    <span className=\"text-[10px] font-medium text-muted-foreground\">\n                      {isDataContentExpanded ? \"Hide\" : \"Show\"} details\n                    </span>\n                    {isDataContentExpanded ? 
(\n                      <ChevronUp className=\"w-3.5 h-3.5 text-muted-foreground\" />\n                    ) : (\n                      <ChevronDown className=\"w-3.5 h-3.5 text-muted-foreground\" />\n                    )}\n                  </div>\n                </div>\n              </div>\n            )}\n            {message.data_content && isDataContentExpanded && (\n              <div className=\"mb-4 overflow-hidden border rounded-[10px]\">\n                <div className=\"px-3 py-2 border-b bg-muted/30\">\n                  <div className=\"text-xs font-medium text-foreground/70\">\n                    Source References\n                  </div>\n                </div>\n                <div className=\"bg-background/50 backdrop-blur-sm\">\n                  {renderDataContent}\n                </div>\n              </div>\n            )}\n            {!isRetrieval && (\n              <div className=\"text-sm mb-2 [overflow-wrap:anywhere] text-left overflow-hidden\">\n                {renderedContent}\n                <div className=\"sr-only\">{message?.content}</div>\n              </div>\n            )}\n            <span\n              className={`text-[11px] mt-4 block opacity-0 group-hover:opacity-100 transition-opacity text-right absolute bottom-1 right-3 ${\n                isUser\n                  ? \"text-primary-foreground/70\"\n                  : isRetrieval\n                  ? \"text-emerald-700 dark:text-emerald-300\"\n                  : \"text-secondary-foreground/70\"\n              }`}\n            >\n              {message?.timestamp ? formatDate(message?.timestamp) : \"\"}\n            </span>\n          </div>\n        </div>\n      </div>\n    </>\n  );\n});\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ChatMessagesArea.tsx",
    "content": "import { ScrollArea } from \"@/components/ui/scroll-area\";\nimport { NewConvoWelcome } from \"./NewConvoWelcome\";\nimport { ChatMessage } from \"./ChatMessage\";\nimport { StreamingReasoningMessage } from \"./StreamingReasoningMessage\";\nimport { StreamingMessage } from \"./StreamingMessage\";\nimport { formatDate } from \"@/lib/utils\";\n\nexport function ChatMessagesArea({\n  scrollAreaRef,\n  messages,\n  streamingMessage,\n  streamingMessageReasoning,\n  error,\n  resetCounter,\n  bottomRef,\n}: {\n  scrollAreaRef: React.RefObject<HTMLDivElement>;\n  messages: Message[];\n  streamingMessage: string | null;\n  streamingMessageReasoning: string | null;\n  error: string | null;\n  resetCounter: number;\n  bottomRef: React.RefObject<HTMLDivElement>;\n}) {\n  return (\n    <ScrollArea \n      ref={scrollAreaRef} \n      className=\"h-full flex-grow relative [&_[data-radix-scroll-area-viewport]>div]:!block [&_[data-radix-scroll-area-viewport]>div]:!h-full\"\n    >\n      <div className=\"h-full flex flex-col px-4\">\n        <div className=\"flex-grow flex flex-col\">\n          {messages.length === 0 && <NewConvoWelcome key={resetCounter} />}\n          {messages.map((message, index) => (\n            <div\n              key={index}\n              className={`message ${\n                message.role === \"user\" ? 
\"user-message\" : \"ai-message\"\n              }`}\n              data-testid={`chat-message-${message.role}`}\n            >\n              <ChatMessage message={message} formatDate={formatDate} />\n            </div>\n          ))}\n          {streamingMessageReasoning && <StreamingReasoningMessage />}\n          {error && (\n            <div className=\"text-red-500 mt-4 p-2 bg-red-100 rounded\">\n              Error: {error}\n            </div>\n          )}{\" \"}\n          {streamingMessage && <StreamingMessage content={streamingMessage} />}\n          <div ref={bottomRef} />\n        </div>\n      </div>\n    </ScrollArea>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/LoadingIndicator.tsx",
    "content": "import { useUser } from \"@/context/useUser\";\nimport { Loader2 } from \"lucide-react\";\n\nexport function LoadingIndicator() {\n  const { streamingMessage } = useUser();\n  return (\n    <div className=\"flex justify-center my-4\">\n      <div className=\"flex items-center bg-secondary/50 text-secondary-foreground rounded-full px-4 py-2 shadow-md\">\n        <Loader2 className=\"w-4 h-4 animate-spin mr-2\" />\n        <span className=\"text-sm\">\n          {streamingMessage ? \"AI is processing\" : \"Thinking\"}\n          <span className=\"inline-flex\">\n            <span className=\"animate-[dot_1.4s_infinite] [animation-delay:0.0s]\">.</span>\n            <span className=\"animate-[dot_1.4s_infinite] [animation-delay:0.2s]\">.</span>\n            <span className=\"animate-[dot_1.4s_infinite] [animation-delay:0.4s]\">.</span>\n          </span>\n        </span>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/NewConvoWelcome.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport { MessageSquare, X } from \"lucide-react\";\nimport notateLogo from \"@/assets/icon.png\";\nimport { useChatInput } from \"@/context/useChatInput\";\nimport { useLibrary } from \"@/context/useLibrary\";\nimport { docSuggestions, suggestions } from \"./suggestions\";\nimport { useMemo } from \"react\";\nimport {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\n\nexport function NewConvoWelcome() {\n  const { handleChatRequest } = useChatInput();\n  const { selectedCollection, setSelectedCollection, setShowUpload } =\n    useLibrary();\n\n  const randomDocSuggestions = useMemo(() => {\n    const shuffled = [...docSuggestions].sort(() => 0.5 - Math.random());\n    return shuffled.slice(0, 3);\n  }, []);\n\n  const randomSuggestions = useMemo(() => {\n    const shuffled = [...suggestions].sort(() => 0.5 - Math.random());\n    return shuffled.slice(0, 3);\n  }, []);\n\n  const handleSuggestionClick = (suggestion: string) => {\n    handleChatRequest(\n      selectedCollection?.id || undefined,\n      suggestion,\n      undefined\n    );\n  };\n\n  return (\n    <div className=\"!h-full flex-1 flex flex-col items-center justify-center px-4  text-center\">\n      <div className=\"space-y-4 md:space-y-6 w-full max-w-[600px]\">\n        <div className=\"space-y-3 md:space-y-4\">\n          <div className=\"flex items-center justify-center mx-auto my-3 md:my-4\">\n            <img\n              src={notateLogo}\n              alt=\"Notate Logo\"\n              className=\"w-10 h-10 md:w-12 md:h-12\"\n            />\n          </div>\n          <h2 className=\"text-xl md:text-2xl font-bold\">\n            Welcome to Notate! 👋\n          </h2>\n          <p className=\"text-sm md:text-base text-muted-foreground px-2\">\n            Your friendly AI assistant. 
Ask me anything about your documents,\n            videos, and web content.\n          </p>\n        </div>\n\n        <div className=\"grid gap-3 md:gap-4 w-full\">\n          <div className=\"grid gap-2 w-full\">\n            {selectedCollection && (\n              <p className=\"text-sm text-muted-foreground flex items-center justify-center gap-2\">\n                Currently viewing:\n                <span className=\"font-semibold text-[#ffffff] max-w-[200px] truncate\">\n                  {selectedCollection.name}\n                </span>\n                <button\n                  className=\"text-red-500 hover:text-red-600 flex items-center\"\n                  onClick={() => {\n                    setSelectedCollection(null);\n                    setShowUpload(false);\n                  }}\n                >\n                  <X className=\"w-3.5 h-3.5\" />\n                </button>\n              </p>\n            )}\n            <div className=\"grid gap-2 w-full\">\n              {selectedCollection\n                ? 
randomDocSuggestions.map((suggestion, i) => (\n                    <Button\n                      key={i}\n                      variant=\"outline\"\n                      className=\"justify-start text-left h-auto py-3 px-4 hover:bg-accent text-xs md:text-sm transition-colors w-full overflow-hidden\"\n                      onClick={() => handleSuggestionClick(suggestion)}\n                    >\n                      <MessageSquare className=\"w-3.5 h-3.5 md:w-4 md:h-4 mr-2 flex-shrink-0\" />\n                      <span className=\"truncate\">{suggestion}</span>\n                    </Button>\n                  ))\n                : randomSuggestions.map((suggestion, i) => (\n                    <TooltipProvider key={i}>\n                      <Tooltip>\n                        <TooltipTrigger asChild>\n                          <Button\n                            variant=\"outline\"\n                            className=\"justify-start text-left h-auto py-3 px-4 hover:bg-accent text-xs md:text-sm transition-colors w-full overflow-hidden\"\n                            onClick={() => handleSuggestionClick(suggestion)}\n                          >\n                            <MessageSquare className=\"w-3.5 h-3.5 md:w-4 md:h-4 mr-2 flex-shrink-0\" />\n                            <span className=\"truncate\">{suggestion}</span>\n                          </Button>\n                        </TooltipTrigger>\n                        <TooltipContent>\n                          <p>{suggestion}</p>\n                        </TooltipContent>\n                      </Tooltip>\n                    </TooltipProvider>\n                  ))}\n            </div>\n          </div>\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/ReasoningMessage.tsx",
    "content": "import { BrainCircuit, ChevronDown, ChevronUp, Sparkles } from \"lucide-react\";\nimport { useState, CSSProperties, useEffect, lazy, Suspense } from \"react\";\nimport { cn } from \"@/lib/utils\";\nimport { unified } from \"unified\";\nimport remarkParse from \"remark-parse\";\nimport remarkRehype from \"remark-rehype\";\nimport rehypeStringify from \"rehype-stringify\";\nimport remarkGfm from \"remark-gfm\";\nimport remarkFrontmatter from \"remark-frontmatter\";\nimport { visit } from \"unist-util-visit\";\nimport type { Root, Element } from \"hast\";\n\n// Lazy load the syntax highlighter\nconst SyntaxHighlightedCode = lazy(() =>\n  import(\"@/components/Chat/ChatComponents/SyntaxHightlightedCode\").then(\n    (module) => ({ default: module.SyntaxHighlightedCode })\n  )\n);\n\ninterface ReasoningMessageProps {\n  content: string;\n}\n\nexport const ReasoningMessage = ({ content }: ReasoningMessageProps) => {\n  const [isExpanded, setIsExpanded] = useState(false);\n  const [renderedContent, setRenderedContent] = useState<(string | JSX.Element)[]>([]);\n\n  const renderContent = async (content: string) => {\n    const codeBlockRegex = /```(\\w+)?\\n([\\s\\S]*?)```/g;\n    const parts = [];\n    let lastIndex = 0;\n    let match;\n\n    while ((match = codeBlockRegex.exec(content)) !== null) {\n      if (match.index > lastIndex) {\n        const textContent = content.slice(lastIndex, match.index).trim();\n        if (textContent) {\n          const result = await unified()\n            .use(remarkParse)\n            .use(remarkFrontmatter)\n            .use(remarkGfm)\n            .use(remarkRehype)\n            .use(() => (tree: Root) => {\n              // Transform links to open in new window\n              visit(tree, \"element\", (node: Element) => {\n                if (node.tagName === \"a\" && node.properties?.href) {\n                  node.properties.onclick = 
`(function(e){e.preventDefault();window.electron.openExternal(\"${node.properties.href}\")})`;\n                  node.properties.href = \"#\";\n                }\n              });\n              return tree;\n            })\n            .use(rehypeStringify)\n            .process(textContent);\n          parts.push(\n            <div\n              key={`text-${lastIndex}`}\n              className=\"contentMarkdown\"\n              dangerouslySetInnerHTML={{ __html: result }}\n            />\n          );\n        }\n      }\n\n      const language = match[1] || \"text\";\n      const code = match[2].trim();\n      parts.push(\n        <div\n          key={`code-${match.index}`}\n          className=\"w-full overflow-x-auto my-2\"\n        >\n          <Suspense fallback={<div>Loading...</div>}>\n            <SyntaxHighlightedCode code={code} language={language} />\n          </Suspense>\n        </div>\n      );\n\n      lastIndex = match.index + match[0].length;\n    }\n\n    if (lastIndex < content.length) {\n      const textContent = content.slice(lastIndex).trim();\n      if (textContent) {\n        const result = await unified()\n          .use(remarkParse)\n          .use(remarkFrontmatter)\n          .use(remarkGfm)\n          .use(remarkRehype)\n          .use(() => (tree: Root) => {\n            // Transform links to open in new window\n            visit(tree, \"element\", (node: Element) => {\n              if (node.tagName === \"a\" && node.properties?.href) {\n                node.properties.onclick = `(function(e){e.preventDefault();window.electron.openExternal(\"${node.properties.href}\")})`;\n                node.properties.href = \"#\";\n              }\n            });\n            return tree;\n          })\n          .use(rehypeStringify)\n          .process(textContent);\n\n        parts.push(\n          <div\n            key={`text-${lastIndex}`}\n            className=\"contentMarkdown\"\n            dangerouslySetInnerHTML={{ __html: 
String(result) }}\n          />\n        );\n      }\n    }\n\n    return parts;\n  };\n\n  useEffect(() => {\n    renderContent(content).then(setRenderedContent);\n  }, [content]);\n\n  return (\n    <div className=\"flex flex-col gap-2 my-4 w-full justify-center items-center\">\n      <div\n        onClick={() => setIsExpanded(!isExpanded)}\n        style={\n          {\n            \"--shimmer-color\": \"rgba(255, 255, 255, 0.3)\",\n            \"--radius\": \"1.25rem\",\n            \"--speed\": \"5s\",\n          } as CSSProperties\n        }\n        className={cn(\n          \"bg-secondary/30 group relative z-0 flex max-w-[80%] cursor-pointer items-center justify-between px-4 py-2.5 overflow-hidden\",\n          \"backdrop-blur-sm border border-secondary/30\",\n          \"transform-gpu transition-all duration-300\",\n          \"hover:bg-secondary/40 hover:scale-[1.02]\",\n          \"[border-radius:var(--radius)]\",\n          \"shadow-[0_0_15px_rgba(0,0,0,0.1)]\"\n        )}\n      >\n        {/* Shimmer border */}\n        <div className=\"absolute inset-0 -z-20\">\n          <div\n            className=\"absolute inset-0 animate-border rounded-[1.25rem]\"\n            style={{\n              background: `linear-gradient(90deg, transparent, var(--shimmer-color), transparent)`,\n              backgroundSize: \"200% 100%\",\n              maskImage: `linear-gradient(black, black), linear-gradient(black, black)`,\n              maskSize: \"100% 100%\",\n              maskPosition: \"0 0, 100% 0\",\n              maskRepeat: \"no-repeat\",\n              WebkitMaskComposite: \"destination-out\",\n              maskComposite: \"exclude\",\n            }}\n          />\n        </div>\n\n        <div className=\"flex items-center gap-3\">\n          <div className=\"flex items-center justify-center w-7 h-7 rounded-xl bg-primary/10 group-hover:bg-primary/20 transition-colors\">\n            <BrainCircuit className=\"w-4 h-4 text-primary/70\" />\n          
</div>\n          <div className=\"flex items-center gap-2\">\n            <span className=\"text-sm font-medium text-primary/90\">\n              Chain of Thought\n            </span>\n            <Sparkles className=\"w-3.5 h-3.5 text-primary/50\" />\n          </div>\n        </div>\n        <div className=\"text-primary/60\">\n          {isExpanded ? (\n            <ChevronUp className=\"w-4 h-4\" />\n          ) : (\n            <ChevronDown className=\"w-4 h-4\" />\n          )}\n        </div>\n\n        {/* Background */}\n        <div className=\"absolute -z-30 [background:var(--bg)] [border-radius:var(--radius)] inset-0\" />\n      </div>\n\n      {isExpanded && (\n        <div className=\"overflow-hidden rounded-xl border border-secondary/30 animate-in slide-in-from-top-2 duration-200 w-[80%] shadow-lg\">\n          <div className=\"px-4 py-2.5 border-b border-secondary/30 bg-secondary/20 backdrop-blur-sm\">\n            <div className=\"text-sm font-medium text-primary/80 flex items-center gap-2\">\n              <Sparkles className=\"w-3.5 h-3.5 text-primary/50\" />\n              Reasoning Process\n            </div>\n          </div>\n          <div className=\"bg-secondary/10 backdrop-blur-sm\">\n            <div className=\"px-5 py-4 text-sm [overflow-wrap:anywhere] text-left overflow-hidden\">\n              {renderedContent}\n              <div className=\"sr-only\">{content}</div>\n            </div>\n          </div>\n        </div>\n      )}\n    </div>\n  );\n};\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/StreamingMessage.tsx",
    "content": "import { Avatar, AvatarImage } from \"@/components/ui/avatar\";\nimport { SyntaxHighlightedCode } from \"@/components/Chat/ChatComponents/SyntaxHightlightedCode\";\nimport { useState, useEffect } from \"react\";\nimport { providerIcons } from \"@/components/SettingsModal/SettingsComponents/providers/providerIcons\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { motion } from \"framer-motion\";\nimport { cn } from \"@/lib/utils\";\nimport rehypeStringify from \"rehype-stringify\";\nimport remarkParse from \"remark-parse\";\nimport remarkRehype from \"remark-rehype\";\nimport { unified } from \"unified\";\nimport remarkGfm from \"remark-gfm\";\nimport remarkFrontmatter from \"remark-frontmatter\";\n\nconst MotionAvatar = motion.create(Avatar);\n\nexport function StreamingMessage({ content }: { content: string }) {\n  const [parsedContent, setParsedContent] = useState<(string | JSX.Element)[]>(\n    []\n  );\n  const { settings } = useSysSettings();\n\n  useEffect(() => {\n    const renderContent = async () => {\n      const parts: (string | JSX.Element)[] = [];\n      let codeBlock = \"\";\n      let isInCodeBlock = false;\n      let language = \"\";\n\n      const lines = content.split(\"\\n\");\n      let textContent = \"\";\n\n      for (const line of lines) {\n        if (line.startsWith(\"```\")) {\n          if (isInCodeBlock) {\n            // End of code block - render the code\n            parts.push(\n              <SyntaxHighlightedCode\n                key={parts.length}\n                code={codeBlock.trim()}\n                language={language}\n              />\n            );\n            codeBlock = \"\";\n            isInCodeBlock = false;\n            language = \"\";\n          } else {\n            // Start of code block - render accumulated text content\n            if (textContent.trim()) {\n              const result = await unified()\n                .use(remarkParse)\n                
.use(remarkFrontmatter)\n                .use(remarkGfm)\n                .use(remarkRehype)\n                .use(rehypeStringify)\n                .process(textContent.trim());\n\n              parts.push(\n                <div\n                  key={parts.length}\n                  className=\"contentMarkdown\"\n                  dangerouslySetInnerHTML={{ __html: String(result) }}\n                />\n              );\n              textContent = \"\";\n            }\n            isInCodeBlock = true;\n            language = line.slice(3).trim() || \"text\";\n          }\n        } else if (isInCodeBlock) {\n          codeBlock += line + \"\\n\";\n        } else {\n          textContent += line + \"\\n\";\n        }\n      }\n\n      // Handle any remaining content\n      if (isInCodeBlock) {\n        parts.push(\n          <SyntaxHighlightedCode\n            key={parts.length}\n            code={codeBlock.trim()}\n            language={language}\n          />\n        );\n      } else if (textContent.trim()) {\n        const result = await unified()\n          .use(remarkParse)\n          .use(remarkFrontmatter)\n          .use(remarkGfm)\n          .use(remarkRehype)\n          .use(rehypeStringify)\n          .process(textContent.trim());\n\n        parts.push(\n          <div\n            key={parts.length}\n            className=\"contentMarkdown\"\n            dangerouslySetInnerHTML={{ __html: String(result) }}\n          />\n        );\n      }\n\n      setParsedContent(parts);\n    };\n\n    renderContent();\n  }, [content]);\n\n  return (\n    <div className=\"flex justify-start animate-in fade-in duration-300 mx-2 my-2\">\n      <div className=\"flex flex-row items-end max-w-[80%]\">\n        <div className=\"relative\">\n          <MotionAvatar\n            className=\"relative z-10 w-9 h-9 border-2 shadow-sm\"\n            style={\n              {\n                \"--pulse-color\": \"hsl(var(--primary) / 0.3)\",\n                \"--duration\": 
\"2s\",\n              } as React.CSSProperties\n            }\n          >\n            <motion.div className=\"h-full w-full flex items-center justify-center\">\n              {settings.provider ? (\n                providerIcons[settings.provider.toLowerCase()]\n              ) : (\n                <AvatarImage\n                  className=\"object-cover w-full h-full scale-125\"\n                  src=\"/src/assets/avatars/ai-avatar.png\"\n                />\n              )}\n            </motion.div>\n          </MotionAvatar>\n          <motion.div\n            className={cn(\n              \"absolute left-1/2 top-1/2 w-full h-full -translate-x-1/2 -translate-y-1/2\",\n              \"rounded-full animate-pulse\"\n            )}\n            style={\n              {\n                \"--pulse-color\": \"hsl(var(--primary) / 0.3)\",\n                \"--duration\": \"2s\",\n              } as React.CSSProperties\n            }\n          />\n          <motion.div\n            className={cn(\n              \"absolute left-1/2 top-1/2 w-[120%] h-[120%] -translate-x-1/2 -translate-y-1/2\",\n              \"rounded-full animate-pulse\"\n            )}\n            style={\n              {\n                \"--pulse-color\": \"hsl(var(--primary) / 0.2)\",\n                \"--duration\": \"2s\",\n              } as React.CSSProperties\n            }\n          />\n        </div>\n        <div className=\"mx-2 my-1 p-3 rounded-2xl bg-secondary text-secondary-foreground shadow-md rounded-bl-none\">\n          <div className=\"text-sm break-words text-left\">{parsedContent}</div>\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/StreamingReasoningMessage.tsx",
    "content": "import { BrainCircuit, ChevronDown, ChevronUp, Sparkles } from \"lucide-react\";\nimport { useState, CSSProperties, useEffect } from \"react\";\nimport { cn } from \"@/lib/utils\";\nimport { useUser } from \"@/context/useUser\";\nimport { SyntaxHighlightedCode } from \"@/components/Chat/ChatComponents/SyntaxHightlightedCode\";\nimport rehypeStringify from \"rehype-stringify\";\nimport remarkParse from \"remark-parse\";\nimport remarkRehype from \"remark-rehype\";\nimport { unified } from \"unified\";\nimport remarkGfm from \"remark-gfm\";\nimport remarkFrontmatter from \"remark-frontmatter\";\nimport { Button } from \"@/components/ui/button\";\n\nexport const StreamingReasoningMessage = () => {\n  const { streamingMessageReasoning, agentActions } = useUser();\n  const [isExpanded, setIsExpanded] = useState(false);\n  const [agentActionsExpanded, setAgentActionsExpanded] = useState(false);\n  const [parsedContent, setParsedContent] = useState<(string | JSX.Element)[]>(\n    []\n  );\n\n  useEffect(() => {\n    const renderContent = async () => {\n      const parts: (string | JSX.Element)[] = [];\n      let codeBlock = \"\";\n      let isInCodeBlock = false;\n      let language = \"\";\n\n      const lines = streamingMessageReasoning?.split(\"\\n\") || [];\n      let textContent = \"\";\n\n      for (const line of lines) {\n        if (line.startsWith(\"```\")) {\n          if (isInCodeBlock) {\n            // End of code block - render the code\n            parts.push(\n              <SyntaxHighlightedCode\n                key={parts.length}\n                code={codeBlock.trim()}\n                language={language}\n              />\n            );\n            codeBlock = \"\";\n            isInCodeBlock = false;\n            language = \"\";\n          } else {\n            // Start of code block - render accumulated text content\n            if (textContent.trim()) {\n              const result = await unified()\n                
.use(remarkParse)\n                .use(remarkFrontmatter)\n                .use(remarkGfm)\n                .use(remarkRehype)\n                .use(rehypeStringify)\n                .process(textContent.trim());\n\n              parts.push(\n                <div\n                  key={parts.length}\n                  className=\"contentMarkdown\"\n                  dangerouslySetInnerHTML={{ __html: String(result) }}\n                />\n              );\n              textContent = \"\";\n            }\n            isInCodeBlock = true;\n            language = line.slice(3).trim() || \"text\";\n          }\n        } else if (isInCodeBlock) {\n          codeBlock += line + \"\\n\";\n        } else {\n          textContent += line + \"\\n\";\n        }\n      }\n\n      // Handle any remaining content\n      if (isInCodeBlock) {\n        parts.push(\n          <SyntaxHighlightedCode\n            key={parts.length}\n            code={codeBlock.trim()}\n            language={language}\n          />\n        );\n      } else if (textContent.trim()) {\n        const result = await unified()\n          .use(remarkParse)\n          .use(remarkFrontmatter)\n          .use(remarkGfm)\n          .use(remarkRehype)\n          .use(rehypeStringify)\n          .process(textContent.trim());\n\n        parts.push(\n          <div\n            key={parts.length}\n            className=\"contentMarkdown\"\n            dangerouslySetInnerHTML={{ __html: String(result) }}\n          />\n        );\n      }\n\n      setParsedContent(parts);\n    };\n\n    renderContent();\n  }, [streamingMessageReasoning]);\n\n  return (\n    <div className=\"flex flex-col gap-2 my-4 w-full justify-center items-center\">\n      <div\n        onClick={() => setIsExpanded(!isExpanded)}\n        style={\n          {\n            \"--shimmer-color\": \"rgba(255, 255, 255, 0.3)\",\n            \"--radius\": \"1rem\",\n            \"--speed\": \"5s\",\n          } as CSSProperties\n        }\n        
className={cn(\n          \" bg-secondary/30 group relative z-0 flex max-w-[80%] cursor-pointer items-center justify-between px-3 py-2 overflow-hidden\",\n          \"backdrop-blur-sm border border-secondary/30\",\n          \"transform-gpu transition-all duration-300\",\n          \"hover:bg-secondary/30\",\n          \"[border-radius:var(--radius)]\"\n        )}\n      >\n        {/* Shimmer border */}\n        <div className=\"absolute inset-0 -z-20\">\n          <div\n            className=\"absolute inset-0 animate-border rounded-lg\"\n            style={{\n              background: `linear-gradient(90deg, transparent, var(--shimmer-color), transparent)`,\n              backgroundSize: \"200% 100%\",\n              maskImage: `linear-gradient(black, black), linear-gradient(black, black)`,\n              maskSize: \"100% 100%\",\n              maskPosition: \"0 0, 100% 0\",\n              maskRepeat: \"no-repeat\",\n              WebkitMaskComposite: \"destination-out\",\n              maskComposite: \"exclude\",\n            }}\n          />\n        </div>\n\n        <div className=\"flex items-center gap-2.5\">\n          <div className=\"flex items-center justify-center w-6 h-6 rounded-md bg-primary/10 group-hover:bg-primary/20 transition-colors\">\n            <BrainCircuit className=\"w-4 h-4 text-primary/70\" />{\" \"}\n          </div>\n          <span className=\"text-sm font-medium text-primary/90\">\n            Chain of Thought\n          </span>\n          <span className=\"text-xs text-primary/60\">\n            Thinking\n            <span className=\"inline-flex\">\n              <span className=\"animate-[dot_1.4s_infinite] [animation-delay:0.0s]\">\n                .\n              </span>\n              <span className=\"animate-[dot_1.4s_infinite] [animation-delay:0.2s]\">\n                .\n              </span>\n              <span className=\"animate-[dot_1.4s_infinite] [animation-delay:0.4s]\">\n                .\n              </span>\n 
           </span>\n          </span>\n        </div>\n        <div className=\"text-primary/60\">\n          {isExpanded ? (\n            <ChevronUp className=\"w-4 h-4\" />\n          ) : (\n            <ChevronDown className=\"w-4 h-4\" />\n          )}\n        </div>\n\n        {/* Background */}\n        <div className=\"absolute -z-30 [background:var(--bg)] [border-radius:var(--radius)] inset-0\" />\n      </div>\n\n      {isExpanded && (\n        <div className=\"overflow-hidden rounded-lg border border-secondary/30 animate-in slide-in-from-top-2 duration-200 w-[80%]\">\n          <div className=\"px-4 py-2.5 border-b border-secondary/30 bg-secondary/20 backdrop-blur-sm\">\n            <div className=\"text-sm font-medium text-primary/80 flex items-center gap-2\">\n              <Sparkles className=\"w-3.5 h-3.5 text-primary/50\" />\n              Reasoning Process\n            </div>\n          </div>\n\n          {agentActions &&\n            (agentActionsExpanded ? (\n              <div className=\"bg-secondary/10 backdrop-blur-sm\">\n                <div className=\"px-4 py-3 text-sm break-words [overflow-wrap:anywhere] text-left overflow-hidden\">\n                  {agentActions}\n                </div>\n              </div>\n            ) : (\n              <div className=\"bg-secondary/10 backdrop-blur-sm\">\n                <Button onClick={() => setAgentActionsExpanded(true)}>\n                  View Actions\n                </Button>\n              </div>\n            ))}\n          <div className=\"bg-secondary/10 backdrop-blur-sm\">\n            <div className=\"px-4 py-3 text-sm break-words [overflow-wrap:anywhere] text-left overflow-hidden\">\n              {parsedContent}\n              <div className=\"sr-only\">{streamingMessageReasoning}</div>\n            </div>\n          </div>\n        </div>\n      )}\n    </div>\n  );\n};\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/SyntaxHightlightedCode.tsx",
    "content": "import { highlightCode } from \"@/lib/shikiHightlight\";\nimport { useClipboard } from \"use-clipboard-copy\";\nimport { useState } from \"react\";\nimport { Check, Copy } from \"lucide-react\";\n\ninterface SyntaxHighlightedCodeProps {\n  code: string;\n  language: string;\n}\n\nexport function SyntaxHighlightedCode({\n  code,\n  language,\n}: SyntaxHighlightedCodeProps) {\n  const normalizedLanguage = normalizeLanguage(language, code);\n  const highlightedCode = highlightCode(code, normalizedLanguage);\n  const clipboard = useClipboard();\n  const [isCopied, setIsCopied] = useState(false);\n\n  const handleCopy = () => {\n    clipboard.copy(code);\n    setIsCopied(true);\n    setTimeout(() => setIsCopied(false), 2000);\n  };\n\n  return (\n    <div className=\"rounded-[4px] overflow-hidden bg-[#22272e] relative max-w-full\">\n      <div className=\"absolute top-2 right-2\">\n        <button\n          onClick={handleCopy}\n          className=\"p-2 bg-gray-700 rounded-[4px] text-gray-300 hover:bg-gray-600 focus:outline-none focus:ring-2 focus:ring-gray-500 transition-colors\"\n          aria-label=\"Copy code\"\n        >\n          {isCopied ? 
(\n            <Check className=\"w-5 h-5 text-green-500\" />\n          ) : (\n            <Copy className=\"w-5 h-5\" />\n          )}\n        </button>\n      </div>\n      <div className=\"overflow-x-auto p-4\">\n        <pre className=\"break-words w-full\">\n          <div\n            dangerouslySetInnerHTML={{ __html: highlightedCode }}\n            style={{\n              whiteSpace: \"pre-wrap\",\n              wordBreak: \"break-word\",\n              overflowWrap: \"break-word\",\n              maxWidth: \"100%\",\n              display: \"block\",\n              fontSize: \"0.9em\",\n              lineHeight: \"1.5\",\n            }}\n          />\n        </pre>\n      </div>\n    </div>\n  );\n}\n\nfunction normalizeLanguage(language: string, code: string): string {\n  language = language.toLowerCase();\n  if (\n    language === \"typescript\" ||\n    language === \"ts\" ||\n    language === \"tsx\" ||\n    language === \"jsx\"\n  ) {\n    return \"typescript\";\n  }\n  if (language === \"javascript\" || language === \"js\") {\n    return \"javascript\";\n  }\n  if (!language && /^\\s*\\{[\\s\\S]*\\}\\s*$/.test(code)) {\n    return \"json\";\n  }\n  return language || \"plaintext\";\n}\n"
  },
  {
    "path": "Frontend/src/components/Chat/ChatComponents/suggestions.tsx",
    "content": "export const docSuggestions = [\n  \"What documents have I uploaded?\",\n  \"Summarize the documents in this collection\",\n  \"What are the key points from my notes?\",\n  \"What are the specific methodologies used in my research papers?\",\n  \"What are the latest dates and deadlines mentioned in my notes?\",\n  \"Find the technical specifications and measurements cited\",\n  \"Show me the budget numbers and financial figures and their sources\",\n  \"List the step-by-step procedures from my technical guides\",\n  \"What software versions and tools are referenced?\",\n];\n\nexport const suggestions = [\n  \"Explain how photosynthesis works\",\n  \"What are the main causes of climate change?\",\n  \"Tell me about the history of the Internet\",\n  \"How does the human immune system work?\",\n  \"What were the major events of World War II?\",\n  \"Explain the theory of relativity in simple terms\",\n  \"What is the difference between DNA and RNA?\",\n  \"How do earthquakes and volcanoes form?\",\n  \"What are the fundamental principles of economics?\",\n  \"Explain the process of evolution\",\n  \"What are the main periods in art history?\",\n  \"How does the solar system work?\",\n  \"What are the basic principles of psychology?\",\n  \"Tell me about the major world religions\",\n  \"How does artificial intelligence work?\",\n  \"What are the key events in the Civil Rights Movement?\",\n  \"Explain how democracy developed throughout history\",\n  \"What are the main branches of philosophy?\",\n  \"How does the human brain process information?\",\n  \"What are the fundamental laws of physics?\"\n];\n"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/AddLibrary.tsx",
    "content": "import { Label } from \"@/components/ui/label\";\nimport { Textarea } from \"@/components/ui/textarea\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n  Select,\n  SelectContent,\n  SelectItem,\n  SelectTrigger,\n  SelectValue,\n} from \"@/components/ui/select\";\nimport { ChevronLeft, Cloud, Database, Settings2, X } from \"lucide-react\";\nimport { useEffect, useState } from \"react\";\nimport { sanitizeStoreName } from \"@/lib/utils\";\nimport { useUser } from \"@/context/useUser\";\nimport { useLibrary } from \"@/context/useLibrary\";\nimport {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\nimport { Input } from \"@/components/ui/input\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { Progress } from \"@/components/ui/progress\";\nimport { fetchEmbeddingModels } from \"@/data/models\";\n\nexport default function AddLibrary() {\n  const [newStore, setNewStore] = useState(\"\");\n  const [newStoreError, setNewStoreError] = useState<string | null>(null);\n  const [newStoreDescription, setNewStoreDescription] = useState(\"\");\n  const [isLocal, setIsLocal] = useState(true);\n  const [showAdvancedSettings, setShowAdvancedSettings] = useState(false);\n  const [localEmbeddingModel, setLocalEmbeddingModel] = useState(\n    \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\"\n  );\n  const [customModel, setCustomModel] = useState(\"\");\n  const [showCustomInput, setShowCustomInput] = useState(false);\n  const [isDownloading, setIsDownloading] = useState(false);\n  const [newStoreType, setNewStoreType] = useState(\"Notes\");\n  const [currentFile, setCurrentFile] = useState<string>();\n  const [fileProgress, setFileProgress] = useState(0);\n  const { activeUser, apiKeys } = useUser();\n  const { setLocalModalLoading } = useSysSettings();\n  const [downloadProgress, setDownloadProgress] =\n    
useState<DownloadProgressData>({\n      message: \"\",\n      totalProgress: 0,\n    });\n  const {\n    setUserCollections,\n    setSelectedCollection,\n    setShowUpload,\n    setShowAddStore,\n    setFiles,\n    setProgressMessage,\n    progressMessage,\n    embeddingModels,\n    setEmbeddingModels,\n  } = useLibrary();\n\n  useEffect(() => {\n    const handleProgress = (\n      _: Electron.IpcRendererEvent,\n      message: string | OllamaProgressEvent | DownloadModelProgress\n    ) => {\n      if (\n        typeof message === \"object\" &&\n        \"type\" in message &&\n        message.type === \"progress\"\n      ) {\n        const {\n          message: progressMessage,\n          fileName,\n          fileProgress,\n          totalProgress,\n          ...rest\n        } = message.data;\n        setProgressMessage(progressMessage);\n        setDownloadProgress({\n          message: progressMessage,\n          totalProgress,\n          ...rest,\n        });\n        if (fileName) setCurrentFile(fileName);\n        if (typeof fileProgress === \"number\") setFileProgress(fileProgress);\n      }\n    };\n\n    window.electron.removeListener(\"download-model-progress\", handleProgress);\n    window.electron.on(\"download-model-progress\", handleProgress);\n\n    return () => {\n      window.electron.removeListener(\"download-model-progress\", handleProgress);\n    };\n  }, []);\n\n  const handleCancel = async () => {\n    try {\n      const result = await window.electron.cancelDownload();\n      if (result.success) {\n        toast({\n          title: \"Download cancelled\",\n          description: \"Model download was cancelled successfully\",\n        });\n      }\n    } catch (error) {\n      console.error(\"Error cancelling download:\", error);\n    } finally {\n      setIsDownloading(false);\n      setLocalModalLoading(false);\n      setDownloadProgress({ message: \"\", totalProgress: 0 });\n      setFileProgress(0);\n      setProgressMessage(\"\");\n      
setCurrentFile(undefined);\n    }\n  };\n\n  const handleCreateCollection = async () => {\n    if (!activeUser) return;\n\n    const cleanedStore = await sanitizeStoreName(newStore);\n\n    if (!apiKeys.find((key) => key.provider === \"openai\")) {\n      setIsLocal(true);\n      setLocalEmbeddingModel(\n        \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\"\n      );\n    }\n\n    const newCollection = (await window.electron.createCollection(\n      activeUser.id,\n      cleanedStore,\n      newStoreDescription,\n      newStoreType,\n      isLocal,\n      localEmbeddingModel\n    )) as unknown as Collection;\n\n    if (newCollection.id === undefined) {\n      setNewStoreError(\"This name is already in use\");\n      return;\n    }\n\n    window.electron.updateUserSettings({\n      vectorstore: newCollection.id.toString(),\n    });\n\n    setUserCollections((prevCollections) => [\n      ...prevCollections,\n      newCollection,\n    ]);\n    setShowAddStore(false);\n    setFiles([]);\n    setNewStore(\"\");\n    setNewStoreDescription(\"\");\n    setNewStoreType(\"Notes\");\n    setSelectedCollection(newCollection);\n    setShowUpload(true);\n  };\n\n  return (\n    <div className=\"space-y-6\">\n      <div className=\"space-y-4\">\n        <div className=\"grid grid-cols-4 items-center gap-4\">\n          <div className=\"col-span-4\">\n            <div className=\"grid grid-cols-4 items-center gap-4 \">\n              <Button\n                variant=\"outline\"\n                size=\"icon\"\n                className=\"flex items-center gap-2\"\n                onClick={() => setShowAddStore(false)}\n              >\n                <ChevronLeft className=\"h-4 w-4\" />\n              </Button>\n              <Label\n                htmlFor=\"newStore\"\n                className=\"col-span-2 text-center text-lg\"\n              >\n                Create a new store\n              </Label>\n              {isLocal && (\n                <div 
className=\"col-span-1 flex items-center gap-2 justify-end\">\n                  <Button\n                    variant=\"outline\"\n                    size=\"icon\"\n                    type=\"button\"\n                    onClick={() =>\n                      setShowAdvancedSettings(!showAdvancedSettings)\n                    }\n                  >\n                    <Settings2 className=\"h-4 w-4\" />\n                  </Button>\n                </div>\n              )}\n            </div>\n          </div>\n        </div>\n        <div className=\"rounded-[6px] px-4 pb-4 bg-gradient-to-br from-secondary/50 via-secondary/30 to-background border\">\n          <div className=\"rounded-[6px] p-4\">\n            <div className=\"grid grid-cols-4 items-center gap-4\">\n              <Label htmlFor=\"newStore\" className=\"text-right\">\n                Store Name\n              </Label>\n              <div className=\"col-span-3\">\n                {newStoreError && (\n                  <p className=\"text-destructive text-sm mb-2\">\n                    {newStoreError}\n                  </p>\n                )}\n                <Textarea\n                  id=\"newStore\"\n                  placeholder=\"Enter store name\"\n                  value={newStore}\n                  onChange={(e) => {\n                    setNewStore(e.target.value);\n                    setNewStoreError(null);\n                  }}\n                  className=\"resize-none bg-background\"\n                />\n              </div>\n            </div>\n\n            <div className=\"grid grid-cols-4 items-center gap-4 mt-4\">\n              <Label htmlFor=\"description\" className=\"text-right\">\n                Description\n              </Label>\n              <div className=\"col-span-3\">\n                <Textarea\n                  id=\"description\"\n                  placeholder=\"Enter store description (optional)\"\n                  value={newStoreDescription}\n             
     onChange={(e) => setNewStoreDescription(e.target.value)}\n                  className=\"resize-none bg-background\"\n                />\n              </div>\n            </div>\n          </div>\n\n          <div className=\"space-y-4\">\n            <div className=\"w-full\">\n              <div className=\"\">\n                <div className=\"flex gap-4\">\n                  <TooltipProvider>\n                    <Tooltip>\n                      <TooltipTrigger asChild>\n                        <div>\n                          <Button\n                            disabled={\n                              apiKeys.find(\n                                (key) => key.provider === \"openai\"\n                              ) === undefined\n                            }\n                            type=\"button\"\n                            variant={isLocal ? \"outline\" : \"secondary\"}\n                            className=\"flex-1 sm:text-[14px] text-[10px]\"\n                            onClick={() => setIsLocal(false)}\n                          >\n                            <Cloud className=\"h-4 w-4 mr-2\" />\n                            Open AI Embeddings\n                          </Button>\n                        </div>\n                      </TooltipTrigger>\n                      <TooltipContent>\n                        <p>\n                          You must have an OpenAI API key to use this feature.\n                        </p>\n                      </TooltipContent>\n                    </Tooltip>\n                  </TooltipProvider>\n                  <Button\n                    type=\"button\"\n                    variant={isLocal ? 
\"secondary\" : \"outline\"}\n                    className=\"flex-1 sm:text-[14px] text-[10px]\"\n                    onClick={() => setIsLocal(true)}\n                  >\n                    <Database className=\"h-4 w-4 mr-2\" />\n                    Local Embeddings\n                  </Button>\n                </div>\n              </div>\n            </div>\n\n            {isLocal && showAdvancedSettings && (\n              <>\n                <div className=\"grid grid-cols-4 items-center gap-4\">\n                  <Label htmlFor=\"localEmbeddingModel\" className=\"text-right\">\n                    Embeddings\n                  </Label>\n                  <div className=\"col-span-3\">\n                    <Select\n                      value={localEmbeddingModel}\n                      onValueChange={(value) => {\n                        setLocalEmbeddingModel(value);\n                        setShowCustomInput(value === \"custom\");\n                      }}\n                    >\n                      <SelectTrigger\n                        id=\"localEmbeddingModel\"\n                        className=\"bg-background\"\n                      >\n                        <SelectValue placeholder=\"Select embedding model\" />\n                      </SelectTrigger>\n                      <SelectContent className=\"bg-background\">\n                        <SelectItem value=\"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\">\n                          Default:\n                          HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\n                        </SelectItem>\n                        {embeddingModels\n                          .filter(\n                            (model) =>\n                              model.name !==\n                              \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\"\n                          )\n                          .map((model) => (\n                            <SelectItem 
key={model.name} value={model.name}>\n                              {model.name}\n                            </SelectItem>\n                          ))}\n                        <SelectItem value=\"custom\">\n                          Add Hugging Face Model\n                        </SelectItem>\n                      </SelectContent>\n                    </Select>\n                  </div>\n                </div>\n\n                {showCustomInput && (\n                  <div className=\"space-y-4\">\n                    <div className=\"\">\n                      <Input\n                        id=\"customModel\"\n                        placeholder=\"Enter Model Repo eg: 'HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5'\"\n                        value={customModel}\n                        onChange={(e) => setCustomModel(e.target.value)}\n                        className=\"resize-none\"\n                      />\n                    </div>\n                    <div className=\"col-span-1\">\n                      <Button\n                        type=\"button\"\n                        disabled={isDownloading}\n                        onClick={async () => {\n                          if (customModel.trim()) {\n                            setIsDownloading(true);\n                            try {\n                              const modelsPath =\n                                await window.electron.getModelsPath();\n                              await window.electron.downloadModel({\n                                modelId: customModel.trim(),\n                                dirPath: modelsPath + \"/\" + customModel.trim(),\n                              });\n                              await fetchEmbeddingModels(setEmbeddingModels);\n                              setLocalEmbeddingModel(customModel.trim());\n                              setShowCustomInput(false);\n                            } catch (error) {\n                              
console.error(\"Error downloading model:\", error);\n                              // You might want to show an error message to the user here\n                            } finally {\n                              setIsDownloading(false);\n                            }\n                          }\n                        }}\n                        className=\"w-full\"\n                      >\n                        {isDownloading ? \"Downloading...\" : \"Download Model\"}\n                      </Button>\n                    </div>\n                  </div>\n                )}\n              </>\n            )}\n            {progressMessage && (\n              <div className=\"mt-4 p-4 rounded-md border bg-background\">\n                <div className=\"space-y-2\">\n                  <div className=\"flex items-center justify-between\">\n                    <p className=\"text-sm text-secondary-foreground\">\n                      {progressMessage}\n                    </p>\n                    {isDownloading && (\n                      <Button\n                        variant=\"destructive\"\n                        size=\"sm\"\n                        onClick={handleCancel}\n                        className=\"h-6 px-2\"\n                      >\n                        <X className=\"h-4 w-4\" />\n                      </Button>\n                    )}\n                  </div>\n                  {currentFile && (\n                    <div className=\"space-y-1\">\n                      <div className=\"flex justify-between items-center\">\n                        <p className=\"text-xs text-muted-foreground truncate flex-1\">\n                          {currentFile}\n                        </p>\n                        <p className=\"text-xs text-muted-foreground ml-2\">\n                          {fileProgress}%\n                        </p>\n                      </div>\n                      <Progress value={fileProgress} className=\"h-1\" />\n        
              <div className=\"flex justify-between text-xs text-muted-foreground\">\n                        <span>\n                          {downloadProgress.currentSize || \"0 B\"} /{\" \"}\n                          {downloadProgress.totalSize || \"0 B\"}\n                        </span>\n                        {downloadProgress.speed && (\n                          <span>{downloadProgress.speed}</span>\n                        )}\n                      </div>\n                    </div>\n                  )}\n                  <div className=\"space-y-1\">\n                    <div className=\"flex justify-between text-xs text-muted-foreground\">\n                      <span>Total Progress</span>\n                      <span>{downloadProgress.totalProgress}%</span>\n                    </div>\n                    <Progress\n                      value={downloadProgress.totalProgress}\n                      className=\"h-1\"\n                    />\n                  </div>\n                </div>\n              </div>\n            )}\n            <div className=\"grid grid-cols-4 items-center gap-4 \">\n              <Label htmlFor=\"storeType\" className=\"text-right \">\n                Store Type\n              </Label>\n              <div className=\"col-span-3 bg-background\">\n                <Select\n                  value={newStoreType}\n                  onValueChange={(value) => setNewStoreType(value)}\n                >\n                  <SelectTrigger id=\"storeType\">\n                    <SelectValue placeholder=\"Select store type\" />\n                  </SelectTrigger>\n                  <SelectContent>\n                    <SelectItem value=\"Notes\">Notes</SelectItem>\n                    <SelectItem value=\"Chats\">Chats</SelectItem>\n                  </SelectContent>\n                </Select>\n              </div>\n            </div>\n          </div>\n        </div>\n      </div>\n      <div className=\"flex justify-between gap-4 
pt-4 border-t\">\n        <Button\n          type=\"button\"\n          onClick={() => setShowAddStore(false)}\n          className=\"w-32 text-red-900\"\n          variant=\"outline\"\n        >\n          Cancel\n        </Button>\n        <Button\n          disabled={isDownloading || !newStore}\n          type=\"button\"\n          variant=\"secondary\"\n          onClick={handleCreateCollection}\n          className=\"w-32\"\n        >\n          Create Store\n        </Button>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/DataStoreSelect.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport {\n  Popover,\n  PopoverContent,\n  PopoverTrigger,\n} from \"@/components/ui/popover\";\nimport { Check, ChevronDown, Plus } from \"lucide-react\";\nimport { cn } from \"@/lib/utils\";\nimport { useUser } from \"@/context/useUser\";\nimport { useCallback, useEffect, useState } from \"react\";\nimport {\n  Command,\n  CommandEmpty,\n  CommandGroup,\n  CommandInput,\n  CommandItem,\n  CommandList,\n  CommandSeparator,\n} from \"@/components/ui/command\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { useLibrary } from \"@/context/useLibrary\";\n\nexport default function DataStoreSelect() {\n  const [open, setOpen] = useState(false);\n  const [value, setValue] = useState(\"\");\n  const { activeUser } = useUser();\n  const {\n    userCollections,\n    selectedCollection,\n    setShowAddStore,\n    setSelectedCollection,\n    setShowUpload,\n    setFiles,\n  } = useLibrary();\n\n  const handleSelectCollection = async (collection: Collection) => {\n    if (!activeUser) return;\n    await window.electron.updateUserSettings({\n      userId: activeUser.id,\n      vectorstore: collection.id.toString(),\n    });\n    setSelectedCollection(collection);\n    await loadFiles();\n    setOpen(false);\n    setShowUpload(true);\n    toast({\n      title: \"Collection selected\",\n      description: `Selected collection: ${collection.name}`,\n    });\n  };\n\n  const loadFiles = useCallback(async () => {\n    if (!activeUser?.id || !activeUser?.name || !selectedCollection?.id) return;\n    const fileList = await window.electron.getFilesInCollection(\n      activeUser.id,\n      selectedCollection.id\n    );\n    setFiles(fileList.files as unknown as string[]);\n  }, [activeUser?.id, selectedCollection?.id, activeUser?.name, setFiles]);\n\n  useEffect(() => {\n    loadFiles();\n  }, [selectedCollection, loadFiles]);\n\n  return (\n    <div className=\"space-y-4\">\n      <div className=\"flex 
flex-col space-y-2\">\n        <Popover open={open} onOpenChange={setOpen}>\n          <PopoverTrigger asChild>\n            <Button\n              variant=\"outline\"\n              role=\"combobox\"\n              aria-expanded={open}\n              className=\"w-full justify-between bg-background\"\n            >\n              <span\n                className={cn(\"truncate\", !value && \"text-muted-foreground\")}\n              >\n                {selectedCollection?.name || \"Select Data Store\"}\n              </span>\n              <ChevronDown className=\"h-4 w-4 shrink-0 opacity-50\" />\n            </Button>\n          </PopoverTrigger>\n          <PopoverContent className=\"w-full p-0\" align=\"start\">\n            <Command>\n              <CommandInput placeholder=\"Search stores...\" />\n              <CommandList>\n                <CommandEmpty>No stores found.</CommandEmpty>\n                <CommandGroup>\n                  <Button\n                    variant=\"ghost\"\n                    className=\"w-full justify-start font-normal\"\n                    onClick={() => {\n                      setShowAddStore(true);\n                      setOpen(false);\n                    }}\n                  >\n                    <Plus className=\"h-4 w-4 mr-2\" />\n                    New Data Store\n                  </Button>\n                </CommandGroup>\n                <CommandSeparator />\n                <CommandGroup>\n                  <CommandItem\n                    onSelect={() => {\n                      handleSelectCollection({\n                        id: 0,\n                        name: \"No Store / Just Chat\",\n                        description: \"\",\n                        type: \"Chat\",\n                        files: \"\",\n                        userId: activeUser?.id || 0,\n                      });\n                    }}\n                  >\n                    No Store / Just Chat\n                  </CommandItem>\n  
              </CommandGroup>\n                <CommandGroup>\n                  {userCollections.map((store) => (\n                    <CommandItem\n                      key={store.id}\n                      value={store.name}\n                      onSelect={(currentValue) => {\n                        setValue(currentValue === value ? \"\" : currentValue);\n                        handleSelectCollection(store);\n                        setOpen(false);\n                        setShowAddStore(false);\n                      }}\n                    >\n                      {store.name}\n                      {value === store.name && (\n                        <Check className=\"ml-auto h-4 w-4\" />\n                      )}\n                    </CommandItem>\n                  ))}\n                </CommandGroup>\n              </CommandList>\n            </Command>\n          </PopoverContent>\n        </Popover>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/FIlesInCollection.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport {\n  File,\n  Library,\n  Globe,\n  Youtube,\n  FileText,\n  ChevronDown,\n} from \"lucide-react\";\n\nimport { processFiles } from \"@/lib/utils\";\nimport { useLibrary } from \"@/context/useLibrary\";\nimport { cn } from \"@/lib/utils\";\n\nconst truncateFileName = (fileName: string) => {\n  if (fileName.length <= 30) return fileName;\n  const start = fileName.slice(0, 13);\n  const end = fileName.slice(-14);\n  return `${start}...${end}`;\n};\n\nexport function FilesInCollection() {\n  const { files, fileExpanded, setFileExpanded } = useLibrary();\n  const filesList = processFiles(files);\n\n  return (\n    <div className=\"rounded-[6px] p-4 bg-gradient-to-br from-secondary/50 via-secondary/30 to-background border\">\n      <div className=\"flex items-center justify-between mb-4\">\n        <div className=\"flex items-center gap-2\">\n          <div className=\"h-2 w-2 rounded-full bg-green-500 animate-pulse\" />\n          <h3 className=\"text-sm font-medium flex items-center gap-2\">\n            <Library className=\"h-4 w-4\" />\n            Files in collection ({filesList.length})\n          </h3>\n        </div>\n        <Button\n          variant=\"ghost\"\n          size=\"sm\"\n          className=\"h-8\"\n          onClick={() => setFileExpanded(!fileExpanded)}\n        >\n          <ChevronDown className={cn(\"h-4 w-4 transition-transform\", fileExpanded ? \"rotate-180\" : \"\")} />\n          <span className=\"ml-2\">{fileExpanded ? \"Hide\" : \"Show\"}</span>\n        </Button>\n      </div>\n      <div\n        className={cn(\n          \"overflow-hidden transition-all duration-200\",\n          fileExpanded ? \"max-h-48 overflow-y-auto\" : \"max-h-0\"\n        )}\n      >\n        {filesList.length > 0 ? 
(\n          <ul className=\"space-y-1\">\n            {filesList.map((file, index) => {\n              const isUrl = file.startsWith(\"http\");\n              const isYoutube =\n                file.includes(\"youtube.com\") || file.includes(\"youtu.be\");\n              const fileExtension = !isUrl\n                ? file.split(\".\").pop()?.toLowerCase()\n                : null;\n\n              let icon = <File className=\"h-4 w-4 mr-2 flex-shrink-0\" />;\n              if (isYoutube) {\n                icon = (\n                  <Youtube className=\"h-4 w-4 mr-2 flex-shrink-0 text-red-500\" />\n                );\n              } else if (isUrl) {\n                icon = (\n                  <Globe className=\"h-4 w-4 mr-2 flex-shrink-0 text-blue-500\" />\n                );\n              } else if (fileExtension === \"md\") {\n                icon = (\n                  <FileText className=\"h-4 w-4 mr-2 flex-shrink-0 text-purple-500\" />\n                );\n              } else if (fileExtension === \"py\") {\n                icon = (\n                  <File className=\"h-4 w-4 mr-2 flex-shrink-0 text-yellow-500\" />\n                );\n              } else if (fileExtension === \"txt\") {\n                icon = (\n                  <FileText className=\"h-4 w-4 mr-2 flex-shrink-0 text-gray-500\" />\n                );\n              }\n\n              return (\n                <li\n                  key={index}\n                  className=\"flex items-center text-sm text-muted-foreground py-1.5 px-2 rounded-[4px] hover:bg-secondary/80 group\"\n                >\n                  {icon}\n                  <span className=\"truncate group-hover:text-clip\">\n                    {isUrl ? 
(\n                      <a\n                        href={file}\n                        target=\"_blank\"\n                        rel=\"noopener noreferrer\"\n                        className=\"text-blue-500 hover:underline truncate\"\n                        title={file}\n                      >\n                        {truncateFileName(file)}\n                      </a>\n                    ) : (\n                      <span title={file}>{truncateFileName(file)}</span>\n                    )}\n                  </span>\n                </li>\n              );\n            })}\n          </ul>\n        ) : (\n          <p className=\"text-sm text-muted-foreground italic\">\n            No files in collection\n          </p>\n        )}\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/Ingest.tsx",
    "content": "import { Tabs, TabsList, TabsTrigger, TabsContent } from \"@/components/ui/tabs\";\nimport { Button } from \"@/components/ui/button\";\nimport { ArrowLeft, Trash2, Upload } from \"lucide-react\";\nimport { useEffect } from \"react\";\nimport { useLibrary } from \"@/context/useLibrary\";\nimport { FilesInCollection } from \"./FIlesInCollection\";\nimport { FileTab } from \"./IngestTabs/FileIngestTab\";\nimport { LinkIngestTab } from \"./IngestTabs/LinkIngestTab\";\nimport { comingSoonFileTypes, implementedFileTypes } from \"./ingestTypes\";\n\nexport default function IngestModal({\n  setShowUpload,\n}: {\n  setShowUpload?: (showUpload: boolean) => void;\n}) {\n  const {\n    selectedCollection,\n    openAddToCollection,\n    loadFiles,\n    handleDeleteCollection,\n  } = useLibrary();\n\n  useEffect(() => {\n    if (openAddToCollection) {\n      loadFiles();\n    }\n  }, [openAddToCollection, loadFiles]);\n\n  return (\n    <div className=\"space-y-6\">\n      <Tabs defaultValue=\"upload\" className=\"w-full space-y-6\">\n        <div className=\"space-y-4\">\n          <div className=\"flex items-center justify-between px-2\">\n            <div className=\"flex items-center gap-2\">\n              {setShowUpload && (\n                <Button\n                  variant=\"ghost\"\n                  size=\"icon\"\n                  onClick={() => setShowUpload(false)}\n                >\n                  <ArrowLeft className=\"h-4 w-4\" />\n                </Button>\n              )}\n              <div className=\"flex items-center gap-2\">\n                <span className=\"text-sm text-muted-foreground\">\n                  Collection:\n                </span>\n                <span className=\"text-sm font-medium border border-primary/20 rounded-[6px] px-2 py-1 bg-primary/10 text-primary break-all\">\n                  {selectedCollection?.name}\n                </span>\n              </div>\n            </div>\n            <Button\n              
variant=\"ghost\"\n              size=\"icon\"\n              className=\"text-destructive hover:text-destructive hover:bg-destructive/10\"\n              onClick={handleDeleteCollection}\n            >\n              <Trash2 className=\"h-4 w-4\" />\n            </Button>\n          </div>\n\n          <div className=\"rounded-[6px] p-4 bg-gradient-to-br from-secondary/50 via-secondary/30 to-background border\">\n            <div className=\"text-center space-y-2\">\n              <h3 className=\"text-lg font-semibold\">Supported File Types</h3>\n              <div className=\"flex flex-wrap justify-center gap-2\">\n                {implementedFileTypes.map((ext) => (\n                  <span\n                    key={ext}\n                    className=\"inline-flex items-center px-3 py-1 rounded-full text-xs font-medium bg-primary/10 text-primary\"\n                  >\n                    {ext}\n                  </span>\n                ))}\n                {comingSoonFileTypes.map((ext) => (\n                  <span\n                    key={ext}\n                    className=\"inline-flex items-center px-3 py-1 rounded-full text-xs font-medium bg-muted text-muted-foreground\"\n                  >\n                    {ext} <span className=\"ml-1 text-[10px]\">(Soon)</span>\n                  </span>\n                ))}\n              </div>\n            </div>\n          </div>\n        </div>\n\n        <TabsList className=\"grid w-full grid-cols-2 h-10 p-1 bg-muted rounded-[10px]\">\n          <TabsTrigger\n            value=\"upload\"\n            className=\"rounded-l-[6px] data-[state=active]:bg-background data-[state=active]:text-foreground data-[state=active]:shadow-sm\"\n          >\n            <Upload className=\"h-4 w-4 mr-2\" />\n            Ingest Files\n          </TabsTrigger>\n          <TabsTrigger\n            value=\"link\"\n            className=\"rounded-r-[6px] data-[state=active]:bg-background data-[state=active]:text-foreground 
data-[state=active]:shadow-sm\"\n          >\n            <span className=\"mr-2\">🔗</span>\n            Ingest From Link\n          </TabsTrigger>\n        </TabsList>\n\n        <TabsContent value=\"upload\" className=\"space-y-4\">\n          <FileTab />\n        </TabsContent>\n        <TabsContent value=\"link\" className=\"space-y-4\">\n          <LinkIngestTab />\n        </TabsContent>\n      </Tabs>\n      <FilesInCollection />\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/IngestProgress.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport { Progress } from \"@/components/ui/progress\";\nimport { useUser } from \"@/context/useUser\";\nimport { useLibrary } from \"@/context/useLibrary\";\nimport { useEffect, useState } from \"react\";\n\nexport function IngestProgress({ truncate }: { truncate?: boolean }) {\n  const { activeUser } = useUser();\n  const {\n    progressMessage,\n    progress,\n    handleCancelEmbed,\n    showProgress,\n    setShowProgress,\n  } = useLibrary();\n  const [localMessage, setLocalMessage] = useState(progressMessage);\n  const [localProgress, setLocalProgress] = useState(progress);\n  const handleCancelWebcrawl = async () => {\n    if (activeUser) {\n      const result = await window.electron.cancelWebcrawl(activeUser.id);\n      if (result.result) {\n        setShowProgress(false);\n      }\n    }\n  };\n  useEffect(() => {\n    if (showProgress) {\n      setLocalMessage(progressMessage);\n      setLocalProgress(progress);\n    }\n  }, [progressMessage, progress, showProgress]);\n\n  if (!showProgress) {\n    return null;\n  }\n\n  return (\n    <div className=\"w-full\">\n      <div className={`rounded-[10px] shadow-lg p-1`}>\n        <div className=\"flex items-center gap-2 w-full\">\n          <div className=\"flex-grow min-w-0\">\n            <p\n              className={`${\n                truncate ? \"text-[8px] md:text-xs\" : \"text-xs\"\n              } text-secondary-foreground mb-1 break-all`}\n            >\n              {truncate ? 
localMessage.slice(0, 60) + \"...\" : localMessage}\n            </p>\n            <Progress value={localProgress} className=\"h-1\" />\n          </div>\n          <Button\n            type=\"button\"\n            variant=\"destructive\"\n            size=\"sm\"\n            onClick={() => {\n              handleCancelWebcrawl();\n              handleCancelEmbed();\n              setShowProgress(false);\n            }}\n            className=\"p-1 h-6\"\n            title=\"Cancel\"\n          >\n            ✕\n          </Button>\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/IngestTabs/FileIngestTab.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport { Upload, Loader2 } from \"lucide-react\";\nimport { useCallback } from \"react\";\nimport { useDropzone } from \"react-dropzone\";\nimport { IngestProgress } from \"../IngestProgress\";\nimport { useLibrary } from \"@/context/useLibrary\";\n\nexport function FileTab() {\n  const {\n    setSelectedFile,\n    selectedFile,\n    handleUpload,\n    ingesting,\n  } = useLibrary();\n\n  const onDrop = useCallback((acceptedFiles: File[]) => {\n    if (acceptedFiles.length > 0) {\n      setSelectedFile(acceptedFiles[0]);\n    }\n  }, [setSelectedFile]);\n\n  const { getRootProps, getInputProps, isDragActive } = useDropzone({\n    onDrop,\n  });\n\n  return (\n    <>\n      <div\n        {...getRootProps()}\n        className={`border-2 border-dashed rounded-[10px] p-8 text-center cursor-pointer transition-colors ${\n          isDragActive\n            ? \"border-primary bg-primary/5\"\n            : \"border-muted-foreground/20 hover:border-primary/50\"\n        }`}\n      >\n        <input {...getInputProps()} />\n        <Upload className=\"mx-auto h-12 w-12 text-muted-foreground\" />\n        <p className=\"mt-2 text-sm text-muted-foreground\">\n          Drag 'n' drop a file here, or click to select a file\n        </p>\n      </div>\n      {selectedFile && (\n        <p className=\"text-sm text-muted-foreground\">\n          Selected: {selectedFile.name}\n        </p>\n      )}\n      <IngestProgress />\n      <Button\n        onClick={() => {\n          if (selectedFile) {\n            const reader = new FileReader();\n            reader.onload = async (e: ProgressEvent<FileReader>) => {\n              const content = e.target?.result;\n              if (content instanceof ArrayBuffer) {\n                const uint8Array = new Uint8Array(content);\n                const binaryString = uint8Array.reduce((data, byte) => \n                  data + String.fromCharCode(byte), '');\n                
const base64Content = btoa(binaryString);\n                handleUpload(base64Content);\n              }\n            };\n            reader.readAsArrayBuffer(selectedFile);\n          }\n        }}\n        disabled={!selectedFile || ingesting}\n        className=\"w-full\"\n      >\n        <Upload className=\"mr-2 h-4 w-4\" />\n        {ingesting ? (\n          <span className=\"inline-flex items-center\">\n            <span className=\"animate-spin h-4 w-4 mr-2\">\n              <Loader2 className=\"h-4 w-4\" />\n            </span>\n            Uploading File...\n          </span>\n        ) : (\n          \"Upload File\"\n        )}\n      </Button>\n    </>\n  );\n} "
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/IngestTabs/LinkIngestTab.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/components/ui/input\";\nimport { Upload, Loader2 } from \"lucide-react\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { IngestProgress } from \"../IngestProgress\";\nimport { implementedLinkTypes } from \"../ingestTypes\";\nimport { useLibrary } from \"@/context/useLibrary\";\n\nexport function LinkIngestTab() {\n  const { activeUser } = useUser();\n  const {\n    selectedCollection,\n    loadFiles,\n    setProgressMessage,\n    setProgress,\n    setShowProgress,\n    setIngesting,\n    ingesting,\n    setSelectedLinkType,\n    selectedLinkType,\n    link,\n    setLink,\n  } = useLibrary();\n\n  const handleSubmit = async () => {\n    setIngesting(true);\n    if (!selectedLinkType) {\n      toast({\n        title: \"Error\",\n        description: \"Please select a link type\",\n        variant: \"destructive\",\n      });\n      return;\n    }\n    if (selectedLinkType === \"crawl\" || selectedLinkType === \"documentation\") {\n      if (!activeUser?.id || !selectedCollection?.id || !link) {\n        toast({\n          title: \"Error\",\n          description: \"Missing required information\",\n          variant: \"destructive\",\n        });\n        return;\n      }\n      try {\n        setShowProgress(true);\n        setProgress(0);\n        setProgressMessage(\"Starting web crawl...\");\n\n        const result = await window.electron.webcrawl({\n          base_url: link,\n          user_id: activeUser.id,\n          user_name: activeUser.name,\n          collection_id: selectedCollection.id,\n          collection_name: selectedCollection.name,\n          max_workers: 1,\n        });\n\n        if (result) {\n          toast({\n            title: \"Success\",\n            description: \"Web crawl completed\",\n          });\n\n          setLink(\"\");\n          setSelectedLinkType(null);\n          
loadFiles();\n\n          setTimeout(() => {\n            setShowProgress(false);\n            setProgress(0);\n            setProgressMessage(\"\");\n            setIngesting(false);\n          }, 2000);\n        }\n      } catch (error) {\n        console.error(\"Error crawling website:\", error);\n        toast({\n          title: \"Error\",\n          description:\n            error instanceof Error ? error.message : \"Failed to crawl website\",\n          variant: \"destructive\",\n        });\n      }\n    } else if (selectedLinkType === \"youtube\") {\n      if (!activeUser?.id || !selectedCollection?.id || !link) {\n        toast({\n          title: \"Error\",\n          description: \"Missing required information\",\n          variant: \"destructive\",\n        });\n        return;\n      }\n\n      try {\n        setProgressMessage(\"Starting YouTube video processing...\");\n        setProgress(0);\n        setShowProgress(true);\n\n        const result = await window.electron.youtubeIngest(\n          link,\n          activeUser.id,\n          activeUser.name,\n          selectedCollection.id,\n          selectedCollection.name\n        );\n\n        if (result) {\n          setProgressMessage(\"YouTube video processed successfully!\");\n          setProgress(100);\n          toast({\n            title: \"Success\",\n            description: \"YouTube video processed successfully\",\n          });\n\n          setLink(\"\");\n          setSelectedLinkType(null);\n          loadFiles();\n          setTimeout(() => {\n            setShowProgress(false);\n            setProgress(0);\n            setProgressMessage(\"\");\n            setIngesting(false);\n          }, 2000);\n        }\n      } catch (error) {\n        console.error(\"Error processing YouTube video:\", error);\n        toast({\n          title: \"Error\",\n          description:\n            error instanceof Error\n              ? 
error.message\n              : \"Failed to process YouTube video\",\n          variant: \"destructive\",\n        });\n        setIngesting(false);\n        setProgressMessage(\"\");\n        setProgress(0);\n        setShowProgress(false);\n      }\n    } else if (selectedLinkType === \"website\") {\n      if (!activeUser?.id || !selectedCollection?.id || !link) {\n        toast({\n          title: \"Error\",\n          description: \"Missing required information\",\n          variant: \"destructive\",\n        });\n        return;\n      }\n\n      try {\n        setProgressMessage(\"Starting website fetch...\");\n        setProgress(0);\n        setShowProgress(true);\n\n        const result = await window.electron.websiteFetch(\n          link,\n          activeUser.id,\n          activeUser.name,\n          selectedCollection.id,\n          selectedCollection.name\n        );\n\n        if (result.success) {\n          setProgressMessage(\"Website processed successfully!\");\n          setProgress(100);\n          toast({\n            title: \"Success\",\n            description: \"Website processed successfully\",\n          });\n          setLink(\"\");\n          setSelectedLinkType(null);\n          loadFiles();\n          setTimeout(() => {\n            setIngesting(false);\n            setShowProgress(false);\n            setProgress(0);\n            setProgressMessage(\"\");\n          }, 2000);\n        } else {\n          throw new Error(result.success || \"Failed to process website\");\n        }\n      } catch (error) {\n        console.error(\"Error processing website:\", error);\n        toast({\n          title: \"Error\",\n          description:\n            error instanceof Error\n              ? 
error.message\n              : \"Failed to process website\",\n          variant: \"destructive\",\n        });\n        setProgressMessage(\"\");\n        setProgress(0);\n        setShowProgress(false);\n        setIngesting(false);\n      }\n    }\n  };\n\n  return (\n    <div className=\"space-y-4\">\n      <div className=\"grid grid-cols-2 gap-2\">\n        {implementedLinkTypes.map((type) => (\n          <Button\n            key={type.value}\n            variant={selectedLinkType === type.value ? \"secondary\" : \"outline\"}\n            onClick={() =>\n              setSelectedLinkType(type.value as \"website\" | \"youtube\" | \"crawl\")\n            }\n            className=\"flex items-center justify-start space-x-2 h-12\"\n          >\n            <span className=\"text-lg\">{type.icon}</span>\n            <div className=\"text-left\">\n              <p className=\"font-medium\">{type.name}</p>\n              <p className=\"text-xs text-muted-foreground\">{type.description}</p>\n            </div>\n          </Button>\n        ))}\n      </div>\n\n      {selectedLinkType && (\n        <div className=\"space-y-2\">\n          <Input\n            placeholder={`Enter ${selectedLinkType} URL...`}\n            value={link}\n            onChange={(e) => setLink(e.target.value)}\n            className=\"h-10\"\n          />\n          <Button\n            onClick={handleSubmit}\n            disabled={!link || ingesting}\n            className=\"w-full\"\n          >\n            <Upload className=\"mr-2 h-4 w-4\" />\n\n            {selectedLinkType === \"youtube\" ? (\n              <>\n                {ingesting ? 
(\n                  <span className=\"inline-flex items-center\">\n                    <span className=\"animate-spin h-4 w-4 mr-2\">\n                      <Loader2 className=\"h-4 w-4\" />\n                    </span>\n                    Ingesting Video...\n                  </span>\n                ) : (\n                  \"Ingest Video\"\n                )}\n              </>\n            ) : selectedLinkType === \"documentation\" ? (\n              <>\n                {ingesting ? (\n                  <span className=\"inline-flex items-center\">\n                    <span className=\"animate-spin h-4 w-4 mr-2\">\n                      <Loader2 className=\"h-4 w-4\" />\n                    </span>\n                    Ingesting Documentation...\n                  </span>\n                ) : (\n                  \"Ingest Documentation\"\n                )}\n              </>\n            ) : selectedLinkType === \"crawl\" ? (\n              <>\n                {ingesting ? (\n                  <span className=\"inline-flex items-center\">\n                    <span className=\"animate-spin h-4 w-4 mr-2\">\n                      <Loader2 className=\"h-4 w-4\" />\n                    </span>\n                    Crawling & Ingesting...\n                  </span>\n                ) : (\n                  \"Web Crawl & Ingest\"\n                )}\n              </>\n            ) : (\n              <>\n                {ingesting ? (\n                  <span className=\"inline-flex items-center\">\n                    <span className=\"animate-spin h-4 w-4 mr-2\">\n                      <Loader2 className=\"h-4 w-4\" />\n                    </span>\n                    Ingesting Page...\n                  </span>\n                ) : (\n                  \"Ingest Page\"\n                )}\n              </>\n            )}\n          </Button>\n        </div>\n      )}\n      <IngestProgress />\n    </div>\n  );\n} "
  },
  {
    "path": "Frontend/src/components/CollectionModals/CollectionComponents/ingestTypes.tsx",
    "content": "export const implementedFileTypes = [\n  \".md\",\n  \".html\",\n  \".json\",\n  \".py\",\n  \".txt\",\n  \".csv\",\n  \".pdf\",\n  \".docx\",\n] as const;\n\nexport const comingSoonFileTypes = [\".pptx\", \".xlsx\"] as const;\n\nexport const implementedLinkTypes = [\n  {\n    icon: \"🌐\",\n    name: \"Website\",\n    value: \"website\",\n    description: \"Single webpage\",\n  },\n  {\n    icon: \"🎥\",\n    name: \"YouTube\",\n    value: \"youtube\",\n    description: \"Video content\",\n  },\n  {\n    icon: \"🕷️\",\n    name: \"Web Crawl\",\n    value: \"crawl\",\n    description: \"Crawl websites\",\n  },\n  {\n    icon: \"📚\",\n    name: \"Documentation\",\n    value: \"documentation\",\n    description: \"Read the Docs\",\n  },\n] as const;\n"
  },
  {
    "path": "Frontend/src/components/CollectionModals/LibraryModal.tsx",
    "content": "import { useLibrary } from \"@/context/useLibrary\";\nimport IngestModal from \"./CollectionComponents/Ingest\";\nimport AddLibrary from \"./CollectionComponents/AddLibrary\";\nimport DataStoreSelect from \"./CollectionComponents/DataStoreSelect\";\n\nexport function LibraryModal() {\n  const { selectedCollection, showUpload, setShowUpload, showAddStore } =\n    useLibrary();\n\n  return (\n    <div className=\"space-y-8\">\n      <div className=\"space-y-6\">\n        {showUpload && selectedCollection?.id !== 0 ? (\n          <IngestModal setShowUpload={setShowUpload} />\n        ) : (\n          <div className=\"space-y-6\">\n            <div>\n              {!showAddStore && <DataStoreSelect />}\n              {showAddStore && <AddLibrary />}\n            </div>\n          </div>\n        )}\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/FileExplorer/FileExplorer.tsx",
    "content": "import {\n  FileIcon,\n  Trash2,\n  Edit2,\n  FolderOpen,\n  File,\n  Folder,\n  ChevronLeftCircle,\n} from \"lucide-react\";\nimport React, { useState, useEffect, useCallback } from \"react\";\nimport { useUser } from \"@/context/useUser\";\nimport { Button } from \"../ui/button\";\nimport {\n  Dialog,\n  DialogContent,\n  DialogHeader,\n  DialogTitle,\n  DialogFooter,\n  DialogDescription,\n} from \"@/components/ui/dialog\";\nimport { Input } from \"@/components/ui/input\";\nimport { useView } from \"@/context/useView\";\n\ninterface FileNode {\n  name: string;\n  type: \"file\" | \"folder\";\n  children?: FileNode[];\n  path?: string;\n}\n\ninterface FileItemProps {\n  node: FileNode;\n  depth: number;\n  onDelete: () => void;\n  onRename: () => void;\n  onReload: () => Promise<void>;\n  parentPath?: string;\n}\n\nconst FileItem: React.FC<FileItemProps> = ({\n  node,\n  depth,\n  onDelete,\n  onRename,\n  onReload,\n  parentPath,\n}) => {\n  const [isOpen, setIsOpen] = useState(false);\n  const [showRenameDialog, setShowRenameDialog] = useState(false);\n  const [newName, setNewName] = useState(node.name);\n  const paddingLeft = `${depth * 16}px`;\n  const { activeUser } = useUser();\n  const fullPath = parentPath\n    ? 
`${parentPath}/${node.name}`\n    : `${activeUser?.id}_${activeUser?.name}/${node.name}`;\n\n  const handleOpenFolder = () => {\n    if (!activeUser) return;\n    window.electron.openCollectionFolderFromFileExplorer(fullPath);\n  };\n\n  const handleRename = async () => {\n    if (!activeUser) return;\n    if (newName.trim() === \"\") return;\n\n    try {\n      const result = await window.electron.renameFile(\n        activeUser.id,\n        activeUser.name,\n        fullPath,\n        newName\n      );\n      if (result.success) {\n        await onReload(); // Ensure files are reloaded after rename\n        onRename();\n        setShowRenameDialog(false);\n      } else {\n        console.error(\"Failed to rename file\");\n        // You might want to show an error message to the user here\n      }\n    } catch (error) {\n      console.error(\"Error renaming file:\", error);\n      // Optionally add error handling UI here\n    }\n  };\n\n  const handleRemoveFile = async () => {\n    if (!activeUser) return;\n\n    const confirmed = window.confirm(\n      `Are you sure you want to delete \"${node.name}\"? 
This action cannot be undone.`\n    );\n\n    if (!confirmed) return;\n\n    try {\n      const result = await window.electron.removeFileorFolder(\n        activeUser.id,\n        activeUser.name,\n        fullPath\n      );\n\n      if (result.success) {\n        onDelete();\n      } else {\n        console.error(\"Failed to remove file or folder\");\n        // You might want to show an error message to the user here\n      }\n    } catch (error) {\n      console.error(\"Error removing file:\", error);\n      // You might want to show an error message to the user here\n    }\n  };\n\n  if (node.type === \"file\") {\n    return (\n      <>\n        <div\n          style={{ paddingLeft }}\n          className=\"flex items-center justify-between p-1.5 hover:bg-muted/50 transition-colors group\"\n        >\n          <div className=\"flex items-center\">\n            <File className=\"text-muted-foreground mr-2 text-sm\" />\n            <span className=\"text-sm text-foreground\">{node.name}</span>\n          </div>\n          <div className=\"flex items-center gap-2 opacity-0 group-hover:opacity-100 transition-opacity\">\n            {depth > 0 && (\n              <Button\n                variant=\"outline\"\n                size=\"icon\"\n                onClick={() => setShowRenameDialog(true)}\n              >\n                <Edit2 className=\"h-4 w-4 cursor-pointer hover:text-primary\" />\n              </Button>\n            )}\n            <Button variant=\"outline\" size=\"icon\" onClick={handleRemoveFile}>\n              <Trash2 className=\"h-4 w-4 cursor-pointer hover:text-destructive\" />\n            </Button>\n          </div>\n        </div>\n\n        <Dialog open={showRenameDialog} onOpenChange={setShowRenameDialog}>\n          <DialogContent>\n            <DialogDescription />\n            <DialogHeader>\n              <DialogTitle>Rename File</DialogTitle>\n            </DialogHeader>\n            <div className=\"py-4\">\n              <Input\n   
             value={newName}\n                onChange={(e) => setNewName(e.target.value)}\n                placeholder=\"Enter new name\"\n                autoFocus\n              />\n            </div>\n            <DialogFooter>\n              <Button\n                variant=\"outline\"\n                onClick={() => setShowRenameDialog(false)}\n              >\n                Cancel\n              </Button>\n              <Button onClick={handleRename}>Rename</Button>\n            </DialogFooter>\n          </DialogContent>\n        </Dialog>\n      </>\n    );\n  }\n\n  return (\n    <>\n      <div\n        style={{ paddingLeft }}\n        className=\"flex items-center justify-between p-1.5 hover:bg-muted/50 transition-colors cursor-pointer group\"\n      >\n        <div\n          className=\"flex items-center px-2\"\n          onClick={() => setIsOpen(!isOpen)}\n        >\n          {isOpen ? (\n            <FolderOpen className=\"text-primary mr-2 text-sm\" />\n          ) : (\n            <Folder className=\"text-primary mr-2 text-sm\" />\n          )}\n          <span className=\"text-sm text-foreground\">{node.name}</span>\n        </div>\n        <div className=\"flex items-center gap-2 opacity-0 group-hover:opacity-100 transition-opacity\">\n          <Button variant=\"outline\" size=\"icon\" onClick={handleOpenFolder}>\n            <FolderOpen className=\"h-4 w-4 cursor-pointer hover:text-primary\" />\n          </Button>\n          {depth > 0 && (\n            <Button\n              variant=\"outline\"\n              size=\"icon\"\n              onClick={() => setShowRenameDialog(true)}\n            >\n              <Edit2 className=\"h-4 w-4 cursor-pointer hover:text-primary\" />\n            </Button>\n          )}\n          <Button variant=\"outline\" size=\"icon\" onClick={handleRemoveFile}>\n            <Trash2 className=\"h-4 w-4 cursor-pointer hover:text-destructive\" />\n          </Button>\n        </div>\n      </div>\n\n      <Dialog 
open={showRenameDialog} onOpenChange={setShowRenameDialog}>\n        <DialogContent>\n          <DialogDescription />\n          <DialogHeader>\n            <DialogTitle>Rename Folder</DialogTitle>\n          </DialogHeader>\n          <div className=\"py-4\">\n            <Input\n              value={newName}\n              onChange={(e) => setNewName(e.target.value)}\n              placeholder=\"Enter new name\"\n              autoFocus\n            />\n          </div>\n          <DialogFooter>\n            <Button\n              variant=\"outline\"\n              onClick={() => setShowRenameDialog(false)}\n            >\n              Cancel\n            </Button>\n            <Button onClick={handleRename}>Rename</Button>\n          </DialogFooter>\n        </DialogContent>\n      </Dialog>\n\n      {isOpen &&\n        node.children?.map((child, index) => (\n          <FileItem\n            key={`${child.name}-${index}`}\n            node={child}\n            depth={depth + 1}\n            onDelete={onDelete}\n            onRename={onRename}\n            onReload={onReload}\n            parentPath={fullPath}\n          />\n        ))}\n    </>\n  );\n};\n\nfunction buildFileTree(files: string[]): FileNode[] {\n  const root: FileNode[] = [];\n\n  files.forEach((filePath) => {\n    const parts = filePath.split(\"/\");\n    let currentLevel = root;\n\n    parts.forEach((part, index) => {\n      const isLastPart = index === parts.length - 1;\n      const existingNode = currentLevel.find((node) => node.name === part);\n\n      if (existingNode) {\n        if (!isLastPart) {\n          currentLevel = existingNode.children || [];\n        }\n      } else {\n        const newNode: FileNode = {\n          name: part,\n          type: isLastPart ? \"file\" : \"folder\",\n          children: isLastPart ? 
undefined : [],\n        };\n        currentLevel.push(newNode);\n        if (!isLastPart) {\n          currentLevel = newNode.children!;\n        }\n      }\n    });\n  });\n\n  return root;\n}\n\nexport default function FileExplorer() {\n  const { activeUser } = useUser();\n  const [fileTree, setFileTree] = useState<FileNode[]>([]);\n  const [loading, setLoading] = useState(true);\n  const { setActiveView } = useView();\n\n  useEffect(() => {\n    if (!activeUser) {\n      setActiveView(\"SelectAccount\");\n    }\n  }, [activeUser, setActiveView]);\n\n  const loadFiles = useCallback(async () => {\n    if (!activeUser) {\n      return;\n    }\n    try {\n      const result = await window.electron.getUserCollectionFiles(\n        activeUser.id,\n        activeUser.name\n      );\n      setFileTree(buildFileTree(result.files));\n    } catch (error) {\n      console.error(\"Error loading files:\", error);\n    } finally {\n      setLoading(false);\n    }\n  }, [activeUser]);\n\n  useEffect(() => {\n    loadFiles();\n  }, [activeUser, loadFiles]);\n\n  return (\n    <div\n      className=\"pt-5 h-[calc(100vh-1rem)] flex flex-col history-view\"\n      data-testid=\"history-view\"\n    >\n      <div className=\"flex flex-col h-full overflow-hidden\">\n        <div className=\"p-2 bg-secondary/50 border-b border-secondary flex items-center justify-between\">\n          <div className=\"flex items-center\">\n            <FileIcon className=\"mr-2 h-6 w-6 text-primary\" />\n            <h1 className=\"text-2xl font-bold\">File Explorer</h1>\n          </div>\n          <Button variant=\"secondary\" onClick={() => setActiveView(\"Chat\")}>\n            <ChevronLeftCircle className=\"h-4 w-4 cursor-pointer hover:text-primary\" />\n            Back to Chat\n          </Button>\n        </div>\n        <div className=\"overflow-auto h-[calc(100%-2.5rem)] p-8\">\n          {loading ? 
(\n            <div className=\"flex items-center justify-center h-full\">\n              <span className=\"text-sm text-muted-foreground\">Loading...</span>\n            </div>\n          ) : fileTree.length > 0 ? (\n            fileTree.map((node, index) => (\n              <FileItem\n                key={`${node.name}-${index}`}\n                node={node}\n                depth={0}\n                onDelete={loadFiles}\n                onRename={loadFiles}\n                onReload={loadFiles}\n              />\n            ))\n          ) : (\n            <div className=\"flex flex-col items-center justify-center h-full\">\n              <div className=\"flex flex-col items-center justify-center h-full gap-4\">\n                <Button onClick={() => setActiveView(\"Chat\")}>\n                  <ChevronLeftCircle className=\"h-4 w-4 cursor-pointer hover:text-primary\" />\n                  Back to Chat\n                </Button>\n                <span className=\"text-sm text-muted-foreground\">\n                  No files found\n                </span>\n              </div>\n            </div>\n          )}\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Header/Header.tsx",
    "content": "import { useUser } from \"@/context/useUser\";\nimport { useEffect } from \"react\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport SearchComponent from \"./HeaderComponents/Search\";\nimport SettingsDialog from \"./HeaderComponents/SettingsDialog\";\nimport WindowControls from \"./HeaderComponents/MainWindowControl\";\nimport WinLinuxControls from \"./HeaderComponents/WinLinuxControls\";\nimport { useChatInput } from \"@/context/useChatInput\";\nimport ToolsDialog from \"./HeaderComponents/ToolsDialog\";\nimport { Button } from \"../ui/button\";\nimport { PlusCircle } from \"lucide-react\";\nimport { useChatLogic } from \"@/hooks/useChatLogic\";\nexport function Header() {\n  const { isSearchOpen, searchTerm, conversations, setFilteredConversations } =\n    useUser();\n  const { handleResetChat } = useChatLogic();\n\n  const { platform, isMaximized, setIsMaximized } = useSysSettings();\n  const { input } = useChatInput();\n  useEffect(() => {\n    if (isSearchOpen) {\n      const filtered =\n        conversations\n          ?.filter(\n            (conv) =>\n              conv?.title\n                ?.toLowerCase?.()\n                ?.includes(searchTerm?.toLowerCase?.() ?? \"\") ?? false\n          )\n          ?.sort((a, b) => (b?.id ?? 0) - (a?.id ?? 0))\n          ?.slice(0, 10) ?? [];\n      setFilteredConversations(filtered);\n    }\n  }, [searchTerm, conversations, isSearchOpen, setFilteredConversations]);\n\n  // Update filtered conversations when input is cleared (new chat request)\n  useEffect(() => {\n    if (!input) {\n      const filtered =\n        conversations\n          ?.sort((a, b) => (b?.id ?? 0) - (a?.id ?? 0))\n          ?.slice(0, 10) ?? 
[];\n      setFilteredConversations(filtered);\n    }\n  }, [conversations, setFilteredConversations]);\n\n  const renderWindowControls = WindowControls({\n    isMaximized,\n    setIsMaximized,\n    platform,\n  });\n\n  return (\n    <header\n      className={`bg-secondary/50 grid grid-cols-3 items-center border-b border-secondary ${\n        platform !== \"darwin\" ? \"pr-0\" : \"\"\n      }`}\n    >\n      {/* Left column */}\n      <div className=\"flex items-center justify-between\">\n        {platform === \"darwin\" ? renderWindowControls : <WinLinuxControls />}\n      </div>\n      {/* Center column */}\n      <SearchComponent />\n      {/* Right column */}\n      <div className=\"flex items-center justify-end\">\n        <Button\n          variant=\"ghost\"\n          className=\"clickable-header-section text-xs rounded-none sm:w-auto h-9 w-9 sm:px-2\"\n          onClick={() => {\n            handleResetChat();\n          }}\n        >\n          <PlusCircle className=\"sm:mr-2\" />\n          <span className=\"hidden sm:block text-xs\">New Chat</span>\n        </Button>\n        <ToolsDialog />\n        <SettingsDialog />\n      </div>\n    </header>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/MainWindowControl.tsx",
    "content": "import { Minimize, Minus, X, Maximize2Icon } from \"lucide-react\";\nimport { useEffect, useState } from \"react\";\n\nconst MainWindowControl = ({\n  isMaximized,\n  setIsMaximized,\n  platform,\n}: {\n  isMaximized: boolean;\n  setIsMaximized: (isMaximized: boolean) => void;\n  platform: string | null;\n}) => {\n  const [isFocused, setIsFocused] = useState(true);\n\n  useEffect(() => {\n    const handleFocus = () => setIsFocused(true);\n    const handleBlur = () => setIsFocused(false);\n\n    window.addEventListener(\"focus\", handleFocus);\n    window.addEventListener(\"blur\", handleBlur);\n\n    return () => {\n      window.removeEventListener(\"focus\", handleFocus);\n      window.removeEventListener(\"blur\", handleBlur);\n    };\n  }, []);\n\n  if (platform === \"darwin\") {\n    return (\n      <div className={`flex pl-2 ${isFocused ? \"focus-within\" : \"\"}`}>\n        <div className=\"flex window-controls\">\n          <button\n            className=\"close header-button\"\n            id=\"close\"\n            onClick={() => window.electron.sendFrameAction(\"close\")}\n          >\n            <span className=\"hidden\">\n              <X className=\"m-auto text-black font-bold \" size={10} />\n            </span>\n          </button>\n          <button\n            className=\"minimize header-button\"\n            id=\"minimize\"\n            onClick={() => window.electron.sendFrameAction(\"minimize\")}\n          >\n            <span className=\"hidden\">\n              <Minus className=\"m-auto text-black font-bold\" size={8} />\n            </span>\n          </button>\n          <button\n            className={`${isMaximized ? \"restore\" : \"maximize\"} header-button`}\n            id={isMaximized ? \"unmaximize\" : \"maximize\"}\n            onClick={() => {\n              setIsMaximized(!isMaximized);\n              window.electron.sendFrameAction(\n                isMaximized ? 
\"unmaximize\" : \"maximize\"\n              );\n            }}\n          >\n            <span className=\"hidden\">\n              {isMaximized ? (\n                <Minimize className=\"m-auto text-black font-bold\" size={8} />\n              ) : (\n                <Maximize2Icon\n                  className=\"m-auto text-black font-bold\"\n                  size={8}\n                />\n              )}\n            </span>\n          </button>\n        </div>\n      </div>\n    );\n  } else {\n    return (\n      <div className=\"flex order-last items-center\">\n        <button\n          className=\"win-header-button group\"\n          onClick={() => window.electron.sendFrameAction(\"minimize\")}\n        >\n          <svg width=\"10\" height=\"10\" viewBox=\"0 0 10 1\">\n            <path d=\"M0 0h10v1H0z\" fill=\"currentColor\" />\n          </svg>\n        </button>\n        <button\n          className={`win-header-button group`}\n          onClick={() => {\n            setIsMaximized(!isMaximized);\n            window.electron.sendFrameAction(\n              isMaximized ? \"unmaximize\" : \"maximize\"\n            );\n          }}\n        >\n          {isMaximized ? (\n            <svg width=\"10\" height=\"10\" viewBox=\"0 0 10 10\">\n              <path d=\"M0 0v10h10V0H0zm1 1h8v8H1V1z\" fill=\"currentColor\" />\n            </svg>\n          ) : (\n            <svg width=\"10\" height=\"10\" viewBox=\"0 0 10 10\">\n              <path d=\"M0 0v10h10V0H0zm9 9H1V1h8v8z\" fill=\"currentColor\" />\n            </svg>\n          )}\n        </button>\n        <button\n          className=\"win-header-button win-close\"\n          onClick={() => window.electron.sendFrameAction(\"close\")}\n        >\n          <p className=\"leading-none text-[12px]\">&#x2715;</p>\n        </button>\n      </div>\n    );\n  }\n};\n\nexport default MainWindowControl;\n"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/Search.tsx",
    "content": "import { SearchIcon, Search } from \"lucide-react\";\nimport { useUser } from \"@/context/useUser\";\nimport { useEffect, useState, useCallback } from \"react\";\nimport { useView } from \"@/context/useView\";\n\ntype ConversationWithTimestamp = Conversation & {\n  latestMessageTime: number;\n};\n\nexport default function SearchComponent() {\n  const {\n    setActiveConversation,\n    filteredConversations,\n    isSearchOpen,\n    setIsSearchOpen,\n    searchTerm,\n    setSearchTerm,\n    searchRef,\n    conversations,\n    activeUser,\n    setMessages,\n  } = useUser();\n\n  const { activeView, setActiveView } = useView();\n  const [sortedConversations, setSortedConversations] = useState<ConversationWithTimestamp[]>([]);\n\n  const sortConversationsByLatestMessage = useCallback(async () => {\n    if (!activeUser?.id) return;\n\n    // Get messages for each conversation and find the latest timestamp\n    const conversationsWithTimestamp = await Promise.all(\n      conversations.map(async (conv) => {\n        // If we already have latestMessageTime, use it\n        if (\"latestMessageTime\" in conv) {\n          return conv as ConversationWithTimestamp;\n        }\n\n        const result = await window.electron.getConversationMessagesWithData(\n          activeUser.id,\n          conv.id,\n          undefined\n        );\n        const messages = result.messages || [];\n        const latestMessage = messages.reduce((latest, current) => {\n          if (!latest || !latest.timestamp) return current;\n          if (!current || !current.timestamp) return latest;\n          const currentTime = new Date(current.timestamp).getTime();\n          const latestTime = new Date(latest.timestamp).getTime();\n          return currentTime > latestTime ? current : latest;\n        }, messages[0]);\n\n        return {\n          ...conv,\n          latestMessageTime: latestMessage?.timestamp\n            ? 
new Date(latestMessage.timestamp).getTime()\n            : new Date(conv.created_at).getTime(),\n        };\n      })\n    );\n\n    // Sort conversations by latest message timestamp\n    const sorted = conversationsWithTimestamp.sort(\n      (a, b) => b.latestMessageTime - a.latestMessageTime\n    );\n    setSortedConversations(sorted);\n  }, [activeUser?.id, conversations]);\n\n  // Update sorted conversations whenever conversations change\n  useEffect(() => {\n    sortConversationsByLatestMessage();\n  }, [sortConversationsByLatestMessage]);\n\n  const toggleSearch = () => {\n    setIsSearchOpen(!isSearchOpen);\n    setSearchTerm(\"\");\n  };\n\n  const handleConversationClick = (conversationId: number) => {\n    setActiveConversation(conversationId);\n    if (activeView !== \"Chat\") {\n      setActiveView(\"Chat\");\n    }\n    setIsSearchOpen(false);\n    setSearchTerm(\"\");\n    const conversation = conversations.find(\n      (conv) => conv.id === conversationId\n    );\n    if (conversation && activeUser?.id) {\n      window.electron\n        .getConversationMessagesWithData(\n          activeUser.id,\n          conversationId,\n          undefined\n        )\n        .then((result) => {\n          setMessages(result.messages);\n        });\n    }\n  };\n\n  // Use sorted conversations when no search term, otherwise use filtered\n  const displayedConversations =\n    searchTerm.trim() === \"\" ? sortedConversations : filteredConversations;\n\n  return (\n    <div className=\"flex justify-center items-center\">\n      <div\n        ref={searchRef}\n        className=\"clickable-header-section outer-glow flex justify-center items-center border-2 box-shadow-inner rounded-[6px] w-[90%] rounded hover:bg-secondary/50 relative\"\n        onClick={toggleSearch}\n      >\n        {isSearchOpen ? 
(\n          <div className=\"relative w-full\">\n            <input\n              type=\"text\"\n              placeholder=\"Search...\"\n              className=\"px-2 w-full bg-transparent text-sm text-gray-300 outline-none\"\n              autoFocus\n              value={searchTerm}\n              onChange={(e) => setSearchTerm(e.target.value)}\n            />\n            <Search\n              size={14}\n              className=\"absolute right-2 top-1/2 transform -translate-y-1/2 text-gray-300\"\n            />\n            {isSearchOpen && (\n              <div className=\"absolute top-full left-0 w-full bg-secondary/90 rounded-b-[6px] max-h-60 overflow-x-hidden overflow-y-auto z-50 [&::-webkit-scrollbar]:w-2 [&::-webkit-scrollbar-track]:bg-gray-900 [&::-webkit-scrollbar-thumb]:bg-gray-700 [&::-webkit-scrollbar-thumb]:rounded\">\n                {displayedConversations.map((conv) => (\n                  <div\n                    key={conv.id}\n                    className=\"px-2 py-1 hover:bg-secondary/50 cursor-pointer text-sm text-gray-300\"\n                    onClick={() => handleConversationClick(conv.id)}\n                  >\n                    {conv.title}\n                  </div>\n                ))}\n              </div>\n            )}\n          </div>\n        ) : (\n          <h1 className=\"text-sm text-gray-300 flex items-center gap-2 px-4\">\n            <SearchIcon size={14} /> Notate\n          </h1>\n        )}\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/SettingsDialog.tsx",
    "content": "import {\n  Dialog,\n  DialogTrigger,\n  DialogContent,\n  DialogHeader,\n  DialogTitle,\n  DialogDescription,\n} from \"@/components/ui/dialog\";\nimport { Button } from \"@/components/ui/button\";\nimport { Settings } from \"lucide-react\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { SettingsModal } from \"@/components/SettingsModal/SettingsModal\";\nimport WindowControls from \"./MainWindowControl\";\n\nexport default function SettingsDialog() {\n  const {\n    settingsOpen,\n    setSettingsOpen,\n    platform,\n    isMaximized,\n    setIsMaximized,\n  } = useSysSettings();\n\n  const renderWindowControls = WindowControls({\n    isMaximized,\n    setIsMaximized,\n    platform,\n  });\n  return (\n    <>\n      <Dialog open={settingsOpen} onOpenChange={setSettingsOpen}>\n        <DialogTrigger asChild className=\"clickable-header-section\">\n          <Button\n            type=\"button\"\n            size=\"icon\"\n            variant=\"ghost\"\n            className=\"rounded-none\"\n          >\n            <Settings className=\"h-5 w-5\" />\n            <span className=\"sr-only\">Chat Settings</span>\n          </Button>\n        </DialogTrigger>\n        <DialogContent className=\"max-h-[100vh] w-[80%] mt-4\">\n          <DialogHeader className=\"sm:pb-4 pb-2\">\n            <DialogTitle className=\"text-xl font-semibold\">\n              Settings\n            </DialogTitle>\n            <DialogDescription className=\"text-muted-foreground\">\n              Configure your application preferences and settings\n            </DialogDescription>\n          </DialogHeader>\n          <div className=\"overflow-y-hidden overflow-x-hidden pr-2\">\n            <SettingsModal />\n          </div>\n        </DialogContent>\n      </Dialog>\n      {platform !== \"darwin\" && renderWindowControls}\n    </>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/ToolsDialog.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport ToolboxIcon from \"@/assets/toolbox/toolbox.svg\";\nimport {\n  Dialog,\n  DialogTrigger,\n  DialogContent,\n  DialogHeader,\n  DialogTitle,\n  DialogDescription,\n} from \"@/components/ui/dialog\";\nimport Tools from \"@/components/Tools/Tools\";\n\nexport default function ToolsDialog() {\n  return (\n    <Dialog>\n      <DialogTrigger asChild>\n        <Button\n          size=\"icon\"\n          variant=\"ghost\"\n          className=\"clickable-header-section rounded-none\"\n        >\n          <img src={ToolboxIcon} alt=\"Toolbox\" className=\"h-4 w-4\" />\n        </Button>\n      </DialogTrigger>\n      <DialogContent>\n        <DialogHeader>\n          <DialogDescription />\n          <DialogTitle>Tools</DialogTitle>\n        </DialogHeader>\n        <Tools />\n      </DialogContent>\n    </Dialog>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Header/HeaderComponents/WinLinuxControls.tsx",
    "content": "import {\n  Menubar,\n  MenubarMenu,\n  MenubarTrigger,\n  MenubarContent,\n  MenubarItem,\n  MenubarSeparator,\n} from \"@/components/ui/menubar\";\nimport { useUser } from \"@/context/useUser\";\nimport { useView } from \"@/context/useView\";\nimport { useLibrary } from \"@/context/useLibrary\";\nexport default function WinLinuxControls() {\n  const {\n    setActiveUser,\n    setApiKeys,\n    setPrompts,\n    setConversations,\n    handleResetChat,\n  } = useUser();\n  const { setSelectedCollection, setFiles } = useLibrary();\n  const { setActiveView } = useView();\n  return (\n    <Menubar className=\"clickable-header-section bg-transparent border-none\">\n      <MenubarMenu>\n        <MenubarTrigger className=\"clickable-header-section\">\n          File\n        </MenubarTrigger>\n        <MenubarContent className=\"clickable-header-section\">\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => {\n              handleResetChat();\n            }}\n          >\n            New Conversation\n          </MenubarItem>\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => {\n              setActiveUser(null);\n              setSelectedCollection(null);\n              setApiKeys([]);\n              setPrompts([]);\n              setFiles([]);\n              setConversations([]);\n              setActiveView(\"SelectAccount\");\n            }}\n          >\n            Change User\n          </MenubarItem>\n          <MenubarSeparator />\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.quit()}\n          >\n            Quit\n          </MenubarItem>\n        </MenubarContent>\n      </MenubarMenu>\n      <MenubarMenu>\n        <MenubarTrigger className=\"clickable-header-section\">\n          Edit\n        </MenubarTrigger>\n        <MenubarContent 
className=\"clickable-header-section\">\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.undo()}\n          >\n            Undo\n          </MenubarItem>\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.redo()}\n          >\n            Redo\n          </MenubarItem>\n          <MenubarSeparator />\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.cut()}\n          >\n            Cut\n          </MenubarItem>\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.copy()}\n          >\n            Copy\n          </MenubarItem>\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.paste()}\n          >\n            Paste\n          </MenubarItem>\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.delete()}\n          >\n            Delete\n          </MenubarItem>\n          <MenubarSeparator />\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.selectAll()}\n          >\n            Select All\n          </MenubarItem>\n          <MenubarSeparator />\n          <MenubarItem\n            className=\"clickable-header-section\"\n            onClick={() => window.electron.print()}\n          >\n            Print\n          </MenubarItem>\n        </MenubarContent>\n        <MenubarMenu>\n          <MenubarTrigger className=\"clickable-header-section\">\n            View\n          </MenubarTrigger>\n          <MenubarContent className=\"clickable-header-section\">\n            <MenubarItem\n              className=\"clickable-header-section\"\n              onClick={() => 
setActiveView(\"Chat\")}\n            >\n              Chat\n            </MenubarItem>\n            <MenubarItem\n              className=\"clickable-header-section\"\n              onClick={() => setActiveView(\"History\")}\n            >\n              History\n            </MenubarItem>\n            <MenubarItem\n              className=\"clickable-header-section\"\n              onClick={() => setActiveView(\"FileExplorer\")}\n            >\n              File Explorer\n            </MenubarItem>\n          </MenubarContent>\n        </MenubarMenu>\n      </MenubarMenu>\n    </Menubar>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/History/History.tsx",
    "content": "import { useEffect, useState } from \"react\";\nimport { format } from \"date-fns\";\nimport {\n  Scroll,\n  Search,\n  Calendar,\n  Trash2,\n  ChevronLeftCircle,\n} from \"lucide-react\";\nimport { ScrollArea } from \"@/components/ui/scroll-area\";\nimport { Button } from \"@/components/ui/button\";\nimport { useUser } from \"@/context/useUser\";\nimport { useView } from \"@/context/useView\";\nimport { Alert, AlertDescription, AlertTitle } from \"@/components/ui/alert\";\n\nexport default function History() {\n  const [searchQuery, setSearchQuery] = useState(\"\");\n  const {\n    conversations,\n    setConversations,\n    activeUser,\n    setActiveConversation,\n    setMessages,\n  } = useUser();\n  const { setActiveView } = useView();\n\n  useEffect(() => {\n    if (!activeUser) {\n      setActiveView(\"SelectAccount\");\n    }\n  }, [activeUser, setActiveView]);\n\n  const filteredConversations = conversations.filter((conv) =>\n    conv.title?.toLowerCase().includes(searchQuery.toLowerCase())\n  );\n\n  const handleDeleteConversation = async (\n    e: React.MouseEvent,\n    conversationId: number\n  ) => {\n    e.stopPropagation();\n    if (!activeUser) {\n      return;\n    }\n    try {\n      await window.electron.deleteConversation(activeUser.id, conversationId);\n      setConversations((prev) =>\n        prev.filter((conv) => conv.id !== conversationId)\n      );\n    } catch (error) {\n      console.error(\"Error deleting conversation:\", error);\n    }\n  };\n\n  const handleSelectConversation = async (conversationId: number) => {\n    setActiveConversation(conversationId);\n    if (!activeUser) {\n      return;\n    }\n    try {\n      const result = await window.electron.getConversationMessages(\n        activeUser.id,\n        conversationId\n      );\n      setMessages(result.messages);\n      setActiveView(\"Chat\");\n    } catch (error) {\n      console.error(\"Error loading conversation:\", error);\n    }\n  };\n\n  return (\n    
<div\n      className=\"pt-5 h-[calc(100vh-1rem)] flex flex-col history-view\"\n      data-testid=\"history-view\"\n    >\n      <div className=\"flex flex-col h-full overflow-hidden\">\n        <div className=\"p-2 bg-secondary/50 border-b border-secondary flex items-center justify-between\">\n          <div className=\"flex items-center\">\n            <Scroll className=\"mr-2 h-6 w-6 text-primary\" />\n            <h1 className=\"text-2xl font-bold\">Chat History</h1>\n          </div>\n          <Button variant=\"secondary\" onClick={() => setActiveView(\"Chat\")}>\n            <ChevronLeftCircle className=\"h-4 w-4 cursor-pointer hover:text-primary\" />\n            Back to Chat\n          </Button>\n        </div>\n\n        {conversations.length === 0 ? (\n          <div className=\"p-4\">\n            <Alert>\n              <AlertTitle>No conversations found</AlertTitle>\n              <AlertDescription className=\"flex flex-col gap-4\">\n                <p>You haven't started any conversations yet.</p>\n                <Button\n                  onClick={() => {\n                    setMessages([]);\n                    setActiveConversation(null);\n                    setActiveView(\"Chat\");\n                  }}\n                >\n                  Start a New Chat\n                </Button>\n              </AlertDescription>\n            </Alert>\n          </div>\n        ) : (\n          <>\n            <div className=\"p-4 border-b border-secondary\">\n              <div className=\"relative\">\n                <Search className=\"absolute left-3 top-1/2 transform -translate-y-1/2 text-muted-foreground w-5 h-5\" />\n                <input\n                  type=\"text\"\n                  placeholder=\"Search conversations...\"\n                  className=\"w-full pl-10 pr-4 py-2 rounded-[8px] border border-input bg-background \n                           focus-visible:ring-1 focus-visible:ring-ring\"\n                  value={searchQuery}\n      
            onChange={(e) => setSearchQuery(e.target.value)}\n                />\n              </div>\n            </div>\n            <ScrollArea\n              className=\"flex-grow px-4 scroll-area\"\n              data-testid=\"history-scroll-area\"\n              style={{ height: \"calc(100% - 8rem)\" }}\n            >\n              <div className=\"grid gap-4 py-4\">\n                {filteredConversations.map((conversation) => (\n                  <div\n                    key={conversation.id}\n                    onClick={() => handleSelectConversation(conversation.id)}\n                    className=\"bg-card rounded-[8px] p-4 shadow-sm hover:shadow-md transition-shadow cursor-pointer\n                             border border-border\"\n                  >\n                    <div className=\"flex justify-between items-start gap-2\">\n                      <div className=\"flex flex-col m-\">\n                        <h3 className=\"font-medium text-lg text-foreground\">\n                          {conversation.title || \"Untitled Conversation\"}\n                        </h3>\n                      </div>\n                      <div className=\"flex items-center gap-2\">\n                        <span className=\"text-sm text-muted-foreground flex items-center gap-1 whitespace-nowrap\">\n                          <Calendar className=\"w-4 h-4\" />\n                          {format(\n                            new Date(conversation.created_at || Date.now()),\n                            \"MMM d, yyyy\"\n                          )}\n                        </span>\n                        <Button\n                          variant=\"ghost\"\n                          size=\"icon\"\n                          className=\"h-8 w-8\"\n                          onClick={(e) =>\n                            handleDeleteConversation(e, conversation.id)\n                          }\n                        >\n                          <Trash2 className=\"h-4 
w-4 text-muted-foreground hover:text-destructive\" />\n                        </Button>\n                      </div>\n                    </div>\n                  </div>\n                ))}\n              </div>\n            </ScrollArea>\n          </>\n        )}\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/ChatSettings.tsx",
    "content": "import { Label } from \"@/components/ui/label\";\nimport {\n  Popover,\n  PopoverTrigger,\n  PopoverContent,\n} from \"@/components/ui/popover\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n  Command,\n  CommandInput,\n  CommandList,\n  CommandEmpty,\n  CommandGroup,\n  CommandItem,\n  CommandSeparator,\n} from \"@/components/ui/command\";\nimport { Check, ChevronDown, Plus, LogOut } from \"lucide-react\";\nimport { cn } from \"@/lib/utils\";\nimport { Textarea } from \"@/components/ui/textarea\";\nimport { Slider } from \"@/components/ui/slider\";\nimport {\n  Select,\n  SelectTrigger,\n  SelectValue,\n  SelectContent,\n  SelectGroup,\n  SelectItem,\n  SelectLabel,\n} from \"@/components/ui/select\";\nimport { useUser } from \"@/context/useUser\";\nimport { useState, useEffect } from \"react\";\nimport { useView } from \"@/context/useView\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { useLibrary } from \"@/context/useLibrary\";\nimport { Input } from \"@/components/ui/input\";\n\nexport default function ChatSettings() {\n  const {\n    setApiKeys,\n    setPrompts,\n    setConversations,\n    setActiveUser,\n    openRouterModels,\n    externalOllama,\n  } = useUser();\n  const { setSelectedCollection, setFiles } = useLibrary();\n  const { activeUser, apiKeys, prompts, azureModels, customModels } = useUser();\n  const [open, setOpen] = useState<boolean>(false);\n  const [value, setValue] = useState<string>(\"\");\n  const [showNewPrompt, setShowNewPrompt] = useState<boolean>(false);\n  const [newPrompt, setNewPrompt] = useState<string>(\"\");\n  const { setActiveView } = useView();\n  const {\n    settings,\n    setSettings,\n    setSettingsOpen,\n    localModels,\n    handleRunModel,\n    maxTokens,\n    setMaxTokens,\n    ollamaModels,\n    setLocalModalLoading,\n    fetchSettings,\n    setSelectedProvider,\n  } = useSysSettings();\n  const [localMaxTokens, 
setLocalMaxTokens] = useState<string>(\"\");\n\n  useEffect(() => {\n    setLocalMaxTokens(maxTokens?.toString() || \"\");\n  }, [maxTokens]);\n\n  const handleMaxTokensChange = async (value: string) => {\n    setLocalMaxTokens(value);\n    const parsedValue = parseInt(value);\n    if (!isNaN(parsedValue)) {\n      setMaxTokens(parsedValue);\n      if (activeUser) {\n        await window.electron.updateUserSettings({\n          userId: activeUser.id,\n          maxTokens: parsedValue,\n        });\n        setSettings((prev) => ({ ...prev, maxTokens: parsedValue }));\n      }\n    }\n  };\n\n  const handleProviderModelChange = async (\n    provider: string,\n    model_name: string\n  ) => {\n    if (!activeUser) {\n      return;\n    }\n    await window.electron.updateUserSettings({\n      userId: activeUser.id,\n      provider: provider.toLowerCase(),\n      model: model_name,\n    });\n    if (provider === \"ollama external\") {\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        provider: \"ollama external\",\n        model:\n          externalOllama?.find((model) => model.name === model_name)?.model ??\n          \"\",\n        selectedExternalOllamaId:\n          externalOllama?.find((model) => model.name === model_name)?.id ?? 0,\n      });\n    }\n    if (provider === \"ollama\") {\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        ollamaModel: model_name,\n      });\n    }\n    if (provider === \"azure open ai\") {\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        baseUrl:\n          azureModels?.find((model) => model.name === model_name)?.endpoint ??\n          \"\",\n        selectedAzureId:\n          azureModels?.find((model) => model.name === model_name)?.id ?? 
0,\n      });\n    }\n    if (provider === \"custom\") {\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        model:\n          customModels?.find((model) => model.name === model_name)?.model ?? \"\",\n        baseUrl:\n          customModels?.find((model) => model.name === model_name)?.endpoint ??\n          \"\",\n        selectedCustomId:\n          customModels?.find((model) => model.name === model_name)?.id ?? 0,\n      });\n    }\n\n    setSettings((prev) => ({\n      ...prev,\n      model: model_name,\n      provider: provider.toLowerCase(),\n    }));\n  };\n\n  const modelTokenDefaults = {\n    \"gpt-3.5-turbo\": 4096,\n    \"gpt-4o\": 8192,\n    \"gpt-4o-mini\": 4096,\n    \"o1-preview\": 8192,\n    \"o1-mini\": 4096,\n    o1: 8192,\n    \"o3-mini-2025-01-31\": 20000,\n    \"claude-3-5-sonnet-20241022\": 8192,\n    \"claude-3-5-haiku-20241022\": 8192,\n    \"claude-3-opus-20240229\": 4096,\n    \"claude-3-sonnet-20240229\": 4096,\n    \"claude-3-haiku-20240307\": 4096,\n    \"claude-2.1\": 4096,\n    \"claude-2.0\": 4096,\n    \"gemini-1.5-flash\": 8192,\n    \"gemini-1.5-pro\": 8192,\n    \"grok-beta\": 8192,\n    local: 2048,\n    ollama: 2048,\n    \"azure open ai\": 4096,\n    custom: 2048,\n    deepseek: 8192,\n    \"ollama external\": 2048,\n  };\n\n  const modelOptions = {\n    openai: [\n      \"gpt-3.5-turbo\",\n      \"gpt-4o\",\n      \"gpt-4o-mini\",\n      \"o1-preview\",\n      \"o1-mini\",\n      \"o1\",\n      \"o3-mini-2025-01-31\",\n    ],\n    anthropic: [\n      \"claude-3-5-sonnet-20241022\",\n      \"claude-3-5-haiku-20241022\",\n      \"claude-3-opus-20240229\",\n      \"claude-3-sonnet-20240229\",\n      \"claude-3-haiku-20240307\",\n      \"claude-2.1\",\n      \"claude-2.0\",\n    ],\n    gemini: [\n      \"gemini-1.5-flash\",\n      \"gemini-1.5-pro\",\n      \"gemini-2.0-flash\",\n      \"gemini-2.0-flash-exp\",\n      \"gemini-2.0-flash-lite-preview-02-05\",\n      
\"gemini-2.0-pro-exp-02-05\",\n      \"gemini-2.0-flash-thinking-exp-01-21\",\n      \"learnlm-1.5-pro-experimental\",\n    ],\n    xai: [\"grok-beta\"],\n    openrouter: openRouterModels || [],\n    local: Array.isArray(localModels)\n      ? localModels.map((model) => model.name)\n      : [],\n    ollama: ollamaModels?.map((model) => model.name) || [],\n    \"azure open ai\": azureModels?.map((model) => model.name) || [],\n    custom: customModels?.map((model) => model.name) || [],\n    deepseek: [\"deepseek-chat\", \"deepseek-reasoner\"],\n    \"ollama external\": externalOllama?.map((model) => model.name) || [],\n  };\n\n  const handleAddPrompt = async () => {\n    if (activeUser) {\n      const newPromptObject = await window.electron.addUserPrompt(\n        activeUser.id,\n        newPrompt,\n        newPrompt\n      );\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        promptId: newPromptObject.id,\n      });\n      setSettings((prev) => ({ ...prev, promptId: newPromptObject.id }));\n      setPrompts((prev) => [\n        ...prev,\n        {\n          id: newPromptObject.id,\n          name: newPromptObject.name,\n          prompt: newPromptObject.prompt,\n          userId: activeUser.id,\n        },\n      ]);\n    }\n  };\n\n  useEffect(() => {\n    if (settings.promptId) {\n      const promptId = settings.promptId.toString();\n      const selectedPromptName =\n        prompts.find((p) => p.id === parseInt(promptId))?.name || \"\";\n      setValue(selectedPromptName);\n    }\n  }, [settings.promptId, prompts]);\n\n  // Add new useEffect for handling initial model selection\n  useEffect(() => {\n    if (\n      settings.provider === \"custom\" &&\n      settings.selectedCustomId &&\n      customModels\n    ) {\n      const selectedCustomModel = customModels.find(\n        (model) => model.id === settings.selectedCustomId\n      );\n      if (selectedCustomModel) {\n        setSettings((prev) => ({\n          ...prev,\n  
        model: selectedCustomModel.model,\n          displayModel: selectedCustomModel.name,\n        }));\n      }\n    }\n  }, [settings.provider, settings.selectedCustomId, customModels]);\n\n  return (\n    <div className=\"space-y-6\">\n      <div className=\"rounded-[6px] p-4 bg-gradient-to-br from-secondary/50 via-secondary/30 to-background border\">\n        <div className=\"space-y-4\">\n          <div className=\"grid grid-cols-4 items-start gap-4 py-2\">\n            <Label\n              htmlFor=\"prompt\"\n              className=\"text-right text-sm font-medium pt-2\"\n            >\n              Prompt\n            </Label>\n            <div className=\"col-span-3 space-y-4\">\n              <Popover open={open} onOpenChange={setOpen}>\n                <PopoverTrigger asChild>\n                  <Button\n                    id=\"select-42\"\n                    variant=\"outline\"\n                    role=\"combobox\"\n                    aria-expanded={open}\n                    className=\"w-full justify-between bg-secondary/05 px-3 font-normal bg-background\"\n                  >\n                    <span\n                      className={cn(\n                        \"truncate\",\n                        !value && \"text-muted-foreground\"\n                      )}\n                    >\n                      {value || \"Default Prompt\"}\n                    </span>\n                    <ChevronDown size={16} className=\"opacity-50\" />\n                  </Button>\n                </PopoverTrigger>\n                <PopoverContent className=\"w-full p-0\" align=\"start\">\n                  <Command>\n                    <CommandInput placeholder=\"Search prompts...\" />\n                    <CommandList>\n                      <CommandEmpty>No prompt found.</CommandEmpty>\n                      <CommandGroup>\n                        <CommandItem\n                          onSelect={() => {\n                            
setShowNewPrompt(true);\n                            setOpen(false);\n                            setValue(\"Adding New Prompt\");\n                          }}\n                          className=\"flex items-center\"\n                        >\n                          <Plus className=\"mr-2 h-4 w-4\" />\n                          Add New Prompt\n                        </CommandItem>\n                      </CommandGroup>\n                      <CommandSeparator />\n                      <CommandGroup>\n                        {prompts.map((prompt) => (\n                          <CommandItem\n                            key={prompt.id}\n                            value={prompt.name}\n                            onSelect={(currentValue) => {\n                              setValue(currentValue);\n                              setOpen(false);\n                              if (activeUser) {\n                                window.electron.updateUserSettings({\n                                  userId: activeUser.id,\n                                  promptId:\n                                    prompts.find((p) => p.name === currentValue)\n                                      ?.id ?? 0,\n                                });\n                                setSettings((prev) => ({\n                                  ...prev,\n                                  promptId:\n                                    prompts.find((p) => p.name === currentValue)\n                                      ?.id ?? 
0,\n                                }));\n                              }\n                              toast({\n                                title: \"Prompt set\",\n                                description: `Prompt set to ${currentValue}`,\n                              });\n                            }}\n                          >\n                            {prompt.name.slice(0, 50)}\n                            {value === prompt.name && (\n                              <Check className=\"ml-auto h-4 w-4\" />\n                            )}\n                          </CommandItem>\n                        ))}\n                      </CommandGroup>\n                    </CommandList>\n                  </Command>\n                </PopoverContent>\n              </Popover>\n\n              {showNewPrompt && (\n                <div className=\"flex gap-4\">\n                  <Textarea\n                    id=\"newPrompt\"\n                    placeholder=\"Enter new prompt\"\n                    value={newPrompt}\n                    onChange={(e) => setNewPrompt(e.target.value)}\n                    className=\"flex-1\"\n                  />\n                  <Button\n                    onClick={() => {\n                      setShowNewPrompt(false);\n                      handleAddPrompt();\n                      setNewPrompt(\"\");\n                      toast({\n                        title: \"Prompt added\",\n                        description: `Prompt added to ${value}`,\n                      });\n                    }}\n                  >\n                    Add\n                  </Button>\n                </div>\n              )}\n            </div>\n          </div>\n        </div>\n        <div className=\"space-y-4\">\n          <div className=\"grid grid-cols-4 items-center gap-4\">\n            <Label htmlFor=\"model\" className=\"text-right text-sm font-medium\">\n              Model\n            </Label>\n            <Select\n    
          value={\n                settings.provider === \"ollama external\" && externalOllama\n                  ? (() => {\n                      const modelInfo = externalOllama.find(\n                        (m) => m.id === settings.selectedExternalOllamaId\n                      );\n                      return modelInfo?.name || settings.model;\n                    })()\n                  : settings.provider === \"custom\"\n                  ? settings.displayModel || settings.model\n                  : settings.model\n              }\n              onValueChange={async (value) => {\n                let provider = Object.keys(modelOptions).find((key) =>\n                  modelOptions[key as keyof typeof modelOptions].includes(value)\n                ) as LLMProvider;\n\n                if (modelOptions.ollama.includes(value)) {\n                  provider = \"ollama\";\n                }\n                if (modelOptions[\"ollama external\"].includes(value)) {\n                  provider = \"ollama external\";\n                }\n                if (!activeUser) {\n                  return;\n                }\n\n                if (provider === \"local\") {\n                  await handleProviderModelChange(provider, value);\n                  await window.electron.updateUserSettings({\n                    userId: activeUser.id,\n                    model:\n                      localModels.find((model) => model.name === value)?.name ??\n                      \"\",\n                    provider: \"local\",\n                    modelType:\n                      localModels.find((model) => model.name === value)?.type ??\n                      \"\",\n                    modelLocation:\n                      localModels.find((model) => model.name === value)\n                        ?.model_location ?? 
\"\",\n                  });\n                }\n                await handleProviderModelChange(provider, value);\n                await fetchSettings(activeUser);\n\n                const newMaxTokens =\n                  modelTokenDefaults[\n                    value.toLowerCase() as keyof typeof modelTokenDefaults\n                  ] ||\n                  modelTokenDefaults[\n                    provider.toLowerCase() as keyof typeof modelTokenDefaults\n                  ] ||\n                  modelTokenDefaults.local;\n\n                setMaxTokens(newMaxTokens);\n                setLocalMaxTokens(newMaxTokens.toString());\n\n                const isLocalModel = modelOptions.local.includes(value);\n                if (isLocalModel && activeUser) {\n                  const selectedModelPath = localModels.find(\n                    (model) => model.name === value\n                  )?.model_location;\n                  const selectedModelType = localModels.find(\n                    (model) => model.name === value\n                  )?.type;\n                  toast({\n                    title: \"Local model loading\",\n                    description: `Loading ${value}...`,\n                  });\n                  if (selectedModelPath && selectedModelType) {\n                    await handleRunModel(\n                      value,\n                      selectedModelPath,\n                      selectedModelType,\n                      activeUser.id.toString()\n                    );\n                  }\n                } else if (modelOptions.ollama.includes(value)) {\n                  toast({\n                    title: \"Ollama model loading\",\n                    description: `Loading ${value}...`,\n                  });\n                  if (activeUser) {\n                    await window.electron.runOllama(value, activeUser);\n                    setLocalModalLoading(false);\n                    toast({\n                      title: \"Ollama 
model loaded\",\n                      description: `Ollama model loaded`,\n                    });\n                  }\n                } else {\n                  toast({\n                    title: \"Model set\",\n                    description: `Model set to ${value}`,\n                  });\n                }\n              }}\n            >\n              <SelectTrigger className=\"col-span-3 bg-background\">\n                <SelectValue placeholder=\"Select model\">\n                  {settings.provider === \"ollama external\" && externalOllama\n                    ? (() => {\n                        const modelInfo = externalOllama.find(\n                          (m) => m.id === settings.selectedExternalOllamaId\n                        );\n                        if (modelInfo) {\n                          const endpoint =\n                            modelInfo.endpoint\n                              ?.replace(/^https?:\\/\\//, \"\")\n                              .replace(/\\/.*$/, \"\") || \"unknown\";\n                          return `${modelInfo.name} [External: ${endpoint}]`;\n                        }\n                        return settings.model;\n                      })()\n                    : settings.model}\n                </SelectValue>\n              </SelectTrigger>\n              <SelectContent className=\"bg-background\">\n                {apiKeys.map((apiKey) => (\n                  <SelectGroup key={apiKey.provider}>\n                    <SelectLabel className=\"font-semibold\">\n                      {apiKey.provider.toUpperCase()}\n                    </SelectLabel>\n                    {modelOptions[\n                      apiKey.provider.toLowerCase() as keyof typeof modelOptions\n                    ]\n                      ?.filter((model) => model && model.trim() !== \"\")\n                      .map((model) => (\n                        <SelectItem key={model} value={model}>\n                          {model}\n             
           </SelectItem>\n                      ))}\n                  </SelectGroup>\n                ))}\n                {localModels.length > 0 && (\n                  <SelectGroup>\n                    <SelectLabel className=\"font-semibold\">LOCAL</SelectLabel>\n                    {modelOptions.local\n                      .filter((model) => model && model.trim() !== \"\")\n                      .map((model) => (\n                        <SelectItem key={model} value={model}>\n                          {model}\n                        </SelectItem>\n                      ))}\n                  </SelectGroup>\n                )}\n                {ollamaModels.length > 0 && (\n                  <SelectGroup>\n                    <SelectLabel className=\"font-semibold\">OLLAMA</SelectLabel>\n                    {modelOptions.ollama\n                      .filter((model) => model && model.trim() !== \"\")\n                      .map((model) => (\n                        <SelectItem key={model} value={model}>\n                          {model}\n                        </SelectItem>\n                      ))}\n                  </SelectGroup>\n                )}\n                {customModels.length > 0 && (\n                  <SelectGroup>\n                    <SelectLabel className=\"font-semibold\">CUSTOM</SelectLabel>\n                    {modelOptions.custom\n                      .filter((model) => model && model.trim() !== \"\")\n                      .map((model) => (\n                        <SelectItem key={model} value={model}>\n                          {model}\n                        </SelectItem>\n                      ))}\n                  </SelectGroup>\n                )}\n                {externalOllama.length > 0 && (\n                  <SelectGroup>\n                    <SelectLabel className=\"font-semibold\">\n                      OLLAMA EXTERNAL\n                    </SelectLabel>\n                    {modelOptions[\"ollama external\"]\n  
                    .filter((model) => model && model.trim() !== \"\")\n                      .map((model) => {\n                        const modelInfo = externalOllama.find(\n                          (m) => m.name === model\n                        );\n                        const endpoint =\n                          modelInfo?.endpoint\n                            ?.replace(/^https?:\\/\\//, \"\")\n                            .replace(/\\/.*$/, \"\") || \"unknown\";\n                        return (\n                          <SelectItem key={model} value={model}>\n                            {model} [External: {endpoint}]\n                          </SelectItem>\n                        );\n                      })}\n                  </SelectGroup>\n                )}\n              </SelectContent>\n            </Select>\n          </div>\n\n          <div className=\"grid grid-cols-4 items-center gap-4\">\n            <Label\n              htmlFor=\"temperature\"\n              className=\"text-right text-sm font-medium\"\n            >\n              Temperature\n            </Label>\n            <div className=\"col-span-3 flex items-center gap-4\">\n              <Slider\n                id=\"temperature\"\n                min={0}\n                max={1}\n                step={0.1}\n                value={[settings.temperature ?? 0.7]}\n                onValueChange={(value) => {\n                  setSettings((prev) => ({ ...prev, temperature: value[0] }));\n                  if (activeUser) {\n                    window.electron.updateUserSettings({\n                      userId: activeUser.id,\n                      temperature: value[0],\n                    });\n                  }\n                }}\n                className=\"flex-grow\"\n              />\n              <span className=\"w-12 text-right text-sm tabular-nums\">\n                {settings.temperature?.toFixed(1) ?? 
\"0.7\"}\n              </span>\n            </div>\n          </div>\n\n          <div className=\"grid grid-cols-4 items-center gap-4\">\n            <Label\n              htmlFor=\"maxTokens\"\n              className=\"text-right text-sm font-medium\"\n            >\n              Max Tokens\n            </Label>\n            <Input\n              id=\"maxTokens\"\n              type=\"number\"\n              value={localMaxTokens}\n              onChange={(e) => handleMaxTokensChange(e.target.value)}\n              className=\"col-span-3 bg-background\"\n            />\n          </div>\n        </div>\n        {settings.model === \"o3-mini-2025-01-31\" && (\n          <div className=\"grid grid-cols-4 items-center gap-4 py-2\">\n            <Label\n              htmlFor=\"reasoningEffort\"\n              className=\"text-right text-sm font-medium\"\n            >\n              Reasoning\n            </Label>\n            <Select\n              value={settings.reasoningEffort ?? \"medium\"}\n              onValueChange={(value: ReasoningEffort) => {\n                setSettings((prev) => ({ ...prev, reasoningEffort: value }));\n                if (activeUser) {\n                  window.electron.updateUserSettings({\n                    userId: activeUser.id,\n                    reasoningEffort: value as ReasoningEffort,\n                  });\n                }\n              }}\n            >\n              <SelectTrigger className=\"col-span-3 bg-background\">\n                <SelectValue placeholder=\"Select reasoning effort\" />\n              </SelectTrigger>\n              <SelectContent className=\"bg-background\">\n                <SelectGroup>\n                  <SelectItem value=\"low\">Low</SelectItem>\n                  <SelectItem value=\"medium\">Medium</SelectItem>\n                  <SelectItem value=\"high\">High</SelectItem>\n                </SelectGroup>\n              </SelectContent>\n            </Select>\n          </div>\n        
)}\n      </div>\n      <div className=\"flex flex-col space-y-4 pt-6 border-t\">\n        <div className=\"flex justify-end space-x-2\">\n          <Button variant=\"outline\" onClick={() => setSettingsOpen(false)}>\n            Cancel\n          </Button>\n          <Button\n            variant=\"secondary\"\n            onClick={() => {\n              setSettingsOpen(false);\n              if (activeUser) {\n                window.electron.updateUserSettings({\n                  vectorstore: settings.vectorstore ?? \"\",\n                  temperature: Number(settings.temperature),\n                  model: settings.model ?? \"\",\n                  provider: settings.provider ?? \"\",\n                });\n              }\n\n              toast({\n                title: \"Settings saved\",\n                description: `Settings saved`,\n              });\n            }}\n          >\n            Save Changes\n          </Button>\n        </div>\n\n        <div className=\"flex\">\n          <Button\n            variant=\"outline\"\n            size=\"sm\"\n            className=\"text-destructive hover:text-destructive\"\n            onClick={() => {\n              setActiveUser(null);\n              setSelectedCollection(null);\n              setApiKeys([]);\n              setPrompts([]);\n              setFiles([]);\n              setConversations([]);\n              setActiveView(\"SelectAccount\");\n              setSettingsOpen(false);\n              setSelectedProvider(\"\");\n              toast({\n                title: \"Logged out\",\n                description: `Logged out of all accounts`,\n              });\n            }}\n          >\n            <LogOut size={14} className=\"mr-2\" /> Logout\n          </Button>\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/DevIntegration.tsx",
    "content": "import { Cpu, Trash, Copy, Check, Eye } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/components/ui/input\";\nimport {\n  Select,\n  SelectContent,\n  SelectItem,\n  SelectTrigger,\n  SelectValue,\n} from \"@/components/ui/select\";\nimport { useUser } from \"@/context/useUser\";\nimport { useState } from \"react\";\nimport { useClipboard } from \"use-clipboard-copy\";\nimport {\n  Dialog,\n  DialogContent,\n  DialogHeader,\n  DialogTitle,\n  DialogDescription,\n} from \"@/components/ui/dialog\";\n\ninterface APIKey {\n  id: number;\n  key: string;\n  name: string;\n  expiration: string | null;\n}\n\nexport function DevIntegration() {\n  const { activeUser, devAPIKeys, setDevAPIKeys } = useUser();\n  const [keyName, setKeyName] = useState(\"\");\n  const [expiration, setExpiration] = useState<string | null>(null);\n  const [activeKeysMinimized, setActiveKeysMinimized] = useState(true);\n  const [showKeyDialog, setShowKeyDialog] = useState(false);\n  const [selectedKey, setSelectedKey] = useState<{\n    key: string;\n    name: string;\n    isNew?: boolean;\n  } | null>(null);\n  const clipboard = useClipboard();\n  const [isCopied, setIsCopied] = useState(false);\n\n  const handleCopy = () => {\n    if (selectedKey) {\n      clipboard.copy(selectedKey.key);\n      setIsCopied(true);\n      setTimeout(() => setIsCopied(false), 2000);\n    }\n  };\n\n  const handleDeleteKey = async (id: number) => {\n    if (!activeUser) return;\n    await window.electron.deleteDevAPIKey(activeUser.id, id);\n    setDevAPIKeys(devAPIKeys.filter((key) => key.id !== id));\n  };\n\n  const handleGenerateKey = async () => {\n    if (!activeUser) return;\n    const results = await window.electron.addDevAPIKey(\n      activeUser.id,\n      keyName,\n      expiration === \"never\" ? 
null : expiration\n    );\n    setDevAPIKeys([...devAPIKeys, results]);\n    setSelectedKey({ key: results.key, name: keyName, isNew: true });\n    setShowKeyDialog(true);\n    setKeyName(\"\");\n    setExpiration(null);\n  };\n\n  const handleViewKey = (key: APIKey) => {\n    setSelectedKey({ key: key.key, name: key.name });\n    setShowKeyDialog(true);\n  };\n\n  return (\n    <div>\n      <Dialog open={showKeyDialog} onOpenChange={setShowKeyDialog}>\n        <DialogContent>\n          <DialogHeader>\n            <DialogTitle>\n              {selectedKey?.isNew ? \"API Key Generated\" : \"View API Key\"}\n            </DialogTitle>\n            <DialogDescription>\n              {selectedKey?.isNew\n                ? \"Please copy your API key. You won't be able to see it again.\"\n                : `Viewing API key: ${selectedKey?.name}`}\n            </DialogDescription>\n          </DialogHeader>\n          <div className=\"mt-4 p-4 bg-muted rounded-[8px] relative\">\n            <p className=\"text-sm break-all font-mono pr-12\">\n              {selectedKey?.key}\n            </p>\n            <Button\n              size=\"sm\"\n              variant=\"outline\"\n              className=\"absolute top-2 right-2\"\n              onClick={handleCopy}\n            >\n              {isCopied ? 
(\n                <Check className=\"h-4 w-4 text-green-500\" />\n              ) : (\n                <Copy className=\"h-4 w-4\" />\n              )}\n            </Button>\n          </div>\n        </DialogContent>\n      </Dialog>\n\n      <div className=\"flex flex-col gap-4\">\n        <div className=\"rounded-[6px] bg-background\">\n          <div className=\"flex items-center gap-2 \">\n            <Cpu className=\"h-4 w-4 text-primary\" />\n            <h3 className=\"text-sm font-medium\">Generate API Key</h3>\n          </div>\n\n          <div className=\"space-y-2\">\n            <div className=\"space-y-2\">\n              <label className=\"text-sm font-medium\">Key Name</label>\n              <Input\n                type=\"text\"\n                placeholder=\"Enter a name for this API key\"\n                value={keyName}\n                onChange={(e) => setKeyName(e.target.value)}\n              />\n            </div>\n            <div className=\"space-y-2\">\n              <label className=\"text-sm font-medium\">Expiration</label>\n              <Select\n                value={expiration ?? 
undefined}\n                onValueChange={(value) => setExpiration(value)}\n              >\n                <SelectTrigger>\n                  <SelectValue placeholder=\"Select an option\" />\n                </SelectTrigger>\n                <SelectContent>\n                  <SelectItem value=\"30\">30 days</SelectItem>\n                  <SelectItem value=\"60\">60 days</SelectItem>\n                  <SelectItem value=\"90\">90 days</SelectItem>\n                  <SelectItem value=\"never\">Never expire</SelectItem>\n                </SelectContent>\n              </Select>\n            </div>\n\n            <Button\n              variant=\"secondary\"\n              className=\"w-full\"\n              onClick={handleGenerateKey}\n            >\n              Generate Key\n            </Button>\n          </div>\n        </div>\n\n        <div className=\"rounded-[6px] p-4 bg-gradient-to-br from-secondary/50 via-secondary/30 to-background border\">\n          <div className=\"flex items-center justify-between mb-2\">\n            <div className=\"flex items-center gap-2\">\n              <div className=\"h-2 w-2 rounded-full bg-green-500 animate-pulse\" />\n              <h3 className=\"text-sm font-medium\">Active API Keys</h3>\n            </div>\n            <Button\n              variant=\"outline\"\n              size=\"sm\"\n              onClick={() => setActiveKeysMinimized(!activeKeysMinimized)}\n            >\n              {activeKeysMinimized ? \"View Keys\" : \"Minimize\"}\n            </Button>\n          </div>\n\n          {!activeKeysMinimized && (\n            <div className=\"space-y-2 max-h-[200px] overflow-y-auto p-2\">\n              {devAPIKeys.length > 0 ? 
(\n                devAPIKeys.map((key) => (\n                  <div\n                    key={key.id}\n                    className=\"flex items-center justify-between p-2 rounded-[4px] bg-background/80 backdrop-blur-sm border shadow-sm hover:shadow-md transition-shadow\"\n                  >\n                    <div className=\"flex flex-col gap-1\">\n                      <p className=\"text-xs font-medium\">{key.name}</p>\n                      <p className=\"text-xs text-muted-foreground\">\n                        Expires: {key.expiration ?? \"Never\"}\n                      </p>\n                    </div>\n                    <div className=\"flex gap-2\">\n                      <Button\n                        variant=\"ghost\"\n                        size=\"sm\"\n                        onClick={() => handleViewKey(key)}\n                        className=\"h-8 w-8 p-0\"\n                      >\n                        <Eye className=\"h-4 w-4\" />\n                      </Button>\n                      <Button\n                        variant=\"ghost\"\n                        size=\"sm\"\n                        onClick={() => handleDeleteKey(key.id)}\n                        className=\"h-8 w-8 p-0 hover:text-destructive\"\n                      >\n                        <Trash className=\"h-4 w-4\" />\n                      </Button>\n                    </div>\n                  </div>\n                ))\n              ) : (\n                <p className=\"text-sm text-muted-foreground\">\n                  No active API keys\n                </p>\n              )}\n            </div>\n          )}\n          {activeKeysMinimized && (\n            <p className=\"text-sm font-medium\">\n              {devAPIKeys.length} active keys\n            </p>\n          )}\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AddLocalModel.tsx",
    "content": "import { useState, useEffect } from \"react\";\nimport { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/components/ui/input\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\nimport {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\nimport { Download, HelpCircle, Loader2, X } from \"lucide-react\";\nimport { Progress } from \"@/components/ui/progress\";\n\nexport default function AddLocalModel() {\n  const [downloadProgress, setDownloadProgress] =\n    useState<DownloadProgressData>({\n      message: \"\",\n      totalProgress: 0,\n    });\n  const [progressMessage, setProgressMessage] = useState(\"\");\n  const [currentFile, setCurrentFile] = useState<string>();\n  const [fileProgress, setFileProgress] = useState(0);\n  const [isDownloading, setIsDownloading] = useState(false);\n\n  const { activeUser } = useUser();\n  const {\n    localModelDir,\n    setLocalModel,\n    localModel,\n    setLocalModalLoading,\n    localModalLoading,\n  } = useSysSettings();\n\n  useEffect(() => {\n    const handleProgress = (\n      _: Electron.IpcRendererEvent,\n      message: string | OllamaProgressEvent | DownloadModelProgress\n    ) => {\n      if (\n        typeof message === \"object\" &&\n        \"type\" in message &&\n        message.type === \"progress\"\n      ) {\n        const {\n          message: progressMessage,\n          fileName,\n          fileProgress,\n          totalProgress,\n          ...rest\n        } = message.data;\n        setProgressMessage(progressMessage);\n        setDownloadProgress({\n          message: progressMessage,\n          totalProgress,\n          ...rest,\n        });\n        if (fileName) setCurrentFile(fileName);\n        if (typeof fileProgress === \"number\") setFileProgress(fileProgress);\n      }\n    };\n\n    
window.electron.removeListener(\"download-model-progress\", handleProgress);\n    window.electron.on(\"download-model-progress\", handleProgress);\n\n    return () => {\n      window.electron.removeListener(\"download-model-progress\", handleProgress);\n    };\n  }, []);\n\n  const handleCancel = async () => {\n    try {\n      const result = await window.electron.cancelDownload();\n      if (result.success) {\n        toast({\n          title: \"Download cancelled\",\n          description: \"Model download was cancelled successfully\",\n        });\n      }\n    } catch (error) {\n      console.error(\"Error cancelling download:\", error);\n    } finally {\n      setIsDownloading(false);\n      setLocalModalLoading(false);\n      setDownloadProgress({ message: \"\", totalProgress: 0 });\n      setFileProgress(0);\n      setProgressMessage(\"\");\n      setCurrentFile(undefined);\n    }\n  };\n\n  const handleDownload = async () => {\n    if (!activeUser) {\n      toast({\n        title: \"Invalid User\",\n        description: \"Please login to download models\",\n        variant: \"destructive\",\n      });\n      return;\n    }\n\n    try {\n      setIsDownloading(true);\n      setLocalModalLoading(true);\n      setDownloadProgress({\n        message: \"Starting download...\",\n        totalProgress: 0,\n      });\n      setFileProgress(0);\n      setCurrentFile(undefined);\n      const modelId = localModel.replace(\"hf.co/\", \"\");\n\n      await window.electron.downloadModel({\n        modelId,\n        dirPath: `${localModelDir}/${modelId}`,\n      });\n\n      toast({\n        title: \"Success\",\n        description: `Downloaded model ${modelId}`,\n      });\n    } catch (error) {\n      toast({\n        title: \"Error downloading model\",\n        description:\n          error instanceof Error ? 
error.message : \"Unknown error occurred\",\n        variant: \"destructive\",\n      });\n    } finally {\n      setIsDownloading(false);\n      setLocalModalLoading(false);\n      setDownloadProgress({ message: \"\", totalProgress: 0 });\n      setFileProgress(0);\n      setProgressMessage(\"\");\n      setCurrentFile(undefined);\n    }\n  };\n  // TODO:   Add in Token for Huggingface private model downloads\n  return (\n    <div className=\"text-xs text-muted-foreground\">\n      <div className=\"w-full flex flex-col gap-2\">\n        <TooltipProvider>\n          <Tooltip>\n            <TooltipTrigger className=\"flex flex-row gap-2 items-center\">\n              <Input\n                className=\"w-full\"\n                placeholder=\"Enter model ID (e.g. TheBloke/Mistral-7B-v0.1-GGUF)\"\n                value={localModel}\n                onChange={(e) => setLocalModel(e.target.value)}\n              />{\" \"}\n              <HelpCircle className=\"w-4 h-4\" />\n            </TooltipTrigger>\n            <TooltipContent>\n              Enter a Hugging Face model ID (e.g.\n              TheBloke/Mistral-7B-v0.1-GGUF).\n            </TooltipContent>\n          </Tooltip>\n        </TooltipProvider>\n        {progressMessage && (\n          <div className=\"space-y-2\">\n            <div className=\"flex items-center justify-between\">\n              <p className=\"text-sm text-secondary-foreground\">\n                {progressMessage}\n              </p>\n              {isDownloading && (\n                <Button\n                  variant=\"destructive\"\n                  size=\"sm\"\n                  onClick={handleCancel}\n                  className=\"h-6 px-2\"\n                >\n                  <X className=\"h-4 w-4\" />\n                </Button>\n              )}\n            </div>\n            {currentFile && (\n              <div className=\"space-y-1\">\n                <div className=\"flex justify-between items-center\">\n                  
<p className=\"text-xs text-muted-foreground truncate flex-1\">\n                    {currentFile}\n                  </p>\n                  <p className=\"text-xs text-muted-foreground ml-2\">\n                    {fileProgress}%\n                  </p>\n                </div>\n                <Progress value={fileProgress} className=\"h-1\" />\n                <div className=\"flex justify-between text-xs text-muted-foreground\">\n                  <span>\n                    {downloadProgress.currentSize || \"0 B\"} /{\" \"}\n                    {downloadProgress.totalSize || \"0 B\"}\n                  </span>\n                  {downloadProgress.speed && (\n                    <span>{downloadProgress.speed}</span>\n                  )}\n                </div>\n              </div>\n            )}\n            <div className=\"space-y-1\">\n              <div className=\"flex justify-between text-xs text-muted-foreground\">\n                <span>Total Progress</span>\n                <span>{downloadProgress.totalProgress}%</span>\n              </div>\n              <Progress\n                value={downloadProgress.totalProgress}\n                className=\"h-1\"\n              />\n            </div>\n          </div>\n        )}\n      </div>\n      {localModalLoading && (\n        <div className=\"flex items-center justify-center gap-2\">\n          <Loader2 className=\"animate-spin h-4 w-4\" />\n          <span>Downloading Model...</span>\n        </div>\n      )}\n      <div className=\"w-full flex flex-row gap-2 pt-2\">\n        <Button\n          variant=\"secondary\"\n          className=\"w-full\"\n          onClick={handleDownload}\n          disabled={localModalLoading}\n        >\n          {localModalLoading ? 
(\n            <div className=\"flex items-center gap-2\">\n              <Loader2 className=\"h-4 w-4 animate-spin\" />\n            </div>\n          ) : (\n            <div className=\"flex items-center gap-2\">\n              <Download className=\"h-4 w-4\" /> Download Model\n            </div>\n          )}\n        </Button>\n      </div>\n      <div className=\"w-full flex flex-row gap-2 justify-end pt-1\">\n        <a\n          href=\"https://huggingface.co/models?pipeline_tag=text-generation&sort=trending\"\n          target=\"_blank\"\n          rel=\"noopener noreferrer\"\n          className=\"text-primary hover:underline\"\n        >\n          Browse models on Hugging Face →\n        </a>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AddOllamaModel.tsx",
    "content": "import { useState, useEffect } from \"react\";\nimport { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/components/ui/input\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\nimport {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\nimport { Download, HelpCircle, Loader2, X } from \"lucide-react\";\nimport { Progress } from \"@/components/ui/progress\";\n\nexport default function AddOllamaModel() {\n  const [downloadProgress, setDownloadProgress] =\n    useState<DownloadProgressData>({\n      message: \"\",\n      totalProgress: 0,\n    });\n  const [progressMessage, setProgressMessage] = useState(\"\");\n  const [currentFile, setCurrentFile] = useState<string>();\n  const [fileProgress, setFileProgress] = useState(0);\n  const [isDownloading, setIsDownloading] = useState(false);\n  const [ollamaModel, setOllamaModel] = useState(\"\");\n  const { activeUser } = useUser();\n  const {\n    setLocalModalLoading,\n    localModalLoading,\n    setOllamaModels,\n    ollamaModels,\n  } = useSysSettings();\n\n  useEffect(() => {\n    const handleProgress = (\n      _: Electron.IpcRendererEvent,\n      message: string | OllamaProgressEvent | DownloadModelProgress\n    ) => {\n      if (\n        typeof message === \"object\" &&\n        \"type\" in message &&\n        message.type === \"progress\"\n      ) {\n        const {\n          message: progressMessage,\n          fileName,\n          fileProgress,\n          totalProgress,\n          ...rest\n        } = message.data;\n        setProgressMessage(progressMessage);\n        setDownloadProgress({\n          message: progressMessage,\n          totalProgress,\n          ...rest,\n        });\n        if (fileName) setCurrentFile(fileName);\n        if (typeof fileProgress === \"number\") 
setFileProgress(fileProgress);\n      }\n    };\n\n    window.electron.removeListener(\"download-model-progress\", handleProgress);\n    window.electron.on(\"download-model-progress\", handleProgress);\n\n    return () => {\n      window.electron.removeListener(\"download-model-progress\", handleProgress);\n    };\n  }, []);\n\n  const handleCancel = async () => {\n    try {\n      const result = await window.electron.cancelDownload();\n      if (result.success) {\n        toast({\n          title: \"Download cancelled\",\n          description: \"Model download was cancelled successfully\",\n        });\n      }\n    } catch (error) {\n      console.error(\"Error cancelling download:\", error);\n    } finally {\n      setIsDownloading(false);\n      setLocalModalLoading(false);\n      setDownloadProgress({ message: \"\", totalProgress: 0 });\n      setFileProgress(0);\n      setProgressMessage(\"\");\n      setCurrentFile(undefined);\n    }\n  };\n\n  const handleDownload = async () => {\n    if (ollamaModel.length === 0) {\n      toast({\n        title: \"Invalid Model\",\n        description: \"Please enter a valid model ID\",\n        variant: \"destructive\",\n      });\n      return;\n    }\n    if (!activeUser) {\n      toast({\n        title: \"Invalid User\",\n        description: \"Please login to download models\",\n        variant: \"destructive\",\n      });\n      return;\n    }\n\n    try {\n      setIsDownloading(true);\n      setLocalModalLoading(true);\n      setDownloadProgress({\n        message: \"Starting download...\",\n        totalProgress: 0,\n      });\n      setFileProgress(0);\n      setCurrentFile(undefined);\n      try {\n        await window.electron.pullModel(ollamaModel);\n        setOllamaModels([\n          ...ollamaModels,\n          { name: ollamaModel, type: \"ollama\" },\n        ]);\n      } catch (error) {\n        console.error(\"Error downloading model:\", error);\n      }\n    } catch (error) {\n      toast({\n        
title: \"Error downloading model\",\n        description:\n          error instanceof Error ? error.message : \"Unknown error occurred\",\n        variant: \"destructive\",\n      });\n    } finally {\n      setIsDownloading(false);\n      setLocalModalLoading(false);\n      setDownloadProgress({ message: \"\", totalProgress: 0 });\n      setFileProgress(0);\n      setProgressMessage(\"\");\n      setCurrentFile(undefined);\n    }\n  };\n\n  // TODO:   Add in Token for Huggingface private model downloads\n  return (\n    <div className=\"text-xs text-muted-foreground\">\n      <div className=\"w-full flex flex-col gap-2\">\n        <div className=\"w-full flex flex-col gap-2\">\n          <TooltipProvider>\n            <Tooltip>\n              <TooltipTrigger className=\"flex flex-row gap-2 items-center\">\n                <HelpCircle className=\"w-4 h-4\" />\n                <Input\n                  className=\"w-full\"\n                  placeholder=\"Enter model ID (e.g. TheBloke/Mistral-7B-v0.1-GGUF)\"\n                  value={ollamaModel}\n                  onChange={(e) => setOllamaModel(e.target.value)}\n                />\n              </TooltipTrigger>\n              <TooltipContent>\n                Enter a Ollama model ID (e.g. TheBloke/Mistral-7B-v0.1-GGUF).\n                <br />\n                Hugging Face models can be used by prefixing the model ID <br />\n                with \"hf.co/\" (e.g. 
hf.co/TheBloke/Mistral-7B-v0.1-GGUF).\n              </TooltipContent>\n            </Tooltip>\n          </TooltipProvider>\n        </div>\n        {progressMessage && (\n          <div className=\"space-y-2\">\n            <div className=\"flex items-center justify-between\">\n              <p className=\"text-sm text-secondary-foreground\">\n                {progressMessage}\n              </p>\n              {isDownloading && (\n                <Button\n                  variant=\"destructive\"\n                  size=\"sm\"\n                  onClick={handleCancel}\n                  className=\"h-6 px-2\"\n                >\n                  <X className=\"h-4 w-4\" />\n                </Button>\n              )}\n            </div>\n            {currentFile && (\n              <div className=\"space-y-1\">\n                <div className=\"flex justify-between items-center\">\n                  <p className=\"text-xs text-muted-foreground truncate flex-1\">\n                    {currentFile}\n                  </p>\n                  <p className=\"text-xs text-muted-foreground ml-2\">\n                    {fileProgress}%\n                  </p>\n                </div>\n                <Progress value={fileProgress} className=\"h-1\" />\n                <div className=\"flex justify-between text-xs text-muted-foreground\">\n                  <span>\n                    {downloadProgress.currentSize || \"0 B\"} /{\" \"}\n                    {downloadProgress.totalSize || \"0 B\"}\n                  </span>\n                  {downloadProgress.speed && (\n                    <span>{downloadProgress.speed}</span>\n                  )}\n                </div>\n              </div>\n            )}\n            <div className=\"space-y-1\">\n              <div className=\"flex justify-between text-xs text-muted-foreground\">\n                <span>Total Progress</span>\n                <span>{downloadProgress.totalProgress}%</span>\n              </div>\n        
      <Progress\n                value={downloadProgress.totalProgress}\n                className=\"h-1\"\n              />\n            </div>\n          </div>\n        )}\n      </div>\n      {localModalLoading && (\n        <div className=\"flex items-center justify-center gap-2\">\n          <Loader2 className=\"animate-spin h-4 w-4\" />\n          <span>Downloading Model...</span>\n        </div>\n      )}\n      <div className=\"w-full flex flex-row gap-2 pt-2\">\n        <Button\n          variant=\"secondary\"\n          className=\"w-full\"\n          onClick={handleDownload}\n          disabled={localModalLoading}\n        >\n          {localModalLoading ? (\n            <div className=\"flex items-center gap-2\">\n              <Loader2 className=\"h-4 w-4 animate-spin\" />\n            </div>\n          ) : (\n            <div className=\"flex items-center gap-2\">\n              <Download className=\"h-4 w-4\" /> Download Model\n            </div>\n          )}\n        </Button>\n      </div>\n      <div className=\"w-full flex flex-row gap-2 justify-end pt-1\">\n        <a\n          href=\"https://ollama.ai/models\"\n          target=\"_blank\"\n          rel=\"noopener noreferrer\"\n          className=\"text-primary hover:underline\"\n        >\n          Browse models on Ollama →\n        </a>\n      </div>\n      <div className=\"w-full flex flex-row gap-2 justify-end pt-1\">\n        <a\n          href=\"https://huggingface.co/models?pipeline_tag=text-generation&sort=trending\"\n          target=\"_blank\"\n          rel=\"noopener noreferrer\"\n          className=\"text-primary hover:underline\"\n        >\n          Browse models on Hugging Face →\n        </a>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/AzureOpenAI.tsx",
    "content": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useState } from \"react\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\nimport {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\nimport { HelpCircle } from \"lucide-react\";\n\nexport default function AzureOpenAI() {\n  const { apiKeyInput, setApiKeyInput, activeUser, fetchAzureModels } =\n    useUser();\n  const [customProvider, setCustomProvider] = useState(\"\");\n  const [customBaseUrl, setCustomBaseUrl] = useState(\"\");\n  const [customModel, setCustomModel] = useState(\"\");\n\n  const handleSubmit = async (e: React.FormEvent) => {\n    e.preventDefault();\n    try {\n      if (!activeUser) return;\n      const azureId = await window.electron.addAzureOpenAIModel(\n        activeUser.id,\n        customProvider,\n        customModel,\n        customBaseUrl,\n        apiKeyInput\n      );\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        provider: \"azure open ai\",\n        selectedAzureId: azureId.id,\n        baseUrl: customBaseUrl,\n        model: customModel,\n      });\n\n      await window.electron.addAPIKey(\n        activeUser.id,\n        apiKeyInput,\n        \"azure open ai\"\n      );\n      toast({\n        title: \"Custom provider added\",\n        description: \"Your custom provider has been added\",\n      });\n      setCustomProvider(\"\");\n      setCustomBaseUrl(\"\");\n      setApiKeyInput(\"\");\n      setCustomModel(\"\");\n      fetchAzureModels();\n    } catch (error) {\n      toast({\n        title: \"Error\",\n        description:\n          \"An error occurred while adding your custom provider. 
Please try again.\" +\n          error,\n      });\n    }\n  };\n\n  return (\n    <div className=\"space-y-2\">\n      <TooltipProvider>\n        <div className=\"flex items-center gap-2\">\n          <Tooltip>\n            <TooltipTrigger className=\"flex items-center gap-2 w-full\">\n              <Input\n                id=\"name\"\n                type=\"text\"\n                placeholder=\"Enter a name (e.g. Azure OpenAI gpt-4)\"\n                value={customProvider}\n                onChange={(e) => setCustomProvider(e.target.value)}\n                className=\"input-field\"\n              />\n\n              <HelpCircle className=\"h-4 w-4 text-muted-foreground\" />\n            </TooltipTrigger>\n            <TooltipContent>\n              <p>\n                Give your Azure OpenAI deployment a name\n                <br />\n                Example: \"Azure GPT-4 Production\"\n              </p>\n            </TooltipContent>\n          </Tooltip>\n        </div>\n\n        <div className=\"flex items-center gap-2\">\n          {\" \"}\n          <Tooltip>\n            <TooltipTrigger className=\"flex items-center gap-2 w-full\">\n              <Input\n                id=\"azure-endpoint\"\n                type=\"text\"\n                placeholder=\"Enter Azure endpoint\"\n                value={customBaseUrl}\n                onChange={(e) => setCustomBaseUrl(e.target.value)}\n                className=\"input-field\"\n              />\n\n              <HelpCircle className=\"h-4 w-4 text-muted-foreground\" />\n            </TooltipTrigger>\n            <TooltipContent>\n              <p>\n                Your Azure OpenAI endpoint URL\n                <br />\n                Example:\n                https://your-resource.openai.azure.com/openai/deployments/your-deployment/chat/completions?api-version=2024-02-15-preview\n              </p>\n            </TooltipContent>\n          </Tooltip>\n        </div>\n\n        <div className=\"flex 
items-center gap-2\">\n          <Tooltip>\n            <TooltipTrigger className=\"flex items-center gap-2 w-full\">\n              <Input\n                id=\"custom-model\"\n                type=\"text\"\n                placeholder=\"Enter deployment name\"\n                value={customModel}\n                onChange={(e) => setCustomModel(e.target.value)}\n                className=\"input-field\"\n              />\n\n              <HelpCircle className=\"h-4 w-4 text-muted-foreground\" />\n            </TooltipTrigger>\n            <TooltipContent>\n              <p>\n                The deployment model name in Azure\n                <br />\n                Example: \"gpt-4\" or \"gpt-35-turbo\"\n              </p>\n            </TooltipContent>\n          </Tooltip>\n        </div>\n\n        <div className=\"flex items-center gap-2\">\n          <Tooltip>\n            <TooltipTrigger className=\"flex items-center gap-2 w-full\">\n              <Input\n                id=\"azure-api-key\"\n                type=\"password\"\n                placeholder=\"Enter your Azure API key\"\n                className=\"input-field\"\n                value={apiKeyInput}\n                onChange={(e) => setApiKeyInput(e.target.value)}\n              />\n\n              <HelpCircle className=\"h-4 w-4 text-muted-foreground\" />\n            </TooltipTrigger>\n            <TooltipContent>\n              <p>\n                Your Azure OpenAI API key\n                <br />\n                Format: 32-character string\n              </p>\n            </TooltipContent>\n          </Tooltip>\n        </div>\n      </TooltipProvider>\n\n      <Button variant=\"secondary\" onClick={handleSubmit} className=\"w-full\">\n        Add Azure Open AI Provider\n      </Button>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/CustomLLM.tsx",
    "content": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useState } from \"react\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\n\nexport default function CustomLLM() {\n  const { apiKeyInput, setApiKeyInput, activeUser } = useUser();\n  const [customProvider, setCustomProvider] = useState(\"\");\n  const [customBaseUrl, setCustomBaseUrl] = useState(\"\");\n  const [customModel, setCustomModel] = useState(\"\");\n  const handleSubmit = async (e: React.FormEvent) => {\n    e.preventDefault();\n    try {\n      if (!activeUser) return;\n      const apiId = await window.electron.addCustomAPI(\n        activeUser.id,\n        customProvider,\n        customBaseUrl,\n        apiKeyInput,\n        customModel\n      );\n      await window.electron.updateUserSettings({\n        provider: \"custom\",\n        baseUrl: customBaseUrl,\n        model: customModel,\n        isLocal: false,\n        selectedCustomId: apiId.id,\n      });\n      toast({\n        title: \"Custom provider added\",\n        description: \"Your custom provider has been added\",\n      });\n      setCustomProvider(\"\");\n      setCustomBaseUrl(\"\");\n      setApiKeyInput(\"\");\n      setCustomModel(\"\");\n    } catch (error) {\n      toast({\n        title: \"Error\",\n        description:\n          \"An error occurred while adding your custom provider. Please try again.\" +\n          error,\n      });\n    }\n  };\n\n  return (\n    <div className=\"space-y-2\">\n      <Input\n        id=\"custom-provider-name\"\n        type=\"text\"\n        placeholder=\"Enter custom provider name (e.g. Deployed ooba model)\"\n        value={customProvider}\n        onChange={(e) => setCustomProvider(e.target.value)}\n        className=\"input-field\"\n      />\n      <Input\n        id=\"custom-base-url\"\n        type=\"text\"\n        placeholder=\"Enter base url (e.g. 
https://api.custom.com/v1)\"\n        value={customBaseUrl}\n        onChange={(e) => setCustomBaseUrl(e.target.value)}\n        className=\"input-field\"\n      />\n      <Input\n        id=\"custom-model\"\n        type=\"text\"\n        placeholder=\"Enter custom model (e.g. gpt-4o)\"\n        value={customModel}\n        onChange={(e) => setCustomModel(e.target.value)}\n        className=\"input-field\"\n      />\n      <Input\n        id=\"custom-api-key\"\n        type=\"password\"\n        placeholder=\"Enter your custom API key\"\n        className=\"input-field\"\n        value={apiKeyInput}\n        onChange={(e) => setApiKeyInput(e.target.value)}\n      />\n      <Button variant=\"secondary\" onClick={handleSubmit} className=\"w-full\">\n        Add Custom Provider\n      </Button>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/External.tsx",
    "content": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useUser } from \"@/context/useUser\";\n\ninterface ExternalProps {\n  showUpdateInput: boolean;\n  setShowUpdateInput: (show: boolean) => void;\n}\n\nexport default function External({\n  showUpdateInput,\n  setShowUpdateInput,\n}: ExternalProps) {\n  const { selectedProvider } = useSysSettings();\n  const { apiKeyInput, setApiKeyInput, apiKeys } = useUser();\n\n  const hasProviderKey = selectedProvider\n    ? apiKeys.some(\n        (key) => key.provider.toLowerCase() === selectedProvider.toLowerCase()\n      )\n    : false;\n\n  return (\n    <div className=\"space-y-4\">\n      {!hasProviderKey || showUpdateInput ? (\n        <Input\n          id={`${selectedProvider}-api-key`}\n          type=\"password\"\n          placeholder={`Enter your ${selectedProvider?.toUpperCase()} API key`}\n          className=\"input-field\"\n          value={apiKeyInput}\n          onChange={(e) => setApiKeyInput(e.target.value)}\n        />\n      ) : (\n        hasProviderKey && (\n          <Button\n            variant=\"secondary\"\n            className=\"w-full\"\n            onClick={() => {\n              setShowUpdateInput(true);\n              setApiKeyInput(\"\");\n            }}\n          >\n            Update API Key\n          </Button>\n        )\n      )}\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/ExternalOllama.tsx",
    "content": "import { Input } from \"@/components/ui/input\";\nimport { Button } from \"@/components/ui/button\";\nimport { useState } from \"react\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { Label } from \"@/components/ui/label\";\nimport { Tabs, TabsList, TabsTrigger, TabsContent } from \"@/components/ui/tabs\";\nimport {\n  Card,\n  CardHeader,\n  CardTitle,\n  CardDescription,\n  CardContent,\n  CardFooter,\n} from \"@/components/ui/card\";\nimport {\n  Select,\n  SelectContent,\n  SelectItem,\n  SelectTrigger,\n  SelectValue,\n} from \"@/components/ui/select\";\nexport default function ExternalOllama() {\n  const { activeUser, fetchExternalOllama, externalOllama } = useUser();\n  const [externalOllamaName, setExternalOllamaName] = useState(\"\");\n  const [selectedExternalOllama, setSelectedExternalOllama] =\n    useState<ExternalOllama | null>(null);\n  const [externalOllamaEndpoint, setExternalOllamaEndpoint] = useState(\"\");\n  const [externalOllamaApiKey, setExternalOllamaApiKey] = useState(\"\");\n  const [externalOllamaModel, setExternalOllamaModel] = useState(\"\");\n  const handleSubmit = async (e: React.FormEvent) => {\n    e.preventDefault();\n    if (externalOllamaName === \"\") {\n      toast({\n        title: \"Error\",\n        description: \"Please enter a name\",\n      });\n      return;\n    }\n    if (externalOllamaEndpoint === \"\") {\n      toast({\n        title: \"Error\",\n        description: \"Please enter a endpoint\",\n      });\n      return;\n    }\n    if (externalOllamaModel === \"\") {\n      toast({\n        title: \"Error\",\n        description: \"Please enter a model\",\n      });\n      return;\n    }\n    try {\n      if (!activeUser) return;\n      const ollamaId = await window.electron.addExternalOllama(\n        activeUser.id,\n        externalOllamaName,\n        externalOllamaEndpoint,\n        externalOllamaApiKey,\n        externalOllamaModel\n      
);\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        provider: \"ollama external\",\n        baseUrl: externalOllamaEndpoint,\n        model: externalOllamaModel,\n        isLocal: false,\n        selectedExternalOllamaId: ollamaId.id,\n      });\n      toast({\n        title: \"Custom provider added\",\n        description: \"Your custom provider has been added\",\n      });\n      setExternalOllamaName(\"\");\n      setExternalOllamaEndpoint(\"\");\n      setExternalOllamaApiKey(\"\");\n      setExternalOllamaModel(\"\");\n      setSelectedExternalOllama(null);\n      fetchExternalOllama();\n    } catch (error) {\n      toast({\n        title: \"Error\",\n        description:\n          \"An error occurred while adding your custom provider. Please try again.\" +\n          error,\n      });\n    }\n  };\n\n  const handleSelectedExternalOllama = (value: string) => {\n    const selectedModel = externalOllama.find((model) => model.name === value);\n    setSelectedExternalOllama(selectedModel || null);\n    setExternalOllamaEndpoint(selectedModel?.endpoint || \"\");\n    setExternalOllamaApiKey(selectedModel?.api_key || \"\");\n  };\n\n  return (\n    <div className=\"space-y-2\">\n      {externalOllama.length > 0 ? (\n        <Tabs className=\"w-[400px]\">\n          <TabsList className=\"grid w-full grid-cols-2\">\n            <TabsTrigger value=\"add-model\">Add Model</TabsTrigger>\n            <TabsTrigger value=\"add-endpoint\">Add Endpoint</TabsTrigger>\n          </TabsList>\n          <TabsContent value=\"add-model\">\n            <Card className=\"py-2\">\n              <CardContent className=\"space-y-2 gap-2\">\n                <div className=\"grid grid-cols-4 items-center gap-2 justify-between\">\n                  <Label className=\"text-xs font-medium\">\n                    {selectedExternalOllama\n                      ? 
\"Selected Endpoint\"\n                      : \"Select an Endpoint\"}\n                  </Label>\n                  <Select\n                    onValueChange={handleSelectedExternalOllama}\n                    value={selectedExternalOllama?.name}\n                  >\n                    <SelectTrigger className=\"col-span-3\">\n                      <SelectValue placeholder=\"Select an endpoint\" />\n                    </SelectTrigger>\n                    <SelectContent>\n                      {externalOllama.map((model) => (\n                        <SelectItem key={model.id} value={model.name}>\n                          {model.name}\n                        </SelectItem>\n                      ))}\n                    </SelectContent>\n                  </Select>\n                </div>\n                {selectedExternalOllama && (\n                  <div className=\"space-y-2\">\n                    <div className=\"grid grid-cols-4 items-center gap-2 justify-between\">\n                      <Label htmlFor=\"name\" className=\"text-xs font-medium\">\n                        Name\n                      </Label>\n                      <Input\n                        id=\"name\"\n                        type=\"text\"\n                        placeholder=\"Enter a new name\"\n                        value={externalOllamaName}\n                        onChange={(e) => setExternalOllamaName(e.target.value)}\n                        className=\"input-field col-span-3 bg-background\"\n                      />\n                    </div>\n                    <div className=\"grid grid-cols-4 items-center gap-2 justify-between\">\n                      <Label\n                        htmlFor=\"model-name\"\n                        className=\"text-xs font-medium\"\n                      >\n                        Model Name\n                      </Label>\n                      <Input\n                        id=\"model-name\"\n                        
value={externalOllamaModel}\n                        placeholder=\"Enter the model name\"\n                        onChange={(e) => setExternalOllamaModel(e.target.value)}\n                        className=\"input-field col-span-3 bg-background\"\n                      />\n                    </div>\n                  </div>\n                )}\n              </CardContent>\n              <CardFooter>\n                <Button onClick={handleSubmit} className=\"w-full\">\n                  Save changes\n                </Button>\n              </CardFooter>\n            </Card>\n          </TabsContent>\n          <TabsContent value=\"add-endpoint\">\n            <Card>\n              <CardHeader>\n                <CardTitle>Add Endpoint</CardTitle>\n                <CardDescription>\n                  Add a new endpoint to your external ollama provider.\n                </CardDescription>\n              </CardHeader>\n              <CardContent className=\"space-y-2\">\n                <Input\n                  id=\"Name\"\n                  type=\"text\"\n                  placeholder=\"Enter a name for your ollama external provider\"\n                  value={externalOllamaName}\n                  onChange={(e) => setExternalOllamaName(e.target.value)}\n                  className=\"input-field\"\n                />\n                <Input\n                  id=\"endpoint\"\n                  type=\"text\"\n                  placeholder=\"Endpoint (e.g. 
http://127.0.0.1:11434/v1/)\"\n                  value={externalOllamaEndpoint}\n                  onChange={(e) => setExternalOllamaEndpoint(e.target.value)}\n                  className=\"input-field\"\n                />\n                <Input\n                  id=\"model-name\"\n                  type=\"text\"\n                  placeholder=\"Enter the model name\"\n                  value={externalOllamaModel}\n                  onChange={(e) => setExternalOllamaModel(e.target.value)}\n                />\n                <Input\n                  id=\"api-key\"\n                  type=\"password\"\n                  placeholder=\"Enter your API key (optional)\"\n                  value={externalOllamaApiKey}\n                  onChange={(e) => setExternalOllamaApiKey(e.target.value)}\n                  className=\"input-field\"\n                />\n              </CardContent>\n              <CardFooter>\n                <Button onClick={handleSubmit}>Save changes</Button>\n              </CardFooter>\n            </Card>\n          </TabsContent>\n        </Tabs>\n      ) : (\n        <>\n          <Input\n            id=\"Name\"\n            type=\"text\"\n            placeholder=\"Enter a name for your ollama external provider\"\n            value={externalOllamaName}\n            onChange={(e) => setExternalOllamaName(e.target.value)}\n            className=\"input-field\"\n          />\n          <Input\n            id=\"endpoint\"\n            type=\"text\"\n            placeholder=\"Endpoint (e.g. 
http://localhost:11434/api/chat)\"\n            value={externalOllamaEndpoint}\n            onChange={(e) => setExternalOllamaEndpoint(e.target.value)}\n            className=\"input-field\"\n          />\n\n          <Input\n            id=\"api-key\"\n            type=\"password\"\n            placeholder=\"Enter your API key (optional)\"\n            value={externalOllamaApiKey}\n            onChange={(e) => setExternalOllamaApiKey(e.target.value)}\n            className=\"input-field\"\n          />\n        </>\n      )}\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/LocalLLM.tsx",
    "content": "import {\n  Tooltip,\n  TooltipContent,\n  TooltipProvider,\n  TooltipTrigger,\n} from \"@/components/ui/tooltip\";\nimport { Button } from \"@/components/ui/button\";\nimport { FolderOpenIcon, Loader2 } from \"lucide-react\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport {\n  Select,\n  SelectContent,\n  SelectItem,\n  SelectTrigger,\n  SelectValue,\n} from \"@/components/ui/select\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\nimport AddLocalModel from \"./AddLocalModel\";\n\nconst formatDirectoryPath = (path: string | null) => {\n  if (!path) return \"Not set\";\n  const parts = path.split(\"/\");\n  const lastTwoParts = parts.slice(-2);\n  return `.../${lastTwoParts.join(\"/\")}`;\n};\n\nconst formatModelName = (name: string) => {\n  const parts = name.split(\"-\");\n  if (parts.length <= 2) return name;\n  return `${parts[0]}-${parts[1]}...`;\n};\n\nexport default function LocalLLM() {\n  const { activeUser } = useUser();\n  const {\n    localModelDir,\n    localModels,\n    handleRunModel,\n    localModalLoading,\n    setLocalModelDir,\n    setLocalModels,\n    setSelectedModel,\n    selectedModel,\n  } = useSysSettings();\n\n  const handleSelectDirectory = async () => {\n    try {\n      if (!activeUser) return;\n      const dirPath = await window.electron.openDirectory();\n      if (dirPath) {\n        setLocalModelDir(dirPath);\n        window.electron.updateUserSettings({\n          userId: activeUser.id,\n          modelDirectory: dirPath,\n        });\n        const response = (await window.electron.getDirModels(\n          dirPath\n        )) as unknown as { dirPath: string; models: Model[] };\n        setLocalModels(response.models);\n        toast({\n          title: \"Directory selected\",\n          description: `Selected directory: ${dirPath}`,\n        });\n      }\n    } catch (error) {\n      console.error(\"Error selecting directory:\", error);\n      
toast({\n        title: \"Error\",\n        description: \"Failed to select directory\",\n        variant: \"destructive\",\n      });\n    }\n  };\n  return (\n    <div className=\"space-y-2\">\n      <div className=\"flex items-center justify-between\">\n        <TooltipProvider>\n          <Tooltip delayDuration={200}>\n            <TooltipTrigger asChild>\n              <p className=\"truncate flex-1\">\n                {formatDirectoryPath(localModelDir)}\n              </p>\n            </TooltipTrigger>\n            <TooltipContent\n              side=\"top\"\n              align=\"start\"\n              className=\"max-w-[300px] break-all\"\n            >\n              <p>{localModelDir || \"No directory selected\"}</p>\n            </TooltipContent>\n          </Tooltip>\n        </TooltipProvider>\n        <Button\n          onClick={handleSelectDirectory}\n          variant=\"outline\"\n          className=\"ml-2\"\n        >\n          <FolderOpenIcon className=\"w-4 h-4 mr-2\" />\n          Select Directory\n        </Button>\n      </div>\n      <div className=\"w-full flex flex-row gap-2\">\n        <Select\n          value={selectedModel?.name}\n          onValueChange={(value) =>\n            setSelectedModel(localModels.find((m) => m.name === value) || null)\n          }\n        >\n          <SelectTrigger className=\"w-full\">\n            <SelectValue placeholder=\"Select a local model\" />\n          </SelectTrigger>\n          <SelectContent>\n            {Array.isArray(localModels) &&\n              localModels.map((model) => (\n                <SelectItem key={model.digest || model.name} value={model.name}>\n                  {formatModelName(model.name)} ({model.type})\n                </SelectItem>\n              ))}\n          </SelectContent>\n        </Select>\n        <Button\n          disabled={!selectedModel}\n          variant=\"secondary\"\n          onClick={() => {\n            if (!activeUser || !selectedModel) return;\n      
      const type = selectedModel.type;\n            const model = selectedModel.name;\n            const user_id = activeUser.id.toString();\n            const model_location = selectedModel.model_location;\n            handleRunModel(model, model_location, type, user_id);\n          }}\n        >\n          {localModalLoading ? (\n            <div className=\"flex items-center gap-2\">\n              <Loader2 className=\"h-4 w-4 animate-spin\" />\n            </div>\n          ) : (\n            \"Run\"\n          )}\n        </Button>\n      </div>\n\n      <AddLocalModel />\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/Ollama.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport AddOllamaModel from \"./AddOllamaModel\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useUser } from \"@/context/useUser\";\nimport {\n  Select,\n  SelectContent,\n  SelectItem,\n  SelectTrigger,\n  SelectValue,\n} from \"@/components/ui/select\";\nimport { useState } from \"react\";\nimport { Loader2 } from \"lucide-react\";\n\nexport default function Ollama() {\n  const {\n    settings,\n    setSettings,\n    ollamaModels,\n    setOllamaModels,\n    handleRunOllama,\n    localModalLoading,\n    ollamaInit,\n    setOllamaInit,\n    handleOllamaIntegration,\n  } = useSysSettings();\n  const { activeUser } = useUser();\n  const [selectedModel, setSelectedModel] = useState(\"\");\n  const formatModelName = (name: string) => {\n    const parts = name.split(\"-\");\n    if (parts.length <= 2) return name;\n    return `${parts[0]}-${parts[1]}...`;\n  };\n\n  return (\n    <div className=\"space-y-2\">\n      <div className=\"flex items-center justify-between\">\n        <Button\n          variant={ollamaInit ? \"default\" : \"outline\"}\n          className=\"w-full\"  \n          onClick={async () => {\n            if (activeUser) {\n              const newIntegrationValue = settings.ollamaIntegration === 1 ? 0 : 1;\n              setSettings({\n                ...settings,\n                ollamaIntegration: newIntegrationValue,\n              });\n\n              await window.electron.updateUserSettings({\n                userId: activeUser.id,\n                ollamaIntegration: newIntegrationValue,\n              });\n\n              if (newIntegrationValue === 1) {\n                await handleOllamaIntegration(activeUser);\n                setOllamaInit(true);\n              } else {\n                setOllamaModels([]);\n                setOllamaInit(false);\n              }\n            }\n          }}\n        >\n          {ollamaInit ? 
\"Ollama Integration Enabled\" : \"Integrate with Ollama\"}\n        </Button>\n      </div>\n      {ollamaInit && (\n        <div className=\"flex flex-row gap-2\">\n          <Select value={selectedModel} onValueChange={setSelectedModel}>\n            <SelectTrigger className=\"w-full\">\n              <SelectValue placeholder=\"Select a local model\" />\n            </SelectTrigger>\n            <SelectContent>\n              {Array.isArray(ollamaModels) &&\n                ollamaModels.map((model) => (\n                  <SelectItem key={model.name} value={model.name}>\n                    {formatModelName(model.name)} ({model.type})\n                  </SelectItem>\n                ))}\n            </SelectContent>\n          </Select>\n          <Button\n            variant=\"secondary\"\n            disabled={!selectedModel || localModalLoading}\n            className=\"\"\n            onClick={() => {\n              if (activeUser) {\n                handleRunOllama(selectedModel, activeUser);\n              }\n            }}\n          >\n            {localModalLoading ? (\n              <div className=\"flex items-center gap-2\">\n                <Loader2 className=\"h-4 w-4 animate-spin\" />\n              </div>\n            ) : (\n              \"Run\"\n            )}\n          </Button>\n        </div>\n      )}\n      <div className=\"flex flex-col gap-2\">\n        {ollamaInit && <AddOllamaModel />}\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMModels/Openrouter.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport { Input } from \"@/components/ui/input\";\nimport { useUser } from \"@/context/useUser\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { useState } from \"react\";\n\nexport default function Openrouter() {\n  const { openRouterModels, activeUser, fetchOpenRouterModels } = useUser();\n  const [openRouterModel, setOpenRouterModel] = useState<string>(\"\");\n  const [openRouterKey, setOpenRouterKey] = useState<string>(\"\");\n  const [hasOpenRouter, setHasOpenRouter] = useState<boolean>(\n    openRouterModels.length > 0\n  );\n\n  const handleSaveOpenRouterKey = async () => {\n    if (!activeUser) return;\n    await window.electron.addAPIKey(activeUser.id, openRouterKey, \"openrouter\");\n    await window.electron.updateUserSettings({\n      userId: activeUser.id,\n      provider: \"openrouter\",\n      model: openRouterModel,\n    });\n    setHasOpenRouter(true);\n    setOpenRouterKey(\"\");\n    toast({\n      title: \"OpenRouter Key Saved\",\n      description: \"Your OpenRouter key has been saved\",\n    });\n  };\n\n  const handleAddOpenRouterModel = async () => {\n    try {\n      if (!openRouterModel.trim()) {\n        toast({\n          title: \"Model Required\",\n          description: \"Please enter an OpenRouter model ID.\",\n          variant: \"destructive\",\n        });\n        return;\n      }\n      if (!activeUser) return;\n      await window.electron.addOpenRouterModel(activeUser.id, openRouterModel);\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        provider: \"openrouter\",\n        model: openRouterModel,\n      });\n      await fetchOpenRouterModels();\n      toast({\n        title: \"Model Added\",\n        description: \"Your OpenRouter model has been added\",\n      });\n    } catch (error) {\n      toast({\n        title: \"Error\",\n        description:\n          \"An error occurred while adding the model. 
Please try again.\" + error,\n        variant: \"destructive\",\n      });\n    }\n  };\n\n  return (\n    <div className=\"space-y-2\">\n      {!hasOpenRouter && (\n        <>\n          <Input\n            id=\"local-model-path\"\n            type=\"text\"\n            placeholder=\"Enter your OpenRouter API key\"\n            className=\"input-field\"\n            value={openRouterKey}\n            onChange={(e) => setOpenRouterKey(e.target.value)}\n          />\n          <Button\n            variant=\"secondary\"\n            className=\"w-full\"\n            onClick={() => {\n              handleSaveOpenRouterKey();\n            }}\n          >\n            Save API Key\n          </Button>\n        </>\n      )}\n      {hasOpenRouter && (\n        <>\n          <Button\n            variant=\"secondary\"\n            className=\"w-full\"\n            onClick={() => setHasOpenRouter(false)}\n          >\n            Update API Key\n          </Button>\n          <Input\n            className=\"w-full\"\n            placeholder=\"Enter OpenRouter model ID (e.g. openai/gpt-3.5-turbo)\"\n            value={openRouterModel}\n            onChange={(e) => setOpenRouterModel(e.target.value)}\n          />\n          <Button\n            variant=\"secondary\"\n            className=\"w-full\"\n            onClick={() => handleAddOpenRouterModel()}\n          >\n            Add Model\n          </Button>\n        </>\n      )}\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/LLMPanel.tsx",
    "content": "\"use client\";\n\nimport { useState } from \"react\";\nimport { Button } from \"@/components/ui/button\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { useUser } from \"@/context/useUser\";\nimport { providerIcons } from \"./providers/providerIcons\";\nimport { defaultProviderModel } from \"./providers/defaultsProviderModels\";\nimport LocalLLM from \"./LLMModels/LocalLLM\";\nimport Ollama from \"./LLMModels/Ollama\";\nimport External from \"./LLMModels/External\";\nimport Openrouter from \"./LLMModels/Openrouter\";\nimport CustomLLM from \"./LLMModels/CustomLLM\";\nimport AzureOpenAI from \"./LLMModels/AzureOpenAI\";\nimport {\n  Command,\n  CommandDialog,\n  CommandEmpty,\n  CommandGroup,\n  CommandInput,\n  CommandItem,\n  CommandList,\n} from \"@/components/ui/command\";\nimport { Search } from \"lucide-react\";\nimport ExternalOllama from \"./LLMModels/ExternalOllama\";\n// Provider categories for better organization\nconst providerCategories = {\n  \"Cloud Providers\": [\"openai\", \"anthropic\", \"gemini\", \"deepseek\", \"xai\"],\n  \"Self-Hosted\": [\"ollama\", \"ollama external\", \"local\"],\n  Advanced: [\"openrouter\", \"azure open ai\", \"custom\"],\n} as const;\n\nexport default function LLMPanel() {\n  const [showUpdateInput, setShowUpdateInput] = useState(false);\n  const [isOpen, setIsOpen] = useState(false);\n  const {\n    activeUser,\n    apiKeys,\n    setApiKeys,\n    handleResetChat,\n    apiKeyInput,\n    setApiKeyInput,\n    customModels,\n  } = useUser();\n  const {\n    setSettings,\n    ollamaModels,\n    localModels,\n    selectedProvider,\n    setSelectedProvider,\n  } = useSysSettings();\n\n  const handleSubmit = async (e: React.FormEvent) => {\n    e.preventDefault();\n    const trimmedApiKey = apiKeyInput.trim();\n    const result = await window.electron.keyValidation({\n      apiKey: trimmedApiKey,\n      inputProvider: 
selectedProvider.toLowerCase(),\n    });\n    if (result.error) {\n      toast({\n        title: \"Invalid API Key\",\n        description: \"API key is invalid. Please try again.\",\n        variant: \"destructive\",\n      });\n      return;\n    }\n\n    handleResetChat();\n    if (activeUser && selectedProvider) {\n      await window.electron.addAPIKey(\n        activeUser.id,\n        trimmedApiKey,\n        selectedProvider.toLowerCase()\n      );\n      if (!apiKeys.some((key) => key.provider === selectedProvider)) {\n        setApiKeys((prevKeys) => [\n          ...prevKeys,\n          {\n            id: Date.now(),\n            key: trimmedApiKey,\n            provider: selectedProvider.toLowerCase(),\n          },\n        ]);\n      }\n      setShowUpdateInput(false);\n      setApiKeyInput(\"\");\n      toast({\n        title: \"API Key Saved\",\n        description: `Your ${selectedProvider.toUpperCase()} API key has been saved successfully.`,\n      });\n    }\n  };\n\n  const handleProviderModelChange = async (provider: LLMProvider) => {\n    setSettings((prev) => ({\n      ...prev,\n      provider: provider,\n      model:\n        defaultProviderModel[provider as keyof typeof defaultProviderModel],\n    }));\n    try {\n      if (activeUser) {\n        await window.electron.updateUserSettings({\n          userId: activeUser.id,\n          provider: provider.toLowerCase(),\n        });\n        if (provider === \"openrouter\") {\n          await window.electron.addOpenRouterModel(\n            activeUser.id,\n            \"openai/gpt-3.5-turbo\"\n          );\n        } else {\n          await window.electron.updateUserSettings({\n            userId: activeUser.id,\n            model:\n              defaultProviderModel[\n                provider as keyof typeof defaultProviderModel\n              ],\n          });\n        }\n      }\n    } catch (error) {\n      console.error(\"Error updating user settings:\", error);\n    }\n  };\n\n  const 
renderInputs = () => {\n    switch (selectedProvider.toLowerCase()) {\n      case \"anthropic\":\n      case \"xai\":\n      case \"gemini\":\n      case \"openai\":\n      case \"deepseek\":\n        return (\n          <External\n            showUpdateInput={showUpdateInput}\n            setShowUpdateInput={setShowUpdateInput}\n          />\n        );\n      case \"ollama external\":\n        return <ExternalOllama />;\n      case \"local\":\n        return <LocalLLM />;\n      case \"ollama\":\n        return <Ollama />;\n      case \"openrouter\":\n        return <Openrouter />;\n      case \"azure open ai\":\n        return <AzureOpenAI />;\n      case \"custom\":\n        return <CustomLLM />;\n      default:\n        return null;\n    }\n  };\n\n  return (\n    <div className=\"space-y-8\">\n      <div className=\"w-full\">\n        <div className=\"rounded-[6px] p-4 bg-gradient-to-br from-secondary/50 via-secondary/30 to-background border\">\n          {selectedProvider === \"\" && (\n            <div className=\"flex items-center justify-center mb-4\">\n              <p className=\"text-sm font-medium\">Select a provider</p>\n            </div>\n          )}\n          <Button\n            variant=\"outline\"\n            className=\"w-full justify-between\"\n            onClick={() => setIsOpen(true)}\n          >\n            {selectedProvider ? 
(\n              <div className=\"flex items-center gap-2\">\n                {providerIcons[selectedProvider as keyof typeof providerIcons]}\n                <span>\n                  {selectedProvider.charAt(0).toUpperCase() +\n                    selectedProvider.slice(1)}\n                </span>\n              </div>\n            ) : (\n              <span className=\"text-muted-foreground\">\n                Select a provider...\n              </span>\n            )}\n            <Search className=\"h-4 w-4 text-muted-foreground\" />\n          </Button>\n\n          <CommandDialog open={isOpen} onOpenChange={setIsOpen}>\n            <Command className=\"rounded-lg border shadow-md\">\n              <CommandInput placeholder=\"Search providers...\" />\n              <CommandList>\n                <CommandEmpty>No providers found.</CommandEmpty>\n                {Object.entries(providerCategories).map(\n                  ([category, providers]) => (\n                    <CommandGroup key={category} heading={category}>\n                      {providers.map((provider) => (\n                        <CommandItem\n                          key={provider}\n                          value={provider}\n                          onSelect={(value) => {\n                            setSelectedProvider(value as LLMProvider);\n                            setApiKeyInput(\"\");\n                            setShowUpdateInput(false);\n                            setIsOpen(false);\n                          }}\n                          className=\"flex items-center gap-2 cursor-pointer\"\n                        >\n                          {\n                            providerIcons[\n                              provider as keyof typeof providerIcons\n                            ]\n                          }\n                          <span>\n                            {provider.charAt(0).toUpperCase() +\n                              provider.slice(1)}\n                 
         </span>\n                        </CommandItem>\n                      ))}\n                    </CommandGroup>\n                  )\n                )}\n              </CommandList>\n            </Command>\n          </CommandDialog>\n        </div>\n      </div>\n      {selectedProvider && (\n        <>\n          <div className=\"mt-4\">\n            {renderInputs()}\n            {selectedProvider.toLowerCase() !== \"openrouter\" &&\n              selectedProvider.toLowerCase() !== \"ollama\" &&\n              selectedProvider.toLowerCase() !== \"local\" &&\n              selectedProvider.toLowerCase() !== \"custom\" &&\n              selectedProvider.toLowerCase() !== \"azure open ai\" &&\n              selectedProvider.toLowerCase() !== \"ollama external\" &&\n              (!apiKeys.some(\n                (key) => key.provider === selectedProvider.toLowerCase()\n              ) ||\n                showUpdateInput) && (\n                <div className=\"flex justify-end\">\n                  <Button\n                    variant=\"secondary\"\n                    className=\"w-full mt-2\"\n                    type=\"submit\"\n                    onClick={(e) => {\n                      handleProviderModelChange(\n                        selectedProvider as LLMProvider\n                      );\n                      handleSubmit(e);\n                    }}\n                  >\n                    Save API Key\n                  </Button>\n                </div>\n              )}\n          </div>\n        </>\n      )}\n      <div className=\"mt-4 rounded-[6px] p-4 bg-gradient-to-br from-secondary/50 via-secondary/30 to-background border\">\n        <div className=\"flex items-center gap-2 mb-3\">\n          <div className=\"h-2 w-2 rounded-full bg-green-500 animate-pulse\" />\n          <h3 className=\"text-sm font-medium\">Active Providers</h3>\n        </div>\n        {apiKeys.length > 0 && (\n          <div className=\"flex flex-wrap gap-2\">\n    
        {apiKeys.map((apiKey) => (\n              <div\n                key={apiKey.id}\n                className=\"inline-flex items-center px-3 py-1.5 rounded-full text-xs font-medium bg-background/80 backdrop-blur-sm border shadow-sm hover:shadow-md transition-shadow\"\n              >\n                {providerIcons[apiKey.provider as keyof typeof providerIcons]}\n                <span className=\"ml-1.5\">\n                  {apiKey.provider.charAt(0).toUpperCase() +\n                    apiKey.provider.slice(1)}\n                </span>\n              </div>\n            ))}\n            {customModels.length > 0 && (\n              <div className=\"inline-flex items-center px-3 py-1.5 rounded-full text-xs font-medium bg-background/80 backdrop-blur-sm border shadow-sm hover:shadow-md transition-shadow\">\n                {providerIcons[\"custom\" as keyof typeof providerIcons]}\n                <span className=\"ml-1.5\">Custom</span>\n              </div>\n            )}\n            {ollamaModels.length > 0 && (\n              <div className=\"inline-flex items-center px-3 py-1.5 rounded-full text-xs font-medium bg-background/80 backdrop-blur-sm border shadow-sm hover:shadow-md transition-shadow\">\n                {providerIcons[\"ollama\" as keyof typeof providerIcons]}\n                <span className=\"ml-1.5\">Ollama</span>\n              </div>\n            )}\n            {localModels.length > 0 && (\n              <div className=\"inline-flex items-center px-3 py-1.5 rounded-full text-xs font-medium bg-background/80 backdrop-blur-sm border shadow-sm hover:shadow-md transition-shadow\">\n                {providerIcons[\"local\" as keyof typeof providerIcons]}\n                <span className=\"ml-1.5\">Local</span>\n              </div>\n            )}\n          </div>\n        )}\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/providers/SvgIcon.tsx",
    "content": "export const SvgIcon = ({ src, alt }: { src: string; alt: string }) => (\n  <div className=\"h-3 w-3 relative\">\n    <img\n      src={src}\n      alt={alt}\n      className=\"w-full h-full object-contain [filter:brightness(0)_invert(1)]\"\n    />\n  </div>\n);\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/providers/defaultsProviderModels.tsx",
    "content": "export const defaultProviderModel = {\n  OpenAI: \"gpt-3.5-turbo\",\n  Anthropic: \"claude-3-5-sonnet-20241022\",\n  Gemini: \"gemini-1.5-flash\",\n  XAI: \"grok-beta\",\n  Local: \"local\",\n  Openrouter: \"openai/gpt-3.5-turbo\",\n  \"Azure Open AI\": \"gpt-4o\",\n};\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsComponents/providers/providerIcons.tsx",
    "content": "import openai from \"@/assets/providers/openai.svg\";\nimport anthropic from \"@/assets/providers/anthropic.svg\";\nimport gemini from \"@/assets/providers/gemini.svg\";\nimport xai from \"@/assets/providers/xai.svg\";\nimport openrouter from \"@/assets/providers/openrouter.svg\";\nimport ollama from \"@/assets/providers/ollama.svg\";\nimport azure from \"@/assets/providers/azure.svg\";\nimport custom from \"@/assets/providers/network.svg\";\nimport deepseek from \"@/assets/providers/deepseek.svg\";\nimport { HouseIcon } from \"lucide-react\";\nimport { ReactNode } from \"react\";\nimport { SvgIcon } from \"./SvgIcon\";\n\nexport const providerIcons: Record<string, ReactNode> = {\n  openai: <SvgIcon src={openai} alt=\"OpenAI\" />,\n  anthropic: <SvgIcon src={anthropic} alt=\"Anthropic\" />,\n  gemini: <SvgIcon src={gemini} alt=\"Gemini\" />,\n  xai: <SvgIcon src={xai} alt=\"XAI\" />,\n  local: <HouseIcon className=\"h-3 w-3\" />,\n  openrouter: <SvgIcon src={openrouter} alt=\"OpenRouter\" />,\n  ollama: <SvgIcon src={ollama} alt=\"Ollama\" />,\n  \"azure open ai\": <SvgIcon src={azure} alt=\"Azure\" />,\n  deepseek: <SvgIcon src={deepseek} alt=\"DeepSeek\" />,\n  custom: <SvgIcon src={custom} alt=\"Custom\" />,\n  \"ollama external\": <SvgIcon src={ollama} alt=\"Ollama\" />,\n};\n"
  },
  {
    "path": "Frontend/src/components/SettingsModal/SettingsModal.tsx",
    "content": "import { Tabs, TabsContent, TabsList, TabsTrigger } from \"@/components/ui/tabs\";\nimport { MessageSquare, Cpu, Settings2 } from \"lucide-react\";\nimport { motion, AnimatePresence, LayoutGroup } from \"framer-motion\";\nimport LLMPanel from \"./SettingsComponents/LLMPanel\";\nimport { DevIntegration } from \"./SettingsComponents/DevIntegration\";\nimport ChatSettings from \"./SettingsComponents/ChatSettings\";\n\nexport function SettingsModal() {\n  return (\n    <Tabs defaultValue=\"chat\" className=\"h-full flex flex-col\">\n      <TabsList className=\"grid w-full grid-cols-3 rounded-none bg-muted p-1 rounded-[8px]\">\n        <TabsTrigger value=\"chat\">\n          <div className=\"flex items-center gap-2\">\n            <MessageSquare className=\"h-4 w-4\" />\n            <p className=\"hidden md:block\">Chat Settings</p>\n          </div>\n        </TabsTrigger>\n        <TabsTrigger value=\"llm\">\n          <div className=\"flex items-center gap-2\">\n            <Settings2 className=\"h-4 w-4\" />\n            <p className=\"hidden md:block\">LLM Integration</p>\n          </div>\n        </TabsTrigger>\n        <TabsTrigger value=\"system\">\n          <div className=\"flex items-center gap-2\">\n            <Cpu className=\"h-4 w-4\" />\n            <p className=\"hidden md:block\">Dev Integration</p>\n          </div>\n        </TabsTrigger>\n      </TabsList>\n      <motion.div\n        layout\n        transition={{ duration: 0.4, ease: [0.32, 0.72, 0, 1] }}\n        className=\"flex-1 overflow-hidden\"\n      >\n        <LayoutGroup>\n          <AnimatePresence mode=\"sync\">\n            <TabsContent\n              key=\"chat-tab\"\n              value=\"chat\"\n              className=\"h-full m-0 border-none outline-none overflow-y-hidden\"\n            >\n              <motion.div\n                layout\n                initial={{ opacity: 0 }}\n                animate={{ opacity: 1 }}\n                exit={{ opacity: 0 }}\n     
           transition={{ duration: 0.3 }}\n                className=\"overflow-auto p-6 overflow-y-hidden\"\n              >\n                <ChatSettings />\n              </motion.div>\n            </TabsContent>\n            <TabsContent\n              key=\"llm-tab\"\n              value=\"llm\"\n              className=\"h-full m-0 border-none outline-none\"\n            >\n              <motion.div\n                layout\n                initial={{ opacity: 0 }}\n                animate={{ opacity: 1 }}\n                exit={{ opacity: 0 }}\n                transition={{ duration: 0.3 }}\n                className=\"overflow-auto p-6 overflow-y-hidden\"\n              >\n                <LLMPanel />\n              </motion.div>\n            </TabsContent>\n            <TabsContent\n              key=\"system-tab\"\n              value=\"system\"\n              className=\"h-full m-0 border-none outline-none\"\n            >\n              <motion.div\n                layout\n                initial={{ opacity: 0 }}\n                animate={{ opacity: 1 }}\n                exit={{ opacity: 0 }}\n                transition={{ duration: 0.3 }}\n                className=\"overflow-auto p-6\"\n              >\n                <DevIntegration />\n              </motion.div>\n            </TabsContent>\n          </AnimatePresence>\n        </LayoutGroup>\n      </motion.div>\n    </Tabs>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Tools/ToolComponents/AddTools.tsx",
    "content": "export default function AddTools() {\n  return (\n    <div className=\"flex flex-col gap-4 text-center\">\n      <h1>Coming Soon</h1>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Tools/ToolComponents/EnableTools.tsx",
    "content": "import { Button } from \"@/components/ui/button\";\nimport { useUser } from \"@/context/useUser\";\nimport { Globe } from \"lucide-react\";\nimport { Label } from \"@/components/ui/label\";\nimport { Switch } from \"@/components/ui/switch\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { Separator } from \"@/components/ui/separator\";\n\nconst toolIcons = {\n  \"Web Search\": <Globe />,\n};\n\nexport default function EnableTools() {\n  const { dockTool, systemTools, userTools, activeUser } = useUser();\n  const { settings, setSettings } = useSysSettings();\n\n  return (\n    <div className=\"w-full flex flex-col gap-2\">\n      <div className=\"flex flex-col space-y-4\">\n        <div className=\"text-sm font-medium\">Select Tools</div>\n        <div className=\"grid grid-cols-3 gap-2\">\n          {systemTools.map((tool) => {\n            const userTool = userTools.find((t) => t.name === tool.name);\n            return (\n              <div\n                key={tool.name}\n                className=\"flex flex-col gap-2 items-center justify-center\"\n              >\n                <Button\n                  variant={userTool?.docked === 1 ? 
\"secondary\" : \"outline\"}\n                  className=\"w-full h-full\"\n                  onClick={() =>\n                    dockTool(\n                      userTool || {\n                        ...tool,\n                        enabled: 1,\n                        docked: 1,\n                      }\n                    )\n                  }\n                >\n                  {toolIcons[tool.name as keyof typeof toolIcons]}\n                  {tool.name}\n                </Button>\n              </div>\n            );\n          })}\n\n          <Button variant=\"outline\" disabled>\n            More Coming Soon\n          </Button>\n        </div>\n        <Separator />\n        <div className=\"flex items-center justify-between\">\n          <div className=\"space-y-0.5\">\n            <Label htmlFor=\"cot\">Chain of Thought / Reasoning</Label>\n            <div className=\"text-[0.8rem] text-muted-foreground\">\n              Enable to add a chain of thought / reasoning to the model's\n              response\n            </div>\n          </div>\n          <Switch\n            id=\"cot\"\n            disabled={settings.model === \"deepseek-reasoner\"}\n            checked={settings.cot === 1}\n            onCheckedChange={(checked) => {\n              if (activeUser) {\n                window.electron.updateUserSettings({\n                  userId: activeUser.id,\n                  cot: checked ? 
1 : 0 }));\n            }}\n          />\n        </div>\n        <div className=\"rounded-[6px] bg-muted/50 p-3\">\n          <div className=\"text-xs text-muted-foreground flex items-center gap-2\">\n            <span className=\"font-medium text-yellow-500\">Beta</span>\n            These tools are currently in development and may not work as\n            expected with all models.\n          </div>\n        </div>\n      </div>\n    </div>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/Tools/Tools.tsx",
    "content": "import { Tabs, TabsList, TabsTrigger, TabsContent } from \"@/components/ui/tabs\";\nimport { Plus } from \"lucide-react\";\nimport { motion, AnimatePresence, LayoutGroup } from \"framer-motion\";\nimport ToolboxIcon from \"@/assets/toolbox/toolbox.svg\";\nimport EnableTools from \"./ToolComponents/EnableTools\";\nimport AddTools from \"./ToolComponents/AddTools\";\n\nexport default function Tools() {\n  return (\n    <Tabs defaultValue=\"sysTools\" className=\"h-full flex flex-col\">\n      <TabsList className=\"grid w-full grid-cols-2 rounded-none bg-muted p-1 rounded-[8px]\">\n        <TabsTrigger value=\"sysTools\">\n          <div className=\"flex items-center gap-2\">\n            <img src={ToolboxIcon} alt=\"Toolbox\" className=\"h-4 w-4\" />\n            <p className=\"hidden md:block\">Tools</p>\n          </div>\n        </TabsTrigger>\n        <TabsTrigger value=\"addTools\">\n          <div className=\"flex items-center gap-2\">\n            <Plus className=\"h-4 w-4\" />\n            <p className=\"hidden md:block\">Add Tools</p>\n          </div>\n        </TabsTrigger>\n      </TabsList>\n      <motion.div\n        layout\n        transition={{ duration: 0.4, ease: [0.32, 0.72, 0, 1] }}\n        className=\"flex-1 overflow-hidden\"\n      >\n        <LayoutGroup>\n          <AnimatePresence mode=\"sync\">\n            <TabsContent\n              key=\"sysTools-tab\"\n              value=\"sysTools\"\n              className=\"h-full m-0 border-none outline-none overflow-y-hidden\"\n            >\n              <motion.div\n                layout\n                initial={{ opacity: 0 }}\n                animate={{ opacity: 1 }}\n                exit={{ opacity: 0 }}\n                transition={{ duration: 0.3 }}\n                className=\"overflow-auto py-6 overflow-y-hidden\"\n              >\n                <EnableTools />\n              </motion.div>\n            </TabsContent>\n            <TabsContent\n              
key=\"addTools-tab\"\n              value=\"addTools\"\n              className=\"h-full m-0 border-none outline-none\"\n            >\n              <motion.div\n                layout\n                initial={{ opacity: 0 }}\n                animate={{ opacity: 1 }}\n                exit={{ opacity: 0 }}\n                transition={{ duration: 0.3 }}\n                className=\"overflow-auto py-6 overflow-y-hidden\"\n              >\n                <AddTools />\n              </motion.div>\n            </TabsContent>\n          </AnimatePresence>\n        </LayoutGroup>\n      </motion.div>\n    </Tabs>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/components/ui/alert.tsx",
    "content": "import * as React from \"react\"\nimport { cva, type VariantProps } from \"class-variance-authority\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst alertVariants = cva(\n  \"relative w-full rounded-[8px] border px-4 py-3 text-sm [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground [&>svg~*]:pl-7\",\n  {\n    variants: {\n      variant: {\n        default: \"bg-background text-foreground\",\n        destructive:\n          \"border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive\",\n      },\n    },\n    defaultVariants: {\n      variant: \"default\",\n    },\n  }\n)\n\nconst Alert = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement> & VariantProps<typeof alertVariants>\n>(({ className, variant, ...props }, ref) => (\n  <div\n    ref={ref}\n    role=\"alert\"\n    className={cn(alertVariants({ variant }), className)}\n    {...props}\n  />\n))\nAlert.displayName = \"Alert\"\n\nconst AlertTitle = React.forwardRef<\n  HTMLParagraphElement,\n  React.HTMLAttributes<HTMLHeadingElement>\n>(({ className, ...props }, ref) => (\n  <h5\n    ref={ref}\n    className={cn(\"mb-1 font-medium leading-none tracking-tight\", className)}\n    {...props}\n  />\n))\nAlertTitle.displayName = \"AlertTitle\"\n\nconst AlertDescription = React.forwardRef<\n  HTMLParagraphElement,\n  React.HTMLAttributes<HTMLParagraphElement>\n>(({ className, ...props }, ref) => (\n  <div\n    ref={ref}\n    className={cn(\"text-sm [&_p]:leading-relaxed\", className)}\n    {...props}\n  />\n))\nAlertDescription.displayName = \"AlertDescription\"\n\nexport { Alert, AlertTitle, AlertDescription }\n"
  },
  {
    "path": "Frontend/src/components/ui/avatar.tsx",
    "content": "import * as React from \"react\"\nimport * as AvatarPrimitive from \"@radix-ui/react-avatar\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Avatar = React.forwardRef<\n  React.ElementRef<typeof AvatarPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Root>\n>(({ className, ...props }, ref) => (\n  <AvatarPrimitive.Root\n    ref={ref}\n    className={cn(\n      \"relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full\",\n      className\n    )}\n    {...props}\n  />\n))\nAvatar.displayName = AvatarPrimitive.Root.displayName\n\nconst AvatarImage = React.forwardRef<\n  React.ElementRef<typeof AvatarPrimitive.Image>,\n  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Image>\n>(({ className, ...props }, ref) => (\n  <AvatarPrimitive.Image\n    ref={ref}\n    className={cn(\"aspect-square h-full w-full\", className)}\n    {...props}\n  />\n))\nAvatarImage.displayName = AvatarPrimitive.Image.displayName\n\nconst AvatarFallback = React.forwardRef<\n  React.ElementRef<typeof AvatarPrimitive.Fallback>,\n  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Fallback>\n>(({ className, ...props }, ref) => (\n  <AvatarPrimitive.Fallback\n    ref={ref}\n    className={cn(\n      \"flex h-full w-full items-center justify-center rounded-full bg-muted\",\n      className\n    )}\n    {...props}\n  />\n))\nAvatarFallback.displayName = AvatarPrimitive.Fallback.displayName\n\nexport { Avatar, AvatarImage, AvatarFallback }\n"
  },
  {
    "path": "Frontend/src/components/ui/badge.tsx",
    "content": "import * as React from \"react\"\nimport { cva, type VariantProps } from \"class-variance-authority\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst badgeVariants = cva(\n  \"inline-flex items-center rounded-[6px] border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2\",\n  {\n    variants: {\n      variant: {\n        default:\n          \"border-transparent bg-primary text-primary-foreground shadow hover:bg-primary/80\",\n        secondary:\n          \"border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80\",\n        destructive:\n          \"border-transparent bg-destructive text-destructive-foreground shadow hover:bg-destructive/80\",\n        outline: \"text-foreground\",\n      },\n    },\n    defaultVariants: {\n      variant: \"default\",\n    },\n  }\n)\n\nexport interface BadgeProps\n  extends React.HTMLAttributes<HTMLDivElement>,\n    VariantProps<typeof badgeVariants> {}\n\nfunction Badge({ className, variant, ...props }: BadgeProps) {\n  return (\n    <div className={cn(badgeVariants({ variant }), className)} {...props} />\n  )\n}\n\nexport { Badge, badgeVariants }\n"
  },
  {
    "path": "Frontend/src/components/ui/button.tsx",
    "content": "import * as React from \"react\";\nimport { Slot } from \"@radix-ui/react-slot\";\nimport { type VariantProps } from \"class-variance-authority\";\n\nimport { cn } from \"@/lib/utils\";\nimport { buttonVariants } from \"./buttonVariants\";\n\nexport interface ButtonProps\n  extends React.ButtonHTMLAttributes<HTMLButtonElement>,\n    VariantProps<typeof buttonVariants> {\n  asChild?: boolean;\n}\n\nconst Button = React.forwardRef<HTMLButtonElement, ButtonProps>(\n  ({ className, variant, size, asChild = false, ...props }, ref) => {\n    const Comp = asChild ? Slot : \"button\";\n    return (\n      <Comp\n        className={cn(buttonVariants({ variant, size, className }))}\n        ref={ref}\n        {...props}\n      />\n    );\n  }\n);\nButton.displayName = \"Button\";\n\nexport { Button };\n"
  },
  {
    "path": "Frontend/src/components/ui/buttonVariants.tsx",
    "content": "import { cva } from \"class-variance-authority\";\n\nexport const buttonVariants = cva(\n  \"inline-flex items-center justify-center gap-2 whitespace-nowrap  rounded-[6px] text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg]:size-4 [&_svg]:shrink-0\",\n  {\n    variants: {\n      variant: {\n        default:\n          \"bg-primary text-primary-foreground shadow hover:bg-primary/90\",\n        destructive:\n          \"bg-destructive text-destructive-foreground shadow-sm hover:bg-destructive/90\",\n        outline:\n          \"border border-input bg-background shadow-sm hover:bg-accent hover:text-accent-foreground\",\n        secondary:\n          \"bg-secondary text-secondary-foreground shadow-sm hover:bg-secondary/80\",\n        ghost: \"hover:bg-accent hover:text-accent-foreground\",\n        link: \"text-primary underline-offset-4 hover:underline\",\n      },\n      size: {\n        default: \"h-9 px-4 py-2\",\n        sm: \"h-8 rounded-[6px] px-3 text-xs\",\n        lg: \"h-10 rounded-[6px] px-8\",\n        icon: \"h-9 w-9 rounded-[6px]\",\n      },\n    },\n    defaultVariants: {\n      variant: \"default\",\n      size: \"default\",\n    },\n  }\n);\n"
  },
  {
    "path": "Frontend/src/components/ui/card.tsx",
    "content": "import * as React from \"react\";\n\nimport { cn } from \"@/lib/utils\"\n\nconst Card = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement>\n>(({ className, ...props }, ref) => (\n  <div\n    ref={ref}\n    className={cn(\n      \"rounded-[10px] border bg-card text-card-foreground shadow\",\n      className\n    )}\n    {...props}\n  />\n));\nCard.displayName = \"Card\";\n\nconst CardHeader = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement>\n>(({ className, ...props }, ref) => (\n  <div\n    ref={ref}\n    className={cn(\"flex flex-col space-y-1.5 p-6\", className)}\n    {...props}\n  />\n));\nCardHeader.displayName = \"CardHeader\";\n\nconst CardTitle = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement>\n>(({ className, ...props }, ref) => (\n  <div\n    ref={ref}\n    className={cn(\"font-semibold leading-none tracking-tight\", className)}\n    {...props}\n  />\n));\nCardTitle.displayName = \"CardTitle\";\n\nconst CardDescription = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement>\n>(({ className, ...props }, ref) => (\n  <div\n    ref={ref}\n    className={cn(\"text-sm text-muted-foreground\", className)}\n    {...props}\n  />\n));\nCardDescription.displayName = \"CardDescription\";\n\nconst CardContent = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement>\n>(({ className, ...props }, ref) => (\n  <div ref={ref} className={cn(\"p-6 pt-0\", className)} {...props} />\n));\nCardContent.displayName = \"CardContent\";\n\nconst CardFooter = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement>\n>(({ className, ...props }, ref) => (\n  <div\n    ref={ref}\n    className={cn(\"flex items-center p-6 pt-0\", className)}\n    {...props}\n  />\n));\nCardFooter.displayName = \"CardFooter\";\n\nexport {\n  Card,\n  CardHeader,\n  CardFooter,\n  CardTitle,\n  CardDescription,\n  CardContent,\n};\n"
  },
  {
    "path": "Frontend/src/components/ui/command.tsx",
    "content": "import * as React from \"react\";\nimport { DialogTitle, type DialogProps } from \"@radix-ui/react-dialog\";\nimport { Command as CommandPrimitive } from \"cmdk\";\nimport { Search } from \"lucide-react\";\n\nimport { cn } from \"@/lib/utils\";\nimport {\n  Dialog,\n  DialogContent,\n  DialogDescription,\n  DialogHeader,\n} from \"@/components/ui/dialog\";\n\nconst Command = React.forwardRef<\n  React.ElementRef<typeof CommandPrimitive>,\n  React.ComponentPropsWithoutRef<typeof CommandPrimitive>\n>(({ className, ...props }, ref) => (\n  <CommandPrimitive\n    ref={ref}\n    className={cn(\n      \"flex h-full w-full flex-col overflow-hidden rounded-[6px] bg-popover text-popover-foreground\",\n      className\n    )}\n    {...props}\n  />\n));\nCommand.displayName = CommandPrimitive.displayName;\n\nconst CommandDialog = ({ children, ...props }: DialogProps) => {\n  return (\n    <Dialog {...props}>\n      <DialogContent className=\"overflow-hidden p-4\">\n        <DialogHeader>\n          <DialogTitle>Command</DialogTitle>\n          <DialogDescription>Search for a command.</DialogDescription>\n        </DialogHeader>\n        <Command className=\"[&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-muted-foreground [&_[cmdk-group]:not([hidden])_~[cmdk-group]]:pt-0 [&_[cmdk-group]]:px-2 [&_[cmdk-input-wrapper]_svg]:h-5 [&_[cmdk-input-wrapper]_svg]:w-5 [&_[cmdk-input]]:h-12 [&_[cmdk-item]]:px-2 [&_[cmdk-item]]:py-3 [&_[cmdk-item]_svg]:h-5 [&_[cmdk-item]_svg]:w-5\">\n          {children}\n        </Command>\n      </DialogContent>\n    </Dialog>\n  );\n};\n\nconst CommandInput = React.forwardRef<\n  React.ElementRef<typeof CommandPrimitive.Input>,\n  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Input>\n>(({ className, ...props }, ref) => (\n  <div className=\"flex items-center border-b px-3\" cmdk-input-wrapper=\"\">\n    <Search className=\"mr-2 h-4 w-4 shrink-0 opacity-50\" />\n    
<CommandPrimitive.Input\n      ref={ref}\n      className={cn(\n        \"flex h-10 w-full rounded-[6px] bg-transparent py-3 text-sm outline-none placeholder:text-muted-foreground disabled:cursor-not-allowed disabled:opacity-50\",\n        className\n      )}\n      {...props}\n    />\n  </div>\n));\n\nCommandInput.displayName = CommandPrimitive.Input.displayName;\n\nconst CommandList = React.forwardRef<\n  React.ElementRef<typeof CommandPrimitive.List>,\n  React.ComponentPropsWithoutRef<typeof CommandPrimitive.List>\n>(({ className, ...props }, ref) => (\n  <CommandPrimitive.List\n    ref={ref}\n    className={cn(\"max-h-[300px] overflow-y-auto overflow-x-hidden\", className)}\n    {...props}\n  />\n));\n\nCommandList.displayName = CommandPrimitive.List.displayName;\n\nconst CommandEmpty = React.forwardRef<\n  React.ElementRef<typeof CommandPrimitive.Empty>,\n  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Empty>\n>((props, ref) => (\n  <CommandPrimitive.Empty\n    ref={ref}\n    className=\"py-6 text-center text-sm\"\n    {...props}\n  />\n));\n\nCommandEmpty.displayName = CommandPrimitive.Empty.displayName;\n\nconst CommandGroup = React.forwardRef<\n  React.ElementRef<typeof CommandPrimitive.Group>,\n  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Group>\n>(({ className, ...props }, ref) => (\n  <CommandPrimitive.Group\n    ref={ref}\n    className={cn(\n      \"overflow-hidden p-1 text-foreground [&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:py-1.5 [&_[cmdk-group-heading]]:text-xs [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-muted-foreground\",\n      className\n    )}\n    {...props}\n  />\n));\n\nCommandGroup.displayName = CommandPrimitive.Group.displayName;\n\nconst CommandSeparator = React.forwardRef<\n  React.ElementRef<typeof CommandPrimitive.Separator>,\n  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Separator>\n>(({ className, ...props }, ref) => (\n  <CommandPrimitive.Separator\n    ref={ref}\n 
   className={cn(\"-mx-1 h-px bg-border\", className)}\n    {...props}\n  />\n));\nCommandSeparator.displayName = CommandPrimitive.Separator.displayName;\n\nconst CommandItem = React.forwardRef<\n  React.ElementRef<typeof CommandPrimitive.Item>,\n  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Item>\n>(({ className, ...props }, ref) => (\n  <CommandPrimitive.Item\n    ref={ref}\n    className={cn(\n      \"relative flex cursor-default gap-2 select-none items-center rounded-[4px] px-2 py-1.5 text-sm outline-none data-[disabled=true]:pointer-events-none data-[selected=true]:bg-accent data-[selected=true]:text-accent-foreground data-[disabled=true]:opacity-50 [&_svg]:pointer-events-none [&_svg]:size-4 [&_svg]:shrink-0\",\n      className\n    )}\n    {...props}\n  />\n));\n\nCommandItem.displayName = CommandPrimitive.Item.displayName;\n\nconst CommandShortcut = ({\n  className,\n  ...props\n}: React.HTMLAttributes<HTMLSpanElement>) => {\n  return (\n    <span\n      className={cn(\n        \"ml-auto text-xs tracking-widest text-muted-foreground\",\n        className\n      )}\n      {...props}\n    />\n  );\n};\nCommandShortcut.displayName = \"CommandShortcut\";\n\nexport {\n  Command,\n  CommandDialog,\n  CommandInput,\n  CommandList,\n  CommandEmpty,\n  CommandGroup,\n  CommandItem,\n  CommandShortcut,\n  CommandSeparator,\n};\n"
  },
  {
    "path": "Frontend/src/components/ui/dialog.tsx",
    "content": "import * as React from \"react\";\nimport * as DialogPrimitive from \"@radix-ui/react-dialog\";\nimport { X } from \"lucide-react\";\n\nimport { cn } from \"@/lib/utils\";\n\nconst Dialog = DialogPrimitive.Root;\n\nconst DialogTrigger = DialogPrimitive.Trigger;\n\nconst DialogPortal = DialogPrimitive.Portal;\n\nconst DialogClose = DialogPrimitive.Close;\n\nconst DialogOverlay = React.forwardRef<\n  React.ElementRef<typeof DialogPrimitive.Overlay>,\n  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>\n>(({ className, ...props }, ref) => (\n  <DialogPrimitive.Overlay\n    ref={ref}\n    className={cn(\n      \"fixed inset-0 z-50 bg-black/80 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0\",\n      className\n    )}\n    {...props}\n  />\n));\nDialogOverlay.displayName = DialogPrimitive.Overlay.displayName;\n\nconst DialogContent = React.forwardRef<\n  React.ElementRef<typeof DialogPrimitive.Content>,\n  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Content>\n>(({ className, children, ...props }, ref) => (\n  <DialogPortal>\n    <DialogOverlay />\n    <DialogPrimitive.Content\n      ref={ref}\n      className={cn(\n        \"fixed left-[50%] top-[50%] z-50 grid w-full max-w-lg translate-x-[-50%] translate-y-[-50%] gap-4 border bg-background p-6 shadow-lg rounded-[8px] duration-300 ease-out\",\n        \"data-[state=open]:animate-in data-[state=closed]:animate-out\",\n        \"data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0\",\n        \"data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95\",\n        \"data-[state=closed]:slide-out-to-left-1/2 data-[state=closed]:slide-out-to-top-[48%]\",\n        \"data-[state=open]:slide-in-from-left-1/2 data-[state=open]:slide-in-from-top-[48%]\",\n        \"overflow-auto [&::-webkit-scrollbar]:w-2 [&::-webkit-scrollbar-track]:bg-gray-900 [&::-webkit-scrollbar-thumb]:bg-gray-700 
[&::-webkit-scrollbar-thumb]:rounded\",\n        className\n      )}\n      {...props}\n    >\n      {children}\n      <DialogPrimitive.Close className=\"absolute right-4 top-4 rounded-[4px] opacity-70 ring-offset-background transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-accent data-[state=open]:text-muted-foreground\">\n        <X className=\"h-4 w-4\" />\n        <span className=\"sr-only\">Close</span>\n      </DialogPrimitive.Close>\n    </DialogPrimitive.Content>\n  </DialogPortal>\n));\nDialogContent.displayName = DialogPrimitive.Content.displayName;\n\nconst DialogHeader = ({\n  className,\n  ...props\n}: React.HTMLAttributes<HTMLDivElement>) => (\n  <div\n    className={cn(\n      \"flex flex-col space-y-1.5 text-center sm:text-left\",\n      className\n    )}\n    {...props}\n  />\n);\nDialogHeader.displayName = \"DialogHeader\";\n\nconst DialogFooter = ({\n  className,\n  ...props\n}: React.HTMLAttributes<HTMLDivElement>) => (\n  <div\n    className={cn(\n      \"flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2\",\n      className\n    )}\n    {...props}\n  />\n);\nDialogFooter.displayName = \"DialogFooter\";\n\nconst DialogTitle = React.forwardRef<\n  React.ElementRef<typeof DialogPrimitive.Title>,\n  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Title>\n>(({ className, ...props }, ref) => (\n  <DialogPrimitive.Title\n    ref={ref}\n    className={cn(\n      \"text-lg font-semibold leading-none tracking-tight\",\n      className\n    )}\n    {...props}\n  />\n));\nDialogTitle.displayName = DialogPrimitive.Title.displayName;\n\nconst DialogDescription = React.forwardRef<\n  React.ElementRef<typeof DialogPrimitive.Description>,\n  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Description>\n>(({ className, ...props }, ref) => (\n  <DialogPrimitive.Description\n    ref={ref}\n    className={cn(\"text-sm 
text-muted-foreground\", className)}\n    {...props}\n  />\n));\nDialogDescription.displayName = DialogPrimitive.Description.displayName;\n\nexport {\n  Dialog,\n  DialogPortal,\n  DialogOverlay,\n  DialogTrigger,\n  DialogClose,\n  DialogContent,\n  DialogHeader,\n  DialogFooter,\n  DialogTitle,\n  DialogDescription,\n};\n"
  },
  {
    "path": "Frontend/src/components/ui/form.tsx",
    "content": "import * as React from \"react\"\nimport * as LabelPrimitive from \"@radix-ui/react-label\"\nimport { Slot } from \"@radix-ui/react-slot\"\nimport {\n  Controller,\n  ControllerProps,\n  FieldPath,\n  FieldValues,\n  FormProvider,\n  useFormContext,\n} from \"react-hook-form\"\n\nimport { cn } from \"@/lib/utils\"\nimport { Label } from \"@/components/ui/label\"\n\nconst Form = FormProvider\n\ntype FormFieldContextValue<\n  TFieldValues extends FieldValues = FieldValues,\n  TName extends FieldPath<TFieldValues> = FieldPath<TFieldValues>\n> = {\n  name: TName\n}\n\nconst FormFieldContext = React.createContext<FormFieldContextValue>(\n  {} as FormFieldContextValue\n)\n\nconst FormField = <\n  TFieldValues extends FieldValues = FieldValues,\n  TName extends FieldPath<TFieldValues> = FieldPath<TFieldValues>\n>({\n  ...props\n}: ControllerProps<TFieldValues, TName>) => {\n  return (\n    <FormFieldContext.Provider value={{ name: props.name }}>\n      <Controller {...props} />\n    </FormFieldContext.Provider>\n  )\n}\n\nconst useFormField = () => {\n  const fieldContext = React.useContext(FormFieldContext)\n  const itemContext = React.useContext(FormItemContext)\n  const { getFieldState, formState } = useFormContext()\n\n  const fieldState = getFieldState(fieldContext.name, formState)\n\n  if (!fieldContext) {\n    throw new Error(\"useFormField should be used within <FormField>\")\n  }\n\n  const { id } = itemContext\n\n  return {\n    id,\n    name: fieldContext.name,\n    formItemId: `${id}-form-item`,\n    formDescriptionId: `${id}-form-item-description`,\n    formMessageId: `${id}-form-item-message`,\n    ...fieldState,\n  }\n}\n\ntype FormItemContextValue = {\n  id: string\n}\n\nconst FormItemContext = React.createContext<FormItemContextValue>(\n  {} as FormItemContextValue\n)\n\nconst FormItem = React.forwardRef<\n  HTMLDivElement,\n  React.HTMLAttributes<HTMLDivElement>\n>(({ className, ...props }, ref) => {\n  const id = React.useId()\n\n  
return (\n    <FormItemContext.Provider value={{ id }}>\n      <div ref={ref} className={cn(\"space-y-2\", className)} {...props} />\n    </FormItemContext.Provider>\n  )\n})\nFormItem.displayName = \"FormItem\"\n\nconst FormLabel = React.forwardRef<\n  React.ElementRef<typeof LabelPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof LabelPrimitive.Root>\n>(({ className, ...props }, ref) => {\n  const { error, formItemId } = useFormField()\n\n  return (\n    <Label\n      ref={ref}\n      className={cn(error && \"text-destructive\", className)}\n      htmlFor={formItemId}\n      {...props}\n    />\n  )\n})\nFormLabel.displayName = \"FormLabel\"\n\nconst FormControl = React.forwardRef<\n  React.ElementRef<typeof Slot>,\n  React.ComponentPropsWithoutRef<typeof Slot>\n>(({ ...props }, ref) => {\n  const { error, formItemId, formDescriptionId, formMessageId } = useFormField()\n\n  return (\n    <Slot\n      ref={ref}\n      id={formItemId}\n      aria-describedby={\n        !error\n          ? `${formDescriptionId}`\n          : `${formDescriptionId} ${formMessageId}`\n      }\n      aria-invalid={!!error}\n      {...props}\n    />\n  )\n})\nFormControl.displayName = \"FormControl\"\n\nconst FormDescription = React.forwardRef<\n  HTMLParagraphElement,\n  React.HTMLAttributes<HTMLParagraphElement>\n>(({ className, ...props }, ref) => {\n  const { formDescriptionId } = useFormField()\n\n  return (\n    <p\n      ref={ref}\n      id={formDescriptionId}\n      className={cn(\"text-[0.8rem] text-muted-foreground\", className)}\n      {...props}\n    />\n  )\n})\nFormDescription.displayName = \"FormDescription\"\n\nconst FormMessage = React.forwardRef<\n  HTMLParagraphElement,\n  React.HTMLAttributes<HTMLParagraphElement>\n>(({ className, children, ...props }, ref) => {\n  const { error, formMessageId } = useFormField()\n  const body = error ? 
String(error?.message) : children\n\n  if (!body) {\n    return null\n  }\n\n  return (\n    <p\n      ref={ref}\n      id={formMessageId}\n      className={cn(\"text-[0.8rem] font-medium text-destructive\", className)}\n      {...props}\n    >\n      {body}\n    </p>\n  )\n})\nFormMessage.displayName = \"FormMessage\"\n\nexport {\n  useFormField,\n  Form,\n  FormItem,\n  FormLabel,\n  FormControl,\n  FormDescription,\n  FormMessage,\n  FormField,\n}\n"
  },
  {
    "path": "Frontend/src/components/ui/icons.tsx",
    "content": "import * as React from \"react\";\nimport {\n  Github,\n  Moon,\n  SunMedium,\n  Twitter,\n  type LucideIcon,\n} from \"lucide-react\";\n\nexport type Icon = LucideIcon;\n\nexport const Icons = {\n  sun: SunMedium,\n  moon: Moon,\n  twitter: Twitter,\n  gitHub: Github,\n  google: (props: React.SVGProps<SVGSVGElement>) => (\n    <svg {...props} viewBox=\"0 0 24 24\">\n      <path\n        d=\"M22.56 12.25c0-.78-.07-1.53-.2-2.25H12v4.26h5.92c-.26 1.37-1.04 2.53-2.21 3.31v2.77h3.57c2.08-1.92 3.28-4.74 3.28-8.09z\"\n        fill=\"#4285F4\"\n      />\n      <path\n        d=\"M12 23c2.97 0 5.46-.98 7.28-2.66l-3.57-2.77c-.98.66-2.23 1.06-3.71 1.06-2.86 0-5.29-1.93-6.16-4.53H2.18v2.84C3.99 20.53 7.7 23 12 23z\"\n        fill=\"#34A853\"\n      />\n      <path\n        d=\"M5.84 14.09c-.22-.66-.35-1.36-.35-2.09s.13-1.43.35-2.09V7.07H2.18C1.43 8.55 1 10.22 1 12s.43 3.45 1.18 4.93l2.85-2.22.81-.62z\"\n        fill=\"#FBBC05\"\n      />\n      <path\n        d=\"M12 5.38c1.62 0 3.06.56 4.21 1.64l3.15-3.15C17.45 2.09 14.97 1 12 1 7.7 1 3.99 3.47 2.18 7.07l3.66 2.84c.87-2.6 3.3-4.53 6.16-4.53z\"\n        fill=\"#EA4335\"\n      />\n      <path d=\"M1 1h22v22H1z\" fill=\"none\" />\n    </svg>\n  ),\n};\n"
  },
  {
    "path": "Frontend/src/components/ui/input.tsx",
    "content": "import * as React from \"react\"\n\nimport { cn } from \"@/lib/utils\";\n\nconst Input = React.forwardRef<HTMLInputElement, React.ComponentProps<\"input\">>(\n  ({ className, type, ...props }, ref) => {\n    return (\n      <input\n        type={type}\n        className={cn(\n          \"flex h-9 w-full rounded-[6px] border border-input bg-transparent px-3 py-1 text-base shadow-sm transition-colors file:border-0 file:bg-transparent file:text-sm file:font-medium file:text-foreground placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:cursor-not-allowed disabled:opacity-50 md:text-sm\",\n          className\n        )}\n        ref={ref}\n        {...props}\n      />\n    )\n  }\n)\nInput.displayName = \"Input\"\n\nexport { Input }\n"
  },
  {
    "path": "Frontend/src/components/ui/label.tsx",
    "content": "import * as React from \"react\"\nimport * as LabelPrimitive from \"@radix-ui/react-label\"\nimport { cva, type VariantProps } from \"class-variance-authority\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst labelVariants = cva(\n  \"text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70\"\n)\n\nconst Label = React.forwardRef<\n  React.ElementRef<typeof LabelPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof LabelPrimitive.Root> &\n    VariantProps<typeof labelVariants>\n>(({ className, ...props }, ref) => (\n  <LabelPrimitive.Root\n    ref={ref}\n    className={cn(labelVariants(), className)}\n    {...props}\n  />\n))\nLabel.displayName = LabelPrimitive.Root.displayName\n\nexport { Label }\n"
  },
  {
    "path": "Frontend/src/components/ui/menubar.tsx",
    "content": "import * as React from \"react\"\nimport * as MenubarPrimitive from \"@radix-ui/react-menubar\"\nimport { Check, ChevronRight, Circle } from \"lucide-react\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst MenubarMenu = MenubarPrimitive.Menu\n\nconst MenubarGroup = MenubarPrimitive.Group\n\nconst MenubarPortal = MenubarPrimitive.Portal\n\nconst MenubarSub = MenubarPrimitive.Sub\n\nconst MenubarRadioGroup = MenubarPrimitive.RadioGroup\n\nconst Menubar = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.Root>\n>(({ className, ...props }, ref) => (\n  <MenubarPrimitive.Root\n    ref={ref}\n    className={cn(\n      \"flex h-9 items-center space-x-1 rounded-[6px] border bg-background p-1 shadow-sm\",\n      className\n    )}\n    {...props}\n  />\n))\nMenubar.displayName = MenubarPrimitive.Root.displayName\n\nconst MenubarTrigger = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.Trigger>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.Trigger>\n>(({ className, ...props }, ref) => (\n  <MenubarPrimitive.Trigger\n    ref={ref}\n    className={cn(\n      \"flex cursor-default select-none items-center rounded-[4px] px-3 py-1 text-sm font-medium outline-none focus:bg-accent focus:text-accent-foreground data-[state=open]:bg-accent data-[state=open]:text-accent-foreground\",\n      className\n    )}\n    {...props}\n  />\n))\nMenubarTrigger.displayName = MenubarPrimitive.Trigger.displayName\n\nconst MenubarSubTrigger = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.SubTrigger>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.SubTrigger> & {\n    inset?: boolean\n  }\n>(({ className, inset, children, ...props }, ref) => (\n  <MenubarPrimitive.SubTrigger\n    ref={ref}\n    className={cn(\n      \"flex cursor-default select-none items-center rounded-[4px] px-2 py-1.5 text-sm outline-none focus:bg-accent focus:text-accent-foreground 
data-[state=open]:bg-accent data-[state=open]:text-accent-foreground\",\n      inset && \"pl-8\",\n      className\n    )}\n    {...props}\n  >\n    {children}\n    <ChevronRight className=\"ml-auto h-4 w-4\" />\n  </MenubarPrimitive.SubTrigger>\n))\nMenubarSubTrigger.displayName = MenubarPrimitive.SubTrigger.displayName\n\nconst MenubarSubContent = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.SubContent>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.SubContent>\n>(({ className, ...props }, ref) => (\n  <MenubarPrimitive.SubContent\n    ref={ref}\n    className={cn(\n      \"z-50 min-w-[8rem] overflow-hidden rounded-[6px] border bg-popover p-1 text-popover-foreground shadow-lg data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2\",\n      className\n    )}\n    {...props}\n  />\n))\nMenubarSubContent.displayName = MenubarPrimitive.SubContent.displayName\n\nconst MenubarContent = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.Content>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.Content>\n>(\n  (\n    { className, align = \"start\", alignOffset = -4, sideOffset = 8, ...props },\n    ref\n  ) => (\n    <MenubarPrimitive.Portal>\n      <MenubarPrimitive.Content\n        ref={ref}\n        align={align}\n        alignOffset={alignOffset}\n        sideOffset={sideOffset}\n        className={cn(\n          \"z-50 min-w-[12rem] overflow-hidden rounded-[6px] border bg-popover p-1 text-popover-foreground shadow-md data-[state=open]:animate-in data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 
data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2\",\n          className\n        )}\n        {...props}\n      />\n    </MenubarPrimitive.Portal>\n  )\n)\nMenubarContent.displayName = MenubarPrimitive.Content.displayName\n\nconst MenubarItem = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.Item>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.Item> & {\n    inset?: boolean\n  }\n>(({ className, inset, ...props }, ref) => (\n  <MenubarPrimitive.Item\n    ref={ref}\n    className={cn(\n      \"relative flex cursor-default select-none items-center rounded-[4px] px-2 py-1.5 text-sm outline-none focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50\",\n      inset && \"pl-8\",\n      className\n    )}\n    {...props}\n  />\n))\nMenubarItem.displayName = MenubarPrimitive.Item.displayName\n\nconst MenubarCheckboxItem = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.CheckboxItem>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.CheckboxItem>\n>(({ className, children, checked, ...props }, ref) => (\n  <MenubarPrimitive.CheckboxItem\n    ref={ref}\n    className={cn(\n      \"relative flex cursor-default select-none items-center rounded-[4px] py-1.5 pl-8 pr-2 text-sm outline-none focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50\",\n      className\n    )}\n    checked={checked}\n    {...props}\n  >\n    <span className=\"absolute left-2 flex h-3.5 w-3.5 items-center justify-center\">\n      <MenubarPrimitive.ItemIndicator>\n        <Check className=\"h-4 w-4\" />\n      </MenubarPrimitive.ItemIndicator>\n    </span>\n    {children}\n  </MenubarPrimitive.CheckboxItem>\n))\nMenubarCheckboxItem.displayName = MenubarPrimitive.CheckboxItem.displayName\n\nconst MenubarRadioItem = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.RadioItem>,\n  
React.ComponentPropsWithoutRef<typeof MenubarPrimitive.RadioItem>\n>(({ className, children, ...props }, ref) => (\n  <MenubarPrimitive.RadioItem\n    ref={ref}\n    className={cn(\n      \"relative flex cursor-default select-none items-center rounded-[4px] py-1.5 pl-8 pr-2 text-sm outline-none focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50\",\n      className\n    )}\n    {...props}\n  >\n    <span className=\"absolute left-2 flex h-3.5 w-3.5 items-center justify-center\">\n      <MenubarPrimitive.ItemIndicator>\n        <Circle className=\"h-4 w-4 fill-current\" />\n      </MenubarPrimitive.ItemIndicator>\n    </span>\n    {children}\n  </MenubarPrimitive.RadioItem>\n))\nMenubarRadioItem.displayName = MenubarPrimitive.RadioItem.displayName\n\nconst MenubarLabel = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.Label>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.Label> & {\n    inset?: boolean\n  }\n>(({ className, inset, ...props }, ref) => (\n  <MenubarPrimitive.Label\n    ref={ref}\n    className={cn(\n      \"px-2 py-1.5 text-sm font-semibold\",\n      inset && \"pl-8\",\n      className\n    )}\n    {...props}\n  />\n))\nMenubarLabel.displayName = MenubarPrimitive.Label.displayName\n\nconst MenubarSeparator = React.forwardRef<\n  React.ElementRef<typeof MenubarPrimitive.Separator>,\n  React.ComponentPropsWithoutRef<typeof MenubarPrimitive.Separator>\n>(({ className, ...props }, ref) => (\n  <MenubarPrimitive.Separator\n    ref={ref}\n    className={cn(\"-mx-1 my-1 h-px bg-muted\", className)}\n    {...props}\n  />\n))\nMenubarSeparator.displayName = MenubarPrimitive.Separator.displayName\n\nconst MenubarShortcut = ({\n  className,\n  ...props\n}: React.HTMLAttributes<HTMLSpanElement>) => {\n  return (\n    <span\n      className={cn(\n        \"ml-auto text-xs tracking-widest text-muted-foreground\",\n        className\n      )}\n      {...props}\n    />\n  
)\n}\nMenubarShortcut.displayName = \"MenubarShortcut\"\n\nexport {\n  Menubar,\n  MenubarMenu,\n  MenubarTrigger,\n  MenubarContent,\n  MenubarItem,\n  MenubarSeparator,\n  MenubarLabel,\n  MenubarCheckboxItem,\n  MenubarRadioGroup,\n  MenubarRadioItem,\n  MenubarPortal,\n  MenubarSubContent,\n  MenubarSubTrigger,\n  MenubarGroup,\n  MenubarSub,\n  MenubarShortcut,\n}\n"
  },
  {
    "path": "Frontend/src/components/ui/popover.tsx",
    "content": "import * as React from \"react\"\nimport * as PopoverPrimitive from \"@radix-ui/react-popover\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Popover = PopoverPrimitive.Root\n\nconst PopoverTrigger = PopoverPrimitive.Trigger\n\nconst PopoverAnchor = PopoverPrimitive.Anchor\n\nconst PopoverContent = React.forwardRef<\n  React.ElementRef<typeof PopoverPrimitive.Content>,\n  React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>\n>(({ className, align = \"center\", sideOffset = 4, ...props }, ref) => (\n  <PopoverPrimitive.Portal>\n    <PopoverPrimitive.Content\n      ref={ref}\n      align={align}\n      sideOffset={sideOffset}\n      className={cn(\n        \"z-50 w-72 rounded-[6px] border bg-popover p-4 text-popover-foreground shadow-md outline-none data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2\",\n        className\n      )}\n      {...props}\n    />\n  </PopoverPrimitive.Portal>\n))\nPopoverContent.displayName = PopoverPrimitive.Content.displayName\n\nexport { Popover, PopoverTrigger, PopoverContent, PopoverAnchor }\n"
  },
  {
    "path": "Frontend/src/components/ui/progress.tsx",
    "content": "import * as React from \"react\"\nimport * as ProgressPrimitive from \"@radix-ui/react-progress\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Progress = React.forwardRef<\n  React.ElementRef<typeof ProgressPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof ProgressPrimitive.Root>\n>(({ className, value, ...props }, ref) => (\n  <ProgressPrimitive.Root\n    ref={ref}\n    className={cn(\n      \"relative h-2 w-full overflow-hidden rounded-full bg-primary/20\",\n      className\n    )}\n    {...props}\n  >\n    <ProgressPrimitive.Indicator\n      className=\"h-full w-full flex-1 bg-primary transition-all\"\n      style={{ transform: `translateX(-${100 - (value || 0)}%)` }}\n    />\n  </ProgressPrimitive.Root>\n))\nProgress.displayName = ProgressPrimitive.Root.displayName\n\nexport { Progress }\n"
  },
  {
    "path": "Frontend/src/components/ui/radio-group.tsx",
    "content": "import * as React from \"react\"\nimport * as RadioGroupPrimitive from \"@radix-ui/react-radio-group\"\nimport { Circle } from \"lucide-react\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst RadioGroup = React.forwardRef<\n  React.ElementRef<typeof RadioGroupPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof RadioGroupPrimitive.Root>\n>(({ className, ...props }, ref) => {\n  return (\n    <RadioGroupPrimitive.Root\n      className={cn(\"grid gap-2\", className)}\n      {...props}\n      ref={ref}\n    />\n  )\n})\nRadioGroup.displayName = RadioGroupPrimitive.Root.displayName\n\nconst RadioGroupItem = React.forwardRef<\n  React.ElementRef<typeof RadioGroupPrimitive.Item>,\n  React.ComponentPropsWithoutRef<typeof RadioGroupPrimitive.Item>\n>(({ className, ...props }, ref) => {\n  return (\n    <RadioGroupPrimitive.Item\n      ref={ref}\n      className={cn(\n        \"aspect-square h-4 w-4 rounded-full border border-primary text-primary shadow focus:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:cursor-not-allowed disabled:opacity-50\",\n        className\n      )}\n      {...props}\n    >\n      <RadioGroupPrimitive.Indicator className=\"flex items-center justify-center\">\n        <Circle className=\"h-3.5 w-3.5 fill-primary\" />\n      </RadioGroupPrimitive.Indicator>\n    </RadioGroupPrimitive.Item>\n  )\n})\nRadioGroupItem.displayName = RadioGroupPrimitive.Item.displayName\n\nexport { RadioGroup, RadioGroupItem }\n"
  },
  {
    "path": "Frontend/src/components/ui/scroll-area.tsx",
    "content": "import * as React from \"react\";\nimport * as ScrollAreaPrimitive from \"@radix-ui/react-scroll-area\";\n\nimport { cn } from \"@/lib/utils\";\n\nconst ScrollArea = React.forwardRef<\n  React.ElementRef<typeof ScrollAreaPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof ScrollAreaPrimitive.Root>\n>(({ className, children, ...props }, ref) => (\n  <ScrollAreaPrimitive.Root\n    ref={ref}\n    className={cn(\"relative overflow-hidden\", className)}\n    {...props}\n  >\n    <ScrollAreaPrimitive.Viewport className=\"!h-full w-full rounded-[inherit]\">\n      {children}\n    </ScrollAreaPrimitive.Viewport>\n    <ScrollBar />\n    <ScrollAreaPrimitive.Corner />\n  </ScrollAreaPrimitive.Root>\n));\nScrollArea.displayName = ScrollAreaPrimitive.Root.displayName;\n\nconst ScrollBar = React.forwardRef<\n  React.ElementRef<typeof ScrollAreaPrimitive.ScrollAreaScrollbar>,\n  React.ComponentPropsWithoutRef<typeof ScrollAreaPrimitive.ScrollAreaScrollbar>\n>(({ className, orientation = \"vertical\", ...props }, ref) => (\n  <ScrollAreaPrimitive.ScrollAreaScrollbar\n    ref={ref}\n    orientation={orientation}\n    className={cn(\n      \"flex touch-none select-none transition-colors\",\n      orientation === \"vertical\" &&\n        \"h-full w-2.5 border-l border-l-transparent p-[1px]\",\n      orientation === \"horizontal\" &&\n        \"h-2.5 flex-col border-t border-t-transparent p-[1px]\",\n      className\n    )}\n    {...props}\n  >\n    <ScrollAreaPrimitive.ScrollAreaThumb className=\"relative flex-1 rounded-full bg-border\" />\n  </ScrollAreaPrimitive.ScrollAreaScrollbar>\n));\nScrollBar.displayName = ScrollAreaPrimitive.ScrollAreaScrollbar.displayName;\n\nexport { ScrollArea, ScrollBar };\n"
  },
  {
    "path": "Frontend/src/components/ui/select.tsx",
    "content": "import * as React from \"react\"\nimport * as SelectPrimitive from \"@radix-ui/react-select\"\nimport { Check, ChevronDown, ChevronUp } from \"lucide-react\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Select = SelectPrimitive.Root\n\nconst SelectGroup = SelectPrimitive.Group\n\nconst SelectValue = SelectPrimitive.Value\n\nconst SelectTrigger = React.forwardRef<\n  React.ElementRef<typeof SelectPrimitive.Trigger>,\n  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>\n>(({ className, children, ...props }, ref) => (\n  <SelectPrimitive.Trigger\n    ref={ref}\n    className={cn(\n      \"flex h-9 w-full items-center justify-between whitespace-nowrap rounded-[6px] border border-input bg-transparent px-3 py-2 text-sm shadow-sm ring-offset-background placeholder:text-muted-foreground focus:outline-none focus:ring-1 focus:ring-ring disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1\",\n      className\n    )}\n    {...props}\n  >\n    {children}\n    <SelectPrimitive.Icon asChild>\n      <ChevronDown className=\"h-4 w-4 opacity-50\" />\n    </SelectPrimitive.Icon>\n  </SelectPrimitive.Trigger>\n))\nSelectTrigger.displayName = SelectPrimitive.Trigger.displayName\n\nconst SelectScrollUpButton = React.forwardRef<\n  React.ElementRef<typeof SelectPrimitive.ScrollUpButton>,\n  React.ComponentPropsWithoutRef<typeof SelectPrimitive.ScrollUpButton>\n>(({ className, ...props }, ref) => (\n  <SelectPrimitive.ScrollUpButton\n    ref={ref}\n    className={cn(\n      \"flex cursor-default items-center justify-center py-1\",\n      className\n    )}\n    {...props}\n  >\n    <ChevronUp className=\"h-4 w-4\" />\n  </SelectPrimitive.ScrollUpButton>\n))\nSelectScrollUpButton.displayName = SelectPrimitive.ScrollUpButton.displayName\n\nconst SelectScrollDownButton = React.forwardRef<\n  React.ElementRef<typeof SelectPrimitive.ScrollDownButton>,\n  React.ComponentPropsWithoutRef<typeof SelectPrimitive.ScrollDownButton>\n>(({ className, 
...props }, ref) => (\n  <SelectPrimitive.ScrollDownButton\n    ref={ref}\n    className={cn(\n      \"flex cursor-default items-center justify-center py-1\",\n      className\n    )}\n    {...props}\n  >\n    <ChevronDown className=\"h-4 w-4\" />\n  </SelectPrimitive.ScrollDownButton>\n))\nSelectScrollDownButton.displayName =\n  SelectPrimitive.ScrollDownButton.displayName\n\nconst SelectContent = React.forwardRef<\n  React.ElementRef<typeof SelectPrimitive.Content>,\n  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Content>\n>(({ className, children, position = \"popper\", ...props }, ref) => (\n  <SelectPrimitive.Portal>\n    <SelectPrimitive.Content\n      ref={ref}\n      className={cn(\n        \"relative z-50 max-h-96 min-w-[8rem] overflow-hidden rounded-[6px] border bg-popover text-popover-foreground shadow-md data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2\",\n        position === \"popper\" &&\n          \"data-[side=bottom]:translate-y-1 data-[side=left]:-translate-x-1 data-[side=right]:translate-x-1 data-[side=top]:-translate-y-1\",\n        className\n      )}\n      position={position}\n      {...props}\n    >\n      <SelectScrollUpButton />\n      <SelectPrimitive.Viewport\n        className={cn(\n          \"p-1\",\n          position === \"popper\" &&\n            \"h-[var(--radix-select-trigger-height)] w-full min-w-[var(--radix-select-trigger-width)]\"\n        )}\n      >\n        {children}\n      </SelectPrimitive.Viewport>\n      <SelectScrollDownButton />\n    </SelectPrimitive.Content>\n  </SelectPrimitive.Portal>\n))\nSelectContent.displayName = SelectPrimitive.Content.displayName\n\nconst SelectLabel = React.forwardRef<\n  React.ElementRef<typeof 
SelectPrimitive.Label>,\n  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Label>\n>(({ className, ...props }, ref) => (\n  <SelectPrimitive.Label\n    ref={ref}\n    className={cn(\"px-2 py-1.5 text-sm font-semibold\", className)}\n    {...props}\n  />\n))\nSelectLabel.displayName = SelectPrimitive.Label.displayName\n\nconst SelectItem = React.forwardRef<\n  React.ElementRef<typeof SelectPrimitive.Item>,\n  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Item>\n>(({ className, children, ...props }, ref) => (\n  <SelectPrimitive.Item\n    ref={ref}\n    className={cn(\n      \"relative flex w-full cursor-default select-none items-center rounded-[4px] py-1.5 pl-2 pr-8 text-sm outline-none focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50\",\n      className\n    )}\n    {...props}\n  >\n    <span className=\"absolute right-2 flex h-3.5 w-3.5 items-center justify-center\">\n      <SelectPrimitive.ItemIndicator>\n        <Check className=\"h-4 w-4\" />\n      </SelectPrimitive.ItemIndicator>\n    </span>\n    <SelectPrimitive.ItemText>{children}</SelectPrimitive.ItemText>\n  </SelectPrimitive.Item>\n))\nSelectItem.displayName = SelectPrimitive.Item.displayName\n\nconst SelectSeparator = React.forwardRef<\n  React.ElementRef<typeof SelectPrimitive.Separator>,\n  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Separator>\n>(({ className, ...props }, ref) => (\n  <SelectPrimitive.Separator\n    ref={ref}\n    className={cn(\"-mx-1 my-1 h-px bg-muted\", className)}\n    {...props}\n  />\n))\nSelectSeparator.displayName = SelectPrimitive.Separator.displayName\n\nexport {\n  Select,\n  SelectGroup,\n  SelectValue,\n  SelectTrigger,\n  SelectContent,\n  SelectLabel,\n  SelectItem,\n  SelectSeparator,\n  SelectScrollUpButton,\n  SelectScrollDownButton,\n}\n"
  },
  {
    "path": "Frontend/src/components/ui/separator.tsx",
    "content": "import * as React from \"react\"\nimport * as SeparatorPrimitive from \"@radix-ui/react-separator\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Separator = React.forwardRef<\n  React.ElementRef<typeof SeparatorPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof SeparatorPrimitive.Root>\n>(\n  (\n    { className, orientation = \"horizontal\", decorative = true, ...props },\n    ref\n  ) => (\n    <SeparatorPrimitive.Root\n      ref={ref}\n      decorative={decorative}\n      orientation={orientation}\n      className={cn(\n        \"shrink-0 bg-border\",\n        orientation === \"horizontal\" ? \"h-[1px] w-full\" : \"h-full w-[1px]\",\n        className\n      )}\n      {...props}\n    />\n  )\n)\nSeparator.displayName = SeparatorPrimitive.Root.displayName\n\nexport { Separator }\n"
  },
  {
    "path": "Frontend/src/components/ui/sheet.tsx",
    "content": "import * as React from \"react\"\nimport * as SheetPrimitive from \"@radix-ui/react-dialog\"\nimport { cva, type VariantProps } from \"class-variance-authority\"\nimport { X } from \"lucide-react\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Sheet = SheetPrimitive.Root\n\nconst SheetTrigger = SheetPrimitive.Trigger\n\nconst SheetClose = SheetPrimitive.Close\n\nconst SheetPortal = SheetPrimitive.Portal\n\nconst SheetOverlay = React.forwardRef<\n  React.ElementRef<typeof SheetPrimitive.Overlay>,\n  React.ComponentPropsWithoutRef<typeof SheetPrimitive.Overlay>\n>(({ className, ...props }, ref) => (\n  <SheetPrimitive.Overlay\n    className={cn(\n      \"fixed inset-0 z-50 bg-black/80  data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0\",\n      className\n    )}\n    {...props}\n    ref={ref}\n  />\n))\nSheetOverlay.displayName = SheetPrimitive.Overlay.displayName\n\nconst sheetVariants = cva(\n  \"fixed z-50 gap-4 bg-background p-6 shadow-lg transition ease-in-out data-[state=closed]:duration-300 data-[state=open]:duration-500 data-[state=open]:animate-in data-[state=closed]:animate-out\",\n  {\n    variants: {\n      side: {\n        top: \"inset-x-0 top-0 border-b data-[state=closed]:slide-out-to-top data-[state=open]:slide-in-from-top\",\n        bottom:\n          \"inset-x-0 bottom-0 border-t data-[state=closed]:slide-out-to-bottom data-[state=open]:slide-in-from-bottom\",\n        left: \"inset-y-0 left-0 h-full border-r data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left md:max-w-lg\",\n        right:\n          \"inset-y-0 right-0 h-full border-l data-[state=closed]:slide-out-to-right data-[state=open]:slide-in-from-right md:max-w-lg\",\n      },\n    },\n    defaultVariants: {\n      side: \"right\",\n    },\n  }\n)\n\ninterface SheetContentProps\n  extends React.ComponentPropsWithoutRef<typeof SheetPrimitive.Content>,\n    VariantProps<typeof 
sheetVariants> {}\n\nconst SheetContent = React.forwardRef<\n  React.ElementRef<typeof SheetPrimitive.Content>,\n  SheetContentProps\n>(({ side = \"right\", className, children, ...props }, ref) => (\n  <SheetPortal>\n    <SheetOverlay />\n    <SheetPrimitive.Content\n      ref={ref}\n      className={cn(sheetVariants({ side }), className)}\n      {...props}\n    >\n      <SheetPrimitive.Close className=\"absolute right-4 top-4 rounded-sm opacity-70 ring-offset-background transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-secondary\">\n        <X className=\"h-4 w-4\" />\n        <span className=\"sr-only\">Close</span>\n      </SheetPrimitive.Close>\n      {children}\n    </SheetPrimitive.Content>\n  </SheetPortal>\n))\nSheetContent.displayName = SheetPrimitive.Content.displayName\n\nconst SheetHeader = ({\n  className,\n  ...props\n}: React.HTMLAttributes<HTMLDivElement>) => (\n  <div\n    className={cn(\n      \"flex flex-col space-y-2 text-center sm:text-left\",\n      className\n    )}\n    {...props}\n  />\n)\nSheetHeader.displayName = \"SheetHeader\"\n\nconst SheetFooter = ({\n  className,\n  ...props\n}: React.HTMLAttributes<HTMLDivElement>) => (\n  <div\n    className={cn(\n      \"flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2\",\n      className\n    )}\n    {...props}\n  />\n)\nSheetFooter.displayName = \"SheetFooter\"\n\nconst SheetTitle = React.forwardRef<\n  React.ElementRef<typeof SheetPrimitive.Title>,\n  React.ComponentPropsWithoutRef<typeof SheetPrimitive.Title>\n>(({ className, ...props }, ref) => (\n  <SheetPrimitive.Title\n    ref={ref}\n    className={cn(\"text-lg font-semibold text-foreground\", className)}\n    {...props}\n  />\n))\nSheetTitle.displayName = SheetPrimitive.Title.displayName\n\nconst SheetDescription = React.forwardRef<\n  React.ElementRef<typeof SheetPrimitive.Description>,\n  
React.ComponentPropsWithoutRef<typeof SheetPrimitive.Description>\n>(({ className, ...props }, ref) => (\n  <SheetPrimitive.Description\n    ref={ref}\n    className={cn(\"text-sm text-muted-foreground\", className)}\n    {...props}\n  />\n))\nSheetDescription.displayName = SheetPrimitive.Description.displayName\n\nexport {\n  Sheet,\n  SheetPortal,\n  SheetOverlay,\n  SheetTrigger,\n  SheetClose,\n  SheetContent,\n  SheetHeader,\n  SheetFooter,\n  SheetTitle,\n  SheetDescription,\n}\n"
  },
  {
    "path": "Frontend/src/components/ui/slider.tsx",
    "content": "import * as React from \"react\"\nimport * as SliderPrimitive from \"@radix-ui/react-slider\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Slider = React.forwardRef<\n  React.ElementRef<typeof SliderPrimitive.Root>,\n  React.ComponentPropsWithoutRef<typeof SliderPrimitive.Root>\n>(({ className, ...props }, ref) => (\n  <SliderPrimitive.Root\n    ref={ref}\n    className={cn(\n      \"relative flex w-full touch-none select-none items-center\",\n      className\n    )}\n    {...props}\n  >\n    <SliderPrimitive.Track className=\"relative h-1.5 w-full grow overflow-hidden rounded-full bg-primary/20\">\n      <SliderPrimitive.Range className=\"absolute h-full bg-primary\" />\n    </SliderPrimitive.Track>\n    <SliderPrimitive.Thumb className=\"block h-4 w-4 rounded-full border border-primary/50 bg-background shadow transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50\" />\n  </SliderPrimitive.Root>\n))\nSlider.displayName = SliderPrimitive.Root.displayName\n\nexport { Slider }\n"
  },
  {
    "path": "Frontend/src/components/ui/switch.tsx",
    "content": "import * as React from \"react\"\nimport * as SwitchPrimitives from \"@radix-ui/react-switch\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Switch = React.forwardRef<\n  React.ElementRef<typeof SwitchPrimitives.Root>,\n  React.ComponentPropsWithoutRef<typeof SwitchPrimitives.Root>\n>(({ className, ...props }, ref) => (\n  <SwitchPrimitives.Root\n    className={cn(\n      \"peer inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 focus-visible:ring-offset-background disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=unchecked]:bg-input\",\n      className\n    )}\n    {...props}\n    ref={ref}\n  >\n    <SwitchPrimitives.Thumb\n      className={cn(\n        \"pointer-events-none block h-4 w-4 rounded-full bg-background shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-4 data-[state=unchecked]:translate-x-0\"\n      )}\n    />\n  </SwitchPrimitives.Root>\n))\nSwitch.displayName = SwitchPrimitives.Root.displayName\n\nexport { Switch }\n"
  },
  {
    "path": "Frontend/src/components/ui/tabs.tsx",
    "content": "import * as React from \"react\";\nimport * as TabsPrimitive from \"@radix-ui/react-tabs\";\n\nimport { cn } from \"@/lib/utils\";\n\nconst Tabs = TabsPrimitive.Root;\n\nconst TabsList = React.forwardRef<\n  React.ElementRef<typeof TabsPrimitive.List>,\n  React.ComponentPropsWithoutRef<typeof TabsPrimitive.List>\n>(({ className, ...props }, ref) => (\n  <TabsPrimitive.List\n    ref={ref}\n    className={cn(\n      \"inline-flex h-9 items-center justify-center rounded-lg bg-muted p-1 text-muted-foreground\",\n      className\n    )}\n    {...props}\n  />\n));\nTabsList.displayName = TabsPrimitive.List.displayName;\n\nconst TabsTrigger = React.forwardRef<\n  React.ElementRef<typeof TabsPrimitive.Trigger>,\n  React.ComponentPropsWithoutRef<typeof TabsPrimitive.Trigger>\n>(({ className, ...props }, ref) => (\n  <TabsPrimitive.Trigger\n    ref={ref}\n    className={cn(\n      \"inline-flex items-center justify-center whitespace-nowrap rounded-[8px] px-3 py-1 text-sm font-medium ring-offset-background transition-all focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 data-[state=active]:bg-background data-[state=active]:text-foreground data-[state=active]:shadow\",\n      className\n    )}\n    {...props}\n  />\n));\nTabsTrigger.displayName = TabsPrimitive.Trigger.displayName;\n\nconst TabsContent = React.forwardRef<\n  React.ElementRef<typeof TabsPrimitive.Content>,\n  React.ComponentPropsWithoutRef<typeof TabsPrimitive.Content>\n>(({ className, ...props }, ref) => (\n  <TabsPrimitive.Content\n    ref={ref}\n    className={cn(\n      \"mt-2 ring-offset-background focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2\",\n      className\n    )}\n    {...props}\n  />\n));\nTabsContent.displayName = TabsPrimitive.Content.displayName;\n\nexport { Tabs, TabsList, TabsTrigger, TabsContent };\n"
  },
  {
    "path": "Frontend/src/components/ui/textarea.tsx",
    "content": "import * as React from \"react\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst Textarea = React.forwardRef<\n  HTMLTextAreaElement,\n  React.ComponentProps<\"textarea\">\n>(({ className, ...props }, ref) => {\n  return (\n    <textarea\n      className={cn(\n        \"flex min-h-[60px] w-full rounded-[6px] border border-input bg-transparent px-3 py-2 text-base shadow-sm placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:cursor-not-allowed disabled:opacity-50 md:text-sm\",\n        className\n      )}\n      ref={ref}\n      {...props}\n    />\n  )\n})\nTextarea.displayName = \"Textarea\"\n\nexport { Textarea }\n"
  },
  {
    "path": "Frontend/src/components/ui/toast.tsx",
    "content": "import * as React from \"react\";\nimport * as ToastPrimitives from \"@radix-ui/react-toast\";\nimport { cva, type VariantProps } from \"class-variance-authority\";\nimport { X } from \"lucide-react\";\n\nimport { cn } from \"@/lib/utils\";\n\nconst ToastProvider = ToastPrimitives.Provider;\n\nconst ToastViewport = React.forwardRef<\n  React.ElementRef<typeof ToastPrimitives.Viewport>,\n  React.ComponentPropsWithoutRef<typeof ToastPrimitives.Viewport>\n>(({ className, ...props }, ref) => (\n  <ToastPrimitives.Viewport\n    ref={ref}\n    className={cn(\n      \"fixed top-0 z-[100] flex max-h-screen w-full flex-col-reverse p-4 sm:bottom-0 sm:right-0 sm:top-auto sm:flex-col md:max-w-[420px]\",\n      className\n    )}\n    {...props}\n  />\n));\nToastViewport.displayName = ToastPrimitives.Viewport.displayName;\n\nconst toastVariants = cva(\n  \"group pointer-events-auto relative flex w-full items-center justify-between space-x-2 overflow-hidden rounded-[6px] border p-4 pr-6 shadow-lg transition-all data-[swipe=cancel]:translate-x-0 data-[swipe=end]:translate-x-[var(--radix-toast-swipe-end-x)] data-[swipe=move]:translate-x-[var(--radix-toast-swipe-move-x)] data-[swipe=move]:transition-none data-[state=open]:animate-in data-[state=closed]:animate-out data-[swipe=end]:animate-out data-[state=closed]:fade-out-80 data-[state=closed]:slide-out-to-right-full data-[state=open]:slide-in-from-top-full data-[state=open]:sm:slide-in-from-bottom-full\",\n  {\n    variants: {\n      variant: {\n        default: \"border bg-background text-foreground\",\n        destructive:\n          \"destructive group border-destructive bg-destructive text-destructive-foreground\",\n      },\n    },\n    defaultVariants: {\n      variant: \"default\",\n    },\n  }\n);\n\nconst Toast = React.forwardRef<\n  React.ElementRef<typeof ToastPrimitives.Root>,\n  React.ComponentPropsWithoutRef<typeof ToastPrimitives.Root> &\n    VariantProps<typeof toastVariants>\n>(({ className, 
variant, ...props }, ref) => {\n  return (\n    <ToastPrimitives.Root\n      ref={ref}\n      className={cn(toastVariants({ variant }), className)}\n      {...props}\n    />\n  );\n});\nToast.displayName = ToastPrimitives.Root.displayName;\n\nconst ToastAction = React.forwardRef<\n  React.ElementRef<typeof ToastPrimitives.Action>,\n  React.ComponentPropsWithoutRef<typeof ToastPrimitives.Action>\n>(({ className, ...props }, ref) => (\n  <ToastPrimitives.Action\n    ref={ref}\n    className={cn(\n      \"inline-flex h-8 shrink-0 items-center justify-center rounded-[6px] border bg-transparent px-3 text-sm font-medium transition-colors hover:bg-secondary focus:outline-none focus:ring-1 focus:ring-ring disabled:pointer-events-none disabled:opacity-50 group-[.destructive]:border-muted/40 group-[.destructive]:hover:border-destructive/30 group-[.destructive]:hover:bg-destructive group-[.destructive]:hover:text-destructive-foreground group-[.destructive]:focus:ring-destructive\",\n      className\n    )}\n    {...props}\n  />\n));\nToastAction.displayName = ToastPrimitives.Action.displayName;\n\nconst ToastClose = React.forwardRef<\n  React.ElementRef<typeof ToastPrimitives.Close>,\n  React.ComponentPropsWithoutRef<typeof ToastPrimitives.Close>\n>(({ className, ...props }, ref) => (\n  <ToastPrimitives.Close\n    ref={ref}\n    className={cn(\n      \"absolute right-1 top-1 rounded-[6px] p-1 text-foreground/50 opacity-0 transition-opacity hover:text-foreground focus:opacity-100 focus:outline-none focus:ring-1 group-hover:opacity-100 group-[.destructive]:text-red-300 group-[.destructive]:hover:text-red-50 group-[.destructive]:focus:ring-red-400 group-[.destructive]:focus:ring-offset-red-600\",\n      className\n    )}\n    toast-close=\"\"\n    {...props}\n  >\n    <X className=\"h-4 w-4\" />\n  </ToastPrimitives.Close>\n));\nToastClose.displayName = ToastPrimitives.Close.displayName;\n\nconst ToastTitle = React.forwardRef<\n  React.ElementRef<typeof 
ToastPrimitives.Title>,\n  React.ComponentPropsWithoutRef<typeof ToastPrimitives.Title>\n>(({ className, ...props }, ref) => (\n  <ToastPrimitives.Title\n    ref={ref}\n    className={cn(\"text-sm font-semibold [&+div]:text-xs\", className)}\n    {...props}\n  />\n));\nToastTitle.displayName = ToastPrimitives.Title.displayName;\n\nconst ToastDescription = React.forwardRef<\n  React.ElementRef<typeof ToastPrimitives.Description>,\n  React.ComponentPropsWithoutRef<typeof ToastPrimitives.Description>\n>(({ className, ...props }, ref) => (\n  <ToastPrimitives.Description\n    ref={ref}\n    className={cn(\"text-sm opacity-90\", className)}\n    {...props}\n  />\n));\nToastDescription.displayName = ToastPrimitives.Description.displayName;\n\ntype ToastProps = React.ComponentPropsWithoutRef<typeof Toast>;\n\ntype ToastActionElement = React.ReactElement<typeof ToastAction>;\n\nexport {\n  type ToastProps,\n  type ToastActionElement,\n  ToastProvider,\n  ToastViewport,\n  Toast,\n  ToastTitle,\n  ToastDescription,\n  ToastClose,\n  ToastAction,\n};\n"
  },
  {
    "path": "Frontend/src/components/ui/toaster.tsx",
    "content": "import { useToast } from \"@/hooks/use-toast\"\nimport {\n  Toast,\n  ToastClose,\n  ToastDescription,\n  ToastProvider,\n  ToastTitle,\n  ToastViewport,\n} from \"@/components/ui/toast\"\n\nexport function Toaster() {\n  const { toasts } = useToast()\n\n  return (\n    <ToastProvider>\n      {toasts.map(function ({ id, title, description, action, ...props }) {\n        return (\n          <Toast key={id} {...props}>\n            <div className=\"grid gap-1\">\n              {title && <ToastTitle>{title}</ToastTitle>}\n              {description && (\n                <ToastDescription>{description}</ToastDescription>\n              )}\n            </div>\n            {action}\n            <ToastClose />\n          </Toast>\n        )\n      })}\n      <ToastViewport />\n    </ToastProvider>\n  )\n}\n"
  },
  {
    "path": "Frontend/src/components/ui/tooltip.tsx",
    "content": "import * as React from \"react\"\nimport * as TooltipPrimitive from \"@radix-ui/react-tooltip\"\n\nimport { cn } from \"@/lib/utils\"\n\nconst TooltipProvider = TooltipPrimitive.Provider\n\nconst Tooltip = TooltipPrimitive.Root\n\nconst TooltipTrigger = TooltipPrimitive.Trigger\n\nconst TooltipContent = React.forwardRef<\n  React.ElementRef<typeof TooltipPrimitive.Content>,\n  React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>\n>(({ className, sideOffset = 4, ...props }, ref) => (\n  <TooltipPrimitive.Portal>\n    <TooltipPrimitive.Content\n      ref={ref}\n      sideOffset={sideOffset}\n      className={cn(\n        \"z-50 overflow-hidden rounded-[6px] bg-primary px-3 py-1.5 text-xs text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2\",\n        className\n      )}\n      {...props}\n    />\n  </TooltipPrimitive.Portal>\n))\nTooltipContent.displayName = TooltipPrimitive.Content.displayName\n\nexport { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }\n"
  },
  {
    "path": "Frontend/src/context/ChatInputContext.tsx",
    "content": "import { createContext } from \"react\";\n\nexport interface ChatInputContextType {\n  input: string;\n  setInput: React.Dispatch<React.SetStateAction<string>>;\n  isLoading: boolean;\n  setIsLoading: React.Dispatch<React.SetStateAction<boolean>>;\n  handleChatRequest: (\n    collectionId: number | undefined,\n    suggestion?: string,\n    conversationId?: number\n  ) => Promise<void>;\n  cancelRequest: () => Promise<void>;\n}\n\nexport const ChatInputContext = createContext<ChatInputContextType | undefined>(\n  undefined\n);\n"
  },
  {
    "path": "Frontend/src/context/LibraryContext.tsx",
    "content": "import React, { createContext, useCallback, useEffect, useState } from \"react\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { useUser } from \"./useUser\";\nimport { LibraryContextType } from \"@/types/contextTypes/LibraryContextTypes\";\n\nconst LibraryContext = createContext<LibraryContextType | undefined>(undefined);\n\nconst LibraryProvider: React.FC<{ children: React.ReactNode }> = ({\n  children,\n}) => {\n  const { activeUser } = useUser();\n  const [openLibrary, setOpenLibrary] = useState<boolean>(false);\n  const [openAddToCollection, setOpenAddToCollection] =\n    useState<boolean>(false);\n  const [selectedCollection, setSelectedCollection] =\n    useState<Collection | null>(null);\n  const [ingesting, setIngesting] = useState<boolean>(false);\n  const [userCollections, setUserCollections] = useState<Collection[]>([]);\n  const [files, setFiles] = useState<string[]>([]);\n  const [progressMessage, setProgressMessage] = useState<string>(\"\");\n  const [progress, setProgress] = useState<number>(0);\n  const [showProgress, setShowProgress] = useState<boolean>(false);\n  const [showUpload, setShowUpload] = useState<boolean>(false);\n  const [fileExpanded, setFileExpanded] = useState(false);\n  const [link, setLink] = useState(\"\");\n  const [showAddStore, setShowAddStore] = useState<boolean>(false);\n  const [selectedFile, setSelectedFile] = useState<File | null>(null);\n  const [selectedLinkType, setSelectedLinkType] = useState<\n    \"website\" | \"youtube\" | \"crawl\" | \"documentation\" | null\n  >(null);\n  const [embeddingModels, setEmbeddingModels] = useState<Model[]>([]);\n  const loadFiles = useCallback(async () => {\n    if (!activeUser?.id || !activeUser?.name || !selectedCollection?.id) return;\n    const fileList = await window.electron.getFilesInCollection(\n      activeUser.id,\n      selectedCollection.id\n    );\n    setFiles(fileList.files as unknown as string[]);\n  }, [activeUser?.id, selectedCollection?.id, 
activeUser?.name, setFiles]);\n  const handleCancelEmbed = async () => {\n    try {\n      if (!activeUser?.id) return;\n      await window.electron.cancelEmbed({ userId: activeUser.id });\n      setProgressMessage(\"Embedding cancelled\");\n      setProgress(0);\n      setShowUpload(false);\n    } catch (error) {\n      console.error(\"Error cancelling embed:\", error);\n    }\n  };\n\n  const handleDeleteCollection = () => {\n    if (!activeUser?.id || !selectedCollection?.id || !setShowUpload) return;\n    setShowUpload(false);\n    window.electron.deleteCollection(\n      selectedCollection.id,\n      selectedCollection.name,\n      activeUser.id\n    );\n    setUserCollections(\n      [...userCollections].filter((c) => c.id !== selectedCollection.id)\n    );\n    setSelectedCollection(null);\n  };\n  const handleProgressData = (data: ProgressData) => {\n    setShowProgress(true);\n    if (\"type\" in data) {\n      switch (data.type) {\n        case \"start\":\n          setProgressMessage(data.message || \"Starting web crawl...\");\n          setProgress(0);\n          break;\n        case \"progress\":\n          if (data.current && data.total) {\n            const percentage = Math.floor((data.current / data.total) * 100);\n            setProgress(percentage);\n            setProgressMessage(\n              `Processing URL ${data.current} of ${data.total}`\n            );\n          }\n          break;\n        case \"processing\":\n          if (data.url) {\n            setProgressMessage(`Processing: ${data.url}`);\n          }\n          break;\n        case \"saved\":\n          if (data.url) {\n            setProgressMessage(`Saved: ${data.url}`);\n          }\n          break;\n        case \"links\":\n          if (data.count && data.url) {\n            setProgressMessage(\n              `Found ${data.count} new links from: ${data.url}`\n            );\n          }\n          break;\n        case \"embedding_start\":\n          
setProgressMessage(data.message || \"Starting embedding process...\");\n          break;\n        case \"embedding_progress\":\n          if (data.current_batch && data.total_batches) {\n            const percentage = Math.floor(\n              (data.current_batch / data.total_batches) * 100\n            );\n            setProgress(percentage);\n            setProgressMessage(\n              `Processing batch ${data.current_batch}/${data.total_batches}`\n            );\n          }\n          break;\n        case \"error\":\n          setProgressMessage(`Error: ${data.message || \"Unknown error\"}`);\n          setProgress(0);\n          break;\n        case \"complete\":\n          setProgress(100);\n          setProgressMessage(data.message || \"Web crawl completed!\");\n          break;\n      }\n    } else if (\"status\" in data) {\n      if (data.status === \"progress\" && data.data) {\n        const { message, chunk, total_chunks, percent_complete } = data.data;\n        if (message) setProgressMessage(message);\n        if (chunk && total_chunks) {\n          const percentage = Math.floor((chunk / total_chunks) * 100);\n          setProgress(percentage);\n        } else if (percent_complete) {\n          const percentage = parseFloat(percent_complete.replace(\"%\", \"\"));\n          setProgress(percentage);\n        }\n      } else if (data.status === \"error\" && data.data?.message) {\n        setProgressMessage(`Error: ${data.data.message}`);\n        setProgress(0);\n      }\n    }\n  };\n  const fetchCollections = async () => {\n    if (activeUser) {\n      const fetchedCollections = await window.electron.getUserCollections(\n        activeUser.id\n      );\n      setUserCollections(fetchedCollections.collections as Collection[]);\n    }\n  };\n\n  const fetchFilesInCollection = useCallback(async () => {\n    if (activeUser && selectedCollection) {\n      const files = await window.electron.getFilesInCollection(\n        activeUser.id,\n        
selectedCollection.id\n      );\n      setFiles(files.files);\n    }\n  }, [activeUser, selectedCollection]);\n\n  const handleUpload = useCallback(async (base64Content: string) => {\n    if (!activeUser?.id || !selectedCollection?.id || !selectedFile) return;\n\n    try {\n      setIngesting(true);\n      setShowProgress(true);\n\n      const result = await window.electron.addFileToCollection(\n        activeUser.id,\n        activeUser.name,\n        selectedCollection.id,\n        selectedCollection.name,\n        selectedFile.name,\n        base64Content\n      );\n\n      if (result.result.success) {\n        setSelectedFile(null);\n        setProgressMessage(\"\");\n        setProgress(0);\n        setShowProgress(false);\n        loadFiles();\n        setIngesting(false);\n      } else {\n        toast({\n          title: \"Error\",\n          description: \"Check your OPENAI API keys and try again\",\n          variant: \"destructive\",\n        });\n      }\n    } catch (error) {\n      setShowProgress(false);\n      setProgressMessage(\"\");\n      setIngesting(false);\n      toast({\n        title: \"Error\",\n        description: error instanceof Error ? 
error.message : \"Unknown error\",\n        variant: \"destructive\",\n      });\n    }\n  }, [\n    activeUser?.id,\n    activeUser?.name,\n    selectedCollection?.id,\n    selectedCollection?.name,\n    selectedFile,\n    loadFiles,\n    setProgressMessage,\n    setShowProgress,\n    setProgress,\n    setIngesting,\n    setSelectedFile,\n  ]);\n\n  useEffect(() => {\n    fetchFilesInCollection();\n  }, [fetchFilesInCollection]);\n\n  useEffect(() => {\n    const updateState = (msg: string, prog: number) => {\n      setProgressMessage(msg);\n      setProgress(prog);\n    };\n\n    const handleProgress = (\n      _: Electron.IpcRendererEvent,\n      message:\n        | string\n        | ProgressData\n        | OllamaProgressEvent\n        | DownloadModelProgress\n    ) => {\n      try {\n        const data =\n          typeof message === \"string\" ? JSON.parse(message) : message;\n\n        setShowProgress(true);\n\n        if (typeof data === \"string\") {\n          updateState(data, 0);\n          return;\n        }\n\n        if (\n          \"type\" in data &&\n          (data.type === \"pull\" || data.type === \"verify\")\n        ) {\n          // Handle Ollama progress\n          setProgressMessage(`Ollama: ${data.status || data.type}`);\n          return;\n        }\n\n        if (\"total\" in data && \"received\" in data) {\n          // Handle DownloadModelProgress\n          const percentage = Math.floor((data.received / data.total) * 100);\n          setProgressMessage(`Downloading model: ${percentage}%`);\n          setProgress(percentage);\n          return;\n        }\n\n        handleProgressData(data as ProgressData);\n      } catch (error) {\n        console.error(\"Error handling progress:\", error);\n        if (typeof message === \"string\") {\n          updateState(message, 0);\n        }\n      }\n    };\n\n    window.electron.on(\"ingest-progress\", handleProgress);\n    return () => {\n      
window.electron.removeListener(\"ingest-progress\", handleProgress);\n    };\n  }, [setProgressMessage, setProgress, setShowProgress]);\n\n  return (\n    <LibraryContext.Provider\n      value={{\n        files,\n        setFiles,\n        loadFiles,\n        handleCancelEmbed,\n        handleProgressData,\n        showProgress,\n        showUpload,\n        progressMessage,\n        progress,\n        openLibrary,\n        setOpenLibrary,\n        openAddToCollection,\n        setOpenAddToCollection,\n        fetchCollections,\n        ingesting,\n        setIngesting,\n        userCollections,\n        setUserCollections,\n        selectedCollection,\n        setSelectedCollection,\n        fileExpanded,\n        setFileExpanded,\n        link,\n        setLink,\n        selectedFile,\n        setSelectedFile,\n        selectedLinkType,\n        setSelectedLinkType,\n        showAddStore,\n        setShowAddStore,\n        setShowUpload,\n        setProgressMessage,\n        setProgress,\n        setEmbeddingModels,\n        setShowProgress,\n        handleUpload,\n        handleDeleteCollection,\n        embeddingModels,\n      }}\n    >\n      {children}\n    </LibraryContext.Provider>\n  );\n};\n\nexport { LibraryProvider, LibraryContext };\n"
  },
  {
    "path": "Frontend/src/context/SysSettingsContext.tsx",
    "content": "import React, { createContext, useRef, useState } from \"react\";\nimport { toast } from \"@/hooks/use-toast\";\nimport { SysSettingsContextType } from \"@/types/contextTypes/SystemSettingsTypes\";\nimport { SystemSpecs } from \"@/data/sysSpecs\";\n\nconst SysSettingsContext = createContext<SysSettingsContextType | undefined>(\n  undefined\n);\n\nconst SysSettingsProvider: React.FC<{ children: React.ReactNode }> = ({\n  children,\n}) => {\n  const [localModel, setLocalModel] = useState<string>(\"\");\n  const [selectedProvider, setSelectedProvider] = useState<string>(\"\");\n  const [selectedModel, setSelectedModel] = useState<Model | null>(null);\n  const [localModelDir, setLocalModelDir] = useState<string>(\"\");\n  const [localModels, setLocalModels] = useState<Model[]>([]);\n  const [isMaximized, setIsMaximized] = useState<boolean>(false);\n  const [isOllamaRunning, setIsOllamaRunning] = useState<boolean>(false);\n  const [ollamaModels, setOllamaModels] = useState<OllamaModel[]>([]);\n  const [settingsOpen, setSettingsOpen] = useState<boolean>(false);\n  const [settings, setSettings] = useState<UserSettings>({});\n  const [users, setUsers] = useState<User[]>([]);\n  const [ollamaInit, setOllamaInit] = useState<boolean>(false);\n  const progressRef = useRef<HTMLDivElement>(null);\n  const [isRunningModel, setIsRunningModel] = useState(false);\n  const [isFFMPEGInstalled, setisFFMPEGInstalled] = useState(false);\n  const [localModalLoading, setLocalModalLoading] = useState(false);\n  const [progressLocalOutput, setProgressLocalOutput] = useState<string[]>([]);\n  const [sourceType, setSourceType] = useState<\"local\" | \"external\">(\n    \"external\"\n  );\n  const [platform, setPlatform] = useState<\"win32\" | \"darwin\" | \"linux\" | null>(\n    null\n  );\n\n  const [systemSpecs, setSystemSpecs] = useState<SystemSpecs>({\n    cpu: \"Unknown\",\n    vram: \"Unknown\",\n    GPU_Manufacturer: \"Unknown\",\n  });\n\n  const totalVRAM = 
parseInt(systemSpecs.vram);\n  const [maxTokens, setMaxTokens] = useState(4096);\n\n  const checkFFMPEG = async () => {\n    try {\n      const result = await window.electron.checkIfFFMPEGInstalled();\n      if (result && typeof result.success === \"boolean\") {\n        setisFFMPEGInstalled(result.success);\n      } else {\n        console.error(\"Invalid FFMPEG check result:\", result);\n        setisFFMPEGInstalled(false);\n      }\n    } catch (error) {\n      console.error(\"Error checking FFMPEG:\", error);\n      setisFFMPEGInstalled(false);\n    }\n  };\n\n  const handleOllamaIntegration = async (activeUser: User) => {\n    const startUpOllama = await window.electron.checkOllama();\n    if (activeUser && startUpOllama) {\n      const models = await window.electron.fetchOllamaModels();\n      const filteredModels = (models.models as unknown as string[])\n        .filter((model) => !model.includes(\"granite\"))\n        .map((model) => ({ name: model, type: \"ollama\" }));\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        ollamaIntegration: 1,\n      });\n      setOllamaInit(true);\n      setOllamaModels(filteredModels);\n    }\n  };\n  const handleRunModel = async (\n    model_name: string,\n    model_location: string,\n    model_type: string,\n    user_id: string\n  ) => {\n    setLocalModalLoading(true);\n    const result = (await window.electron.loadModel({\n      model_location: model_location,\n      model_name: model_name,\n      model_type: model_type,\n      user_id: Number(user_id),\n    })) as unknown as { status: string };\n    setSettings((prev) => ({\n      ...prev,\n      model: model_name,\n      provider: \"local\",\n    }));\n    await window.electron.updateUserSettings({\n      userId: Number(user_id),\n      model: model_name,\n      provider: \"local\",\n      modelType: model_type,\n      modelLocation: model_location,\n    });\n\n    if (result.status === \"success\") {\n      toast({\n        
title: \"Model loaded\",\n        description: `Loaded ${model_name}`,\n      });\n      setLocalModalLoading(false);\n    } else {\n      toast({\n        title: \"Error\",\n        description: \"Failed to load model\",\n        variant: \"destructive\",\n      });\n      setLocalModalLoading(false);\n    }\n  };\n\n  const checkOllama = async () => {\n    const { isOllamaRunning } = await window.electron.checkOllama();\n    setIsOllamaRunning(isOllamaRunning);\n    if (isOllamaRunning) {\n      fetchLocalModels();\n    }\n  };\n\n  const fetchLocalModels = async () => {\n    try {\n      const data = (await window.electron.getDirModels(\n        localModelDir\n      )) as unknown as {\n        dirPath: string;\n        models: Model[];\n      };\n\n      setLocalModels(\n        Array.isArray(data.models)\n          ? data.models.map((model: string | Model) => ({\n              name: typeof model === \"string\" ? model : model.name || \"\",\n              type: typeof model === \"string\" ? \"\" : model.type || \"\",\n              model_location:\n                typeof model === \"string\" ? \"\" : model.model_location || \"\",\n              modified_at:\n                typeof model === \"string\" ? \"\" : model.modified_at || \"\",\n              size: typeof model === \"string\" ? 0 : model.size || 0,\n              digest: typeof model === \"string\" ? 
\"\" : model.digest || \"\",\n            }))\n          : []\n      );\n    } catch (error) {\n      console.error(\"Error fetching local models:\", error);\n    }\n  };\n\n  const handleRunOllama = async (model: string, activeUser: User) => {\n    if (!model) {\n      toast({\n        title: \"Error\",\n        description: \"Please select a model first\",\n        variant: \"destructive\",\n      });\n      return;\n    }\n    setLocalModalLoading(true);\n    setProgressLocalOutput([]);\n    await window.electron.updateUserSettings({\n      userId: activeUser.id,\n      ollamaModel: model,\n    });\n    try {\n      const result = await window.electron.runOllama(model, activeUser);\n\n      if (!result.success && result.error) {\n        toast({\n          title: \"Error\",\n          description: result.error,\n          variant: \"destructive\",\n        });\n        return;\n      }\n\n      toast({\n        title: \"Success\",\n        description: `Started Ollama with model: ${model}`,\n      });\n      await window.electron.updateUserSettings({\n        userId: activeUser.id,\n        provider: \"Ollama\",\n        model: model,\n      });\n      setSettings((prev) => ({\n        ...prev,\n        provider: \"Ollama\",\n        model: model,\n      }));\n    } catch (error) {\n      toast({\n        title: \"Error\",\n        description:\n          error instanceof Error ? error.message : \"Failed to start Ollama\",\n        variant: \"destructive\",\n      });\n    } finally {\n      setLocalModalLoading(false);\n    }\n  };\n  const fetchSettings = async (activeUser: User) => {\n    if (activeUser) {\n      const settings = await window.electron.getUserSettings(activeUser.id);\n      if (parseInt(settings?.ollamaIntegration?.toString() ?? 
\"0\") === 1) {\n        handleOllamaIntegration(activeUser);\n      }\n      setSettings(settings);\n      if (settings.modelDirectory) {\n        setLocalModelDir(settings.modelDirectory);\n        const models = (await window.electron.getDirModels(\n          settings.modelDirectory\n        )) as unknown as { dirPath: string; models: Model[] };\n        setLocalModels(models.models);\n        if (\n          settings.provider === \"local\" &&\n          settings.model &&\n          settings.modelType\n        ) {\n          await window.electron.loadModel({\n            model_location: settings.modelLocation as string,\n            model_name: settings.model,\n            model_type: settings.modelType as string,\n            user_id: activeUser.id,\n          });\n        }\n      }\n    }\n  };\n\n  const loadModelsFromDirectory = async (dirPath: string) => {\n    try {\n      const models = await window.electron.getDirModels(dirPath);\n      if (!Array.isArray(models)) {\n        throw new Error(\"Invalid response from getDirModels - expected array\");\n      }\n\n      // Convert the models array to the OllamaModel format\n      const formattedModels: Model[] = models.map((modelName: string) => ({\n        name: modelName,\n        type: \"\",\n        model_location: \"\",\n        modified_at: \"\", // These fields might not be available for local models\n        size: 0,\n        digest: \"\",\n      }));\n\n      setLocalModels(formattedModels);\n      setLocalModelDir(dirPath);\n\n      toast({\n        title: \"Models Loaded\",\n        description: `Found ${formattedModels.length} models in directory`,\n      });\n    } catch (error) {\n      console.error(\"Error loading models from directory:\", error);\n      toast({\n        title: \"Error\",\n        description: \"Failed to load models from directory\",\n        variant: \"destructive\",\n      });\n    }\n  };\n\n  return (\n    <SysSettingsContext.Provider\n      value={{\n        
isOllamaRunning,\n        setIsOllamaRunning,\n        systemSpecs,\n        setSystemSpecs,\n        totalVRAM,\n        settingsOpen,\n        setSettingsOpen,\n        settings,\n        setSettings,\n        platform,\n        setPlatform,\n        sourceType,\n        setSourceType,\n        users,\n        setUsers,\n        localModels,\n        setLocalModels,\n        isRunningModel,\n        setIsRunningModel,\n        isFFMPEGInstalled,\n        setisFFMPEGInstalled,\n        localModalLoading,\n        setLocalModalLoading,\n        progressRef,\n        progressLocalOutput,\n        setProgressLocalOutput,\n        handleRunOllama,\n        isMaximized,\n        setIsMaximized,\n        checkFFMPEG,\n        fetchLocalModels,\n        checkOllama,\n        maxTokens,\n        setMaxTokens,\n        localModelDir,\n        setLocalModelDir,\n        loadModelsFromDirectory,\n        handleRunModel,\n        ollamaModels,\n        setOllamaModels,\n        selectedModel,\n        setSelectedModel,\n        selectedProvider,\n        setSelectedProvider,\n        localModel,\n        setLocalModel,\n        fetchSettings,\n        handleOllamaIntegration,\n        ollamaInit,\n        setOllamaInit,\n      }}\n    >\n      {children}\n    </SysSettingsContext.Provider>\n  );\n};\n\nexport { SysSettingsProvider, SysSettingsContext };\n"
  },
  {
    "path": "Frontend/src/context/UserClientProviders.tsx",
    "content": "import { UserProvider } from \"./UserContext\";\nimport { SysSettingsProvider } from \"./SysSettingsContext\";\nimport { ViewProvider } from \"./ViewContext\";\nimport { LibraryProvider } from \"./LibraryContext\";\n\nexport default function UserClientProviders({\n  children,\n}: {\n  children: React.ReactNode;\n}) {\n  return (\n    <UserProvider>\n      <LibraryProvider>\n        <SysSettingsProvider>\n          <ViewProvider>{children}</ViewProvider>\n        </SysSettingsProvider>\n      </LibraryProvider>\n    </UserProvider>\n  );\n}\n"
  },
  {
    "path": "Frontend/src/context/UserContext.tsx",
    "content": "import React, { createContext, useMemo } from \"react\";\nimport { ChatInputContext, ChatInputContextType } from \"./ChatInputContext\";\nimport { useChatManagement } from \"@/hooks/useChatManagement\";\nimport { useConversationManagement } from \"@/hooks/useConversationManagement\";\nimport { useModelManagement } from \"@/hooks/useModelManagement\";\nimport { useUIState } from \"@/hooks/useUIState\";\nimport { useState, useCallback } from \"react\";\nimport { UserContextType } from \"@/types/contextTypes/UserContextType\";\n\nconst UserContext = createContext<UserContextType | undefined>(undefined);\n\nconst UserProvider: React.FC<{ children: React.ReactNode }> = ({\n  children,\n}) => {\n  const [activeUser, setActiveUser] = useState<User | null>(null);\n  const [apiKeys, setApiKeys] = useState<ApiKey[]>([]);\n  const [apiKeyInput, setApiKeyInput] = useState<string>(\"\");\n  const [filteredConversations, setFilteredConversations] = useState<\n    Conversation[]\n  >([]);\n  const [prompts, setPrompts] = useState<UserPrompts[]>([]);\n  const [devAPIKeys, setDevAPIKeys] = useState<Keys[]>([]);\n\n  // Initialize conversation management first\n  const {\n    conversations,\n    activeConversation,\n    title,\n    newConversation,\n    getUserConversations,\n    setActiveConversation,\n    setTitle,\n    setNewConversation,\n    setConversations,\n  } = useConversationManagement(activeUser);\n\n  // Then initialize chat management with the getUserConversations function\n  const {\n    messages,\n    streamingMessage,\n    streamingMessageReasoning,\n    isLoading,\n    error,\n    handleChatRequest: baseChatRequest,\n    cancelRequest,\n    setMessages,\n    setStreamingMessage,\n    setStreamingMessageReasoning,\n    setError,\n    currentRequestId,\n    setCurrentRequestId,\n    setIsLoading,\n    input,\n    setInput,\n    agentActions,\n    setAgentActions,\n  } = useChatManagement(activeUser, getUserConversations);\n\n  const {\n    
openRouterModels,\n    azureModels,\n    customModels,\n    fetchOpenRouterModels,\n    fetchAzureModels,\n    fetchCustomModels,\n    setOpenRouterModels,\n    setAzureModels,\n    setCustomModels,\n    tools,\n    setTools,\n    dockTool,\n    fetchTools,\n    systemTools,\n    setSystemTools,\n    fetchSystemTools,\n    userTools,\n    setUserTools,\n    toggleTool,\n    externalOllama,\n    setExternalOllama,\n    fetchExternalOllama,\n  } = useModelManagement(activeUser);\n\n  const {\n    isSearchOpen,\n    setIsSearchOpen,\n    searchTerm,\n    setSearchTerm,\n    searchRef,\n    alertForUser,\n    setAlertForUser,\n  } = useUIState();\n\n  const fetchDevAPIKeys = useCallback(async () => {\n    if (activeUser) {\n      const keys = await window.electron.getDevAPIKeys(activeUser.id);\n      setDevAPIKeys(keys.keys);\n    }\n  }, [activeUser]);\n\n  const fetchApiKey = useCallback(async () => {\n    if (activeUser) {\n      const apiKeys = await window.electron.getUserApiKeys(activeUser.id);\n      const settings = await window.electron.getUserSettings(activeUser.id);\n      if (apiKeys.apiKeys.length === 0 && settings.provider !== \"local\") {\n        setAlertForUser(true);\n        return;\n      }\n      setApiKeys(apiKeys.apiKeys as ApiKey[]);\n    }\n  }, [activeUser, setAlertForUser]);\n\n  const fetchPrompts = useCallback(async () => {\n    if (activeUser) {\n      const fetchedPrompts = await window.electron.getUserPrompts(\n        activeUser.id\n      );\n      setPrompts(fetchedPrompts.prompts as UserPrompts[]);\n    }\n  }, [activeUser]);\n\n  const fetchMessages = useCallback(async () => {\n    if (activeConversation) {\n      const conversation = conversations.find(\n        (conv: Conversation) => conv.id === activeConversation\n      );\n      if (conversation && activeUser) {\n        const newMessages =\n          await window.electron.getConversationMessagesWithData(\n            activeUser.id,\n            conversation.id\n          );\n   
     setMessages(newMessages.messages);\n      }\n    }\n  }, [activeConversation, conversations, activeUser, setMessages]);\n\n  const handleResetChat = useCallback(async () => {\n    await cancelRequest();\n    setMessages([]);\n    setStreamingMessage(\"\");\n    setStreamingMessageReasoning(\"\");\n    setIsLoading(false);\n    setActiveConversation(null);\n  }, [\n    cancelRequest,\n    setMessages,\n    setStreamingMessage,\n    setStreamingMessageReasoning,\n    setActiveConversation,\n    setIsLoading,\n  ]);\n\n  // Memoize chat input related values\n  const chatInputValue = useMemo<ChatInputContextType>(\n    () => ({\n      input,\n      setInput,\n      isLoading,\n      setIsLoading,\n      handleChatRequest: baseChatRequest,\n      cancelRequest,\n    }),\n    [input, setInput, isLoading, setIsLoading, baseChatRequest, cancelRequest]\n  );\n\n  // Memoize the main context value\n  const contextValue = useMemo<UserContextType>(\n    () => ({\n      activeUser,\n      setActiveUser,\n      apiKeys,\n      setApiKeys,\n      activeConversation,\n      setActiveConversation,\n      conversations,\n      setConversations,\n      prompts,\n      setPrompts,\n      filteredConversations,\n      setFilteredConversations,\n      isSearchOpen,\n      setIsSearchOpen,\n      searchTerm,\n      setSearchTerm,\n      searchRef,\n      messages,\n      setMessages,\n      newConversation,\n      setNewConversation,\n      title,\n      setTitle,\n      streamingMessage,\n      setStreamingMessage,\n      handleResetChat,\n      devAPIKeys,\n      setDevAPIKeys,\n      fetchDevAPIKeys,\n      getUserConversations,\n      alertForUser,\n      setAlertForUser,\n      fetchApiKey,\n      fetchPrompts,\n      fetchMessages,\n      error,\n      setError,\n      currentRequestId,\n      setCurrentRequestId,\n      openRouterModels,\n      setOpenRouterModels,\n      apiKeyInput,\n      setApiKeyInput,\n      azureModels,\n      setAzureModels,\n      customModels,\n     
 setCustomModels,\n      fetchOpenRouterModels,\n      fetchAzureModels,\n      fetchCustomModels,\n      streamingMessageReasoning,\n      setStreamingMessageReasoning,\n      agentActions,\n      setAgentActions,\n      tools,\n      setTools,\n      dockTool,\n      fetchTools,\n      systemTools,\n      setSystemTools,\n      fetchSystemTools,\n      userTools,\n      setUserTools,\n      toggleTool,\n      externalOllama,\n      setExternalOllama,\n      fetchExternalOllama,\n    }),\n    [\n      activeUser,\n      apiKeys,\n      activeConversation,\n      conversations,\n      prompts,\n      filteredConversations,\n      isSearchOpen,\n      searchTerm,\n      searchRef,\n      messages,\n      newConversation,\n      title,\n      streamingMessage,\n      handleResetChat,\n      devAPIKeys,\n      fetchDevAPIKeys,\n      getUserConversations,\n      alertForUser,\n      fetchApiKey,\n      fetchPrompts,\n      fetchMessages,\n      error,\n      currentRequestId,\n      openRouterModels,\n      apiKeyInput,\n      azureModels,\n      customModels,\n      fetchOpenRouterModels,\n      fetchAzureModels,\n      fetchCustomModels,\n      streamingMessageReasoning,\n      setActiveConversation,\n      setAlertForUser,\n      setCurrentRequestId,\n      setError,\n      setIsSearchOpen,\n      setMessages,\n      setNewConversation,\n      setSearchTerm,\n      setStreamingMessage,\n      setStreamingMessageReasoning,\n      setTitle,\n      setConversations,\n      setOpenRouterModels,\n      setAzureModels,\n      setCustomModels,\n      setAgentActions,\n      agentActions,\n      tools,\n      setTools,\n      dockTool,\n      fetchTools,\n      systemTools,\n      setSystemTools,\n      fetchSystemTools,\n      userTools,\n      setUserTools,\n      toggleTool,\n      externalOllama,\n      setExternalOllama,\n      fetchExternalOllama,\n    ]\n  );\n\n  return (\n    <UserContext.Provider value={contextValue}>\n      <ChatInputContext.Provider 
value={chatInputValue}>\n        {children}\n      </ChatInputContext.Provider>\n    </UserContext.Provider>\n  );\n};\n\nexport { UserProvider, UserContext };\n"
  },
  {
    "path": "Frontend/src/context/ViewContext.tsx",
    "content": "import React, { createContext, useState } from \"react\";\nimport { UserViewContextType } from \"@/types/contextTypes/UserViewTypes\";\n\nconst ViewContext = createContext<UserViewContextType | undefined>(undefined);\n\nconst ViewProvider: React.FC<{ children: React.ReactNode }> = ({\n  children,\n}) => {\n  const [activeView, setActiveView] = useState<View>(\"Chat\");\n\n  return (\n    <ViewContext.Provider value={{ activeView, setActiveView }}>\n      {children}\n    </ViewContext.Provider>\n  );\n};\n\nexport { ViewProvider, ViewContext };\n"
  },
  {
    "path": "Frontend/src/context/useChatInput.tsx",
    "content": "import { useContext } from \"react\";\nimport { ChatInputContext } from \"./ChatInputContext\";\n\nexport const useChatInput = () => {\n  const context = useContext(ChatInputContext);\n  if (context === undefined) {\n    throw new Error(\"useChatInput must be used within a UserProvider\");\n  }\n  return context;\n}; "
  },
  {
    "path": "Frontend/src/context/useLibrary.tsx",
    "content": "import { useContext } from \"react\";\nimport { LibraryContext } from \"./LibraryContext\";\n\nexport const useLibrary = () => {\n  const context = useContext(LibraryContext);\n  if (context === undefined) {\n    throw new Error(\"useLibrary must be used within a LibraryProvider\");\n  }\n  return context;\n};\n"
  },
  {
    "path": "Frontend/src/context/useSysSettings.tsx",
    "content": "import { useContext } from \"react\";\nimport { SysSettingsContext } from \"./SysSettingsContext\";\n\nexport const useSysSettings = () => {\n  const context = useContext(SysSettingsContext);\n  if (context === undefined) {\n    throw new Error(\"useSysSettings must be used within a SysSettingsProvider\");\n  }\n  return context;\n};\n"
  },
  {
    "path": "Frontend/src/context/useUser.tsx",
    "content": "import { useContext } from \"react\";\nimport { UserContext } from \"./UserContext\";\n\nexport const useUser = () => {\n  const context = useContext(UserContext);\n  if (context === undefined) {\n    throw new Error(\"useUser must be used within a UserProvider\");\n  }\n  return context;\n};\n"
  },
  {
    "path": "Frontend/src/context/useView.tsx",
    "content": "import { useContext } from \"react\";\nimport { ViewContext } from \"./ViewContext\";\n\nexport const useView = () => {\n  const context = useContext(ViewContext);\n  if (context === undefined) {\n    throw new Error(\"useView must be used within a ViewProvider\");\n  }\n  return context;\n};\n"
  },
  {
    "path": "Frontend/src/data/models.ts",
    "content": "import React from \"react\";\n\nexport const fetchEmbeddingModels = async (\n  setEmbeddingModels: React.Dispatch<React.SetStateAction<Model[]>>\n) => {\n  try {\n    const result = await window.electron.getEmbeddingsModels();\n    if (result && result.models) {\n      setEmbeddingModels(result.models);\n    }\n  } catch (error) {\n    console.error(\"Error fetching embedding models:\", error);\n    setEmbeddingModels([]);\n  }\n};\n"
  },
  {
    "path": "Frontend/src/data/sysSpecs.ts",
    "content": "export type SystemSpecs = {\n  cpu: string;\n  vram: string;\n  GPU_Manufacturer?: string;\n};\n\nexport const fetchSystemSpecs = async (\n  setSystemSpecs: React.Dispatch<React.SetStateAction<SystemSpecs>>\n) => {\n  try {\n    const { cpu, vram, GPU_Manufacturer } = await window.electron.systemSpecs();\n    if (!GPU_Manufacturer) {\n      setSystemSpecs({ cpu, vram, GPU_Manufacturer: \"Unknown\" });\n    } else {\n      setSystemSpecs({ cpu, vram, GPU_Manufacturer });\n    }\n  } catch (error) {\n    console.error(\"Error fetching system specs:\", error);\n    setSystemSpecs({\n      cpu: \"Unknown\",\n      vram: \"Unknown\",\n      GPU_Manufacturer: \"Unknown\",\n    });\n  }\n};\n"
  },
  {
    "path": "Frontend/src/electron/authentication/devApi.ts",
    "content": "import jwt from \"jsonwebtoken\";\nimport fs from \"fs\";\nimport path from \"path\";\nimport { app } from \"electron\";\nimport crypto from \"crypto\";\nimport { isDev } from \"../util.js\";\n\nexport function getDevSecretPath(): string {\n  const secretFileName = \".dev.secret\";\n  return isDev()\n    ? path.join(process.cwd(), \"..\", secretFileName)\n    : path.join(app.getPath(\"userData\"), secretFileName);\n}\n\nexport function getSecret(): string {\n  const secretPath = getDevSecretPath();\n\n  try {\n    // Try to read existing secret\n    if (fs.existsSync(secretPath)) {\n      return fs.readFileSync(secretPath, \"utf8\").trim();\n    }\n\n    // Generate new secret if none exists\n    const secret = crypto.randomBytes(32).toString(\"base64\");\n    fs.writeFileSync(secretPath, secret);\n    return secret;\n  } catch (error) {\n    console.error(\"Error handling dev API secret:\", error);\n    return crypto.randomBytes(32).toString(\"base64\"); // Fallback to memory-only secret\n  }\n}\n\nexport async function getDevApiKey({\n  userId,\n  expiration,\n}: {\n  userId: string;\n  expiration: string | null;\n}) {\n  const secret = getSecret();\n  return jwt.sign({ userId, expiration }, secret, {\n    algorithm: \"HS256\",\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/authentication/secret.ts",
    "content": "import crypto from 'crypto';\n\nlet jwtSecret: string | null = null;\n\nexport function generateSecret(): string {\n    // Generate a secure random 256-bit (32-byte) secret and convert to base64\n    const secret = crypto.randomBytes(32).toString('base64');\n    jwtSecret = secret;\n    return secret;\n}\n\nexport function getSecret(): string {\n    if (!jwtSecret) {\n        throw new Error('JWT secret has not been generated');\n    }\n    return jwtSecret;\n} "
  },
  {
    "path": "Frontend/src/electron/authentication/token.ts",
    "content": "import jwt from \"jsonwebtoken\";\nimport { getSecret } from \"./secret.js\";\n\nexport async function getToken({ userId }: { userId: string }) {\n  return jwt.sign({ userId }, getSecret(), {\n    algorithm: \"HS256\",\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/crawl/cancelWebcrawl.ts",
    "content": "import { getToken } from \"../authentication/token.js\";\n\nexport async function cancelWebcrawl(payload: {\n  userId: number;\n}): Promise<Response> {\n  try {\n    const token = await getToken({ userId: payload.userId.toString() });\n    return await fetch(\"http://localhost:47372/cancel-crawl\", {\n      method: \"POST\",\n      headers: {\n        Authorization: `Bearer ${token}`,\n      },\n    });\n  } catch (error) {\n    console.error(\"Error canceling webcrawl:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/crawl/webcrawl.ts",
    "content": "import { getToken } from \"../authentication/token.js\";\nimport db from \"../db.js\";\nimport { BrowserWindow } from \"electron\";\n\ninterface ProgressData {\n  status: string;\n  data: {\n    message: string;\n    chunk?: number;\n    total_chunks?: number;\n    percent_complete?: string;\n  };\n}\n\nexport async function webcrawl(payload: {\n  base_url: string;\n  user_id: number;\n  user_name: string;\n  collection_id: number;\n  collection_name: string;\n  max_workers: number;\n}) {\n  const windows = BrowserWindow.getAllWindows();\n  const mainWindow = windows[0];\n\n  const sendProgress = (data: string | ProgressData) => {\n    try {\n      if (typeof data === \"string\") {\n        const jsonStr = data.replace(/^data:\\s*/, \"\").trim();\n        if (jsonStr) {\n          try {\n            const parsedData = JSON.parse(jsonStr) as ProgressData;\n\n            mainWindow?.webContents.send(\"ingest-progress\", parsedData);\n          } catch (parseError) {\n            console.error(\"[WEBCRAWL] JSON parse error:\", parseError);\n            console.error(\"[WEBCRAWL] Failed to parse data:\", jsonStr);\n          }\n        }\n      } else {\n        mainWindow?.webContents.send(\"ingest-progress\", data);\n      }\n    } catch (error) {\n      console.error(\"[WEBCRAWL] Error in sendProgress:\", error);\n      console.error(\"[WEBCRAWL] Problematic data:\", data);\n      mainWindow?.webContents.send(\"ingest-progress\", {\n        status: \"error\",\n        data: {\n          message: \"Error processing progress update\",\n        },\n      });\n    }\n  };\n\n  let apiKey = null;\n  try {\n    apiKey = db.getApiKey(payload.user_id, \"openai\");\n  } catch (error) {\n    console.error(\n      \"[WEBCRAWL] No OpenAI API key found, using local embeddings\",\n      error\n    );\n    apiKey = null;\n  }\n\n  let isLocal = false;\n  let localEmbeddingModel = \"\";\n  if (!apiKey) {\n    isLocal = true;\n    localEmbeddingModel =\n      
\"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\";\n  }\n  if (payload.collection_id) {\n    if (db.isCollectionLocal(payload.collection_id)) {\n      isLocal = true;\n      localEmbeddingModel = db.getCollectionLocalEmbeddingModel(\n        payload.collection_id\n      );\n    }\n  }\n\n  try {\n    const token = await getToken({ userId: payload.user_id.toString() });\n    db.addFileToCollection(\n      payload.user_id,\n      payload.collection_id,\n      payload.base_url\n    );\n    const response = await fetch(\"http://localhost:47372/webcrawl\", {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Accept: \"text/event-stream\",\n        Authorization: `Bearer ${token}`,\n      },\n      body: JSON.stringify({\n        base_url: payload.base_url,\n        max_workers: payload.max_workers,\n        collection_name: payload.collection_name,\n        collection_id: payload.collection_id,\n        user_id: payload.user_id,\n        user_name: payload.user_name,\n        api_key: apiKey,\n        is_local: isLocal,\n        local_embedding_model: isLocal ? 
localEmbeddingModel : undefined,\n      }),\n    });\n\n    if (!response.ok) {\n      throw new Error(`Server responded with status: ${response.status}`);\n    }\n\n    const reader = response.body?.getReader();\n    if (!reader) throw new Error(\"Failed to get response reader\");\n\n    const decoder = new TextDecoder();\n    let buffer = \"\";\n\n    while (true) {\n      const { done, value } = await reader.read();\n      if (done) {\n        break;\n      }\n\n      buffer += decoder.decode(value, { stream: true });\n\n      const messages = buffer.split(\"\\n\\n\");\n      buffer = messages.pop() || \"\";\n\n      for (const message of messages) {\n        if (message.trim()) {\n          sendProgress(message);\n        }\n      }\n    }\n\n    if (buffer.trim()) {\n      sendProgress(buffer);\n    }\n\n    return {\n      userId: payload.user_id,\n      conversationId: payload.collection_id,\n    };\n  } catch (error) {\n    console.error(\"[WEBCRAWL] Critical error in webcrawl:\", error);\n    sendProgress({\n      status: \"error\",\n      data: {\n        message: error instanceof Error ? error.message : \"Unknown error\",\n      },\n    });\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/db.ts",
    "content": "import { fileURLToPath } from \"url\";\nimport path from \"path\";\nimport fs from \"fs\";\nimport { app } from \"electron\";\nimport { isDev } from \"./util.js\";\nimport Database from \"better-sqlite3\";\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\nconst initialTools = [\n  { name: \"Web Search\", description: \"Search the web for information\" },\n];\n\nclass DatabaseService {\n  db: Database.Database;\n\n  constructor() {\n    let dbPath: string;\n    if (isDev()) {\n      dbPath = path.join(__dirname, \"..\", \"..\", \"Database\", \"database.sqlite\");\n    } else {\n      const userDataPath = app.getPath(\"userData\");\n      dbPath = path.join(userDataPath, \"Database\", \"database.sqlite\");\n    }\n    const dbDir = path.dirname(dbPath);\n    if (!fs.existsSync(dbDir)) {\n      fs.mkdirSync(dbDir, { recursive: true });\n    }\n\n    this.db = new Database(dbPath, {});\n  }\n\n  initializeDBTables = () => {\n    try {\n      this.db.exec(`\n        CREATE TABLE IF NOT EXISTS users (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          name TEXT NOT NULL,\n          created_at DATETIME DEFAULT CURRENT_TIMESTAMP\n        );\n        \n        CREATE TABLE IF NOT EXISTS settings (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          model TEXT,\n          promptId INTEGER,\n          temperature FLOAT,\n          provider TEXT,\n          maxTokens INTEGER,\n          vectorstore TEXT,\n          modelDirectory TEXT,\n          modelType TEXT,\n          modelLocation TEXT,\n          ollamaIntegration INTEGER DEFAULT 0,\n          ollamaModel TEXT,\n          baseUrl TEXT,\n          selectedAzureId INTEGER,\n          selectedCustomId INTEGER,\n          selectedExternalOllamaId INTEGER,\n          cot INTEGER DEFAULT 0,\n          webSearch INTEGER DEFAULT 0,\n          reasoningEffort TEXT,\n          FOREIGN KEY (user_id) REFERENCES users(id) 
ON DELETE CASCADE\n        );\n\n   \n        CREATE TABLE IF NOT EXISTS openrouter_models (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          model TEXT NOT NULL,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF NOT EXISTS azure_openai_models (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          name TEXT NOT NULL,\n          model TEXT NOT NULL,\n          endpoint TEXT NOT NULL,\n          api_key TEXT NOT NULL,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF NOT EXISTS custom_api (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          name TEXT NOT NULL,\n          endpoint TEXT NOT NULL,\n          api_key TEXT NOT NULL,\n          model TEXT,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n          \n        CREATE TABLE IF NOT EXISTS api_keys (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          key TEXT NOT NULL,\n          provider TEXT,\n          created_at DATETIME DEFAULT CURRENT_TIMESTAMP,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n         \n        CREATE TABLE IF NOT EXISTS tools (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          name TEXT,\n          description TEXT\n        );\n\n        CREATE TABLE IF NOT EXISTS user_tools (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          tool_id INTEGER,\n          docked INTEGER DEFAULT 0,\n          enabled INTEGER DEFAULT 0,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE,\n          FOREIGN KEY (tool_id) REFERENCES tools(id) ON DELETE CASCADE\n        );\n        CREATE TABLE IF NOT EXISTS prompts (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          
name TEXT,\n          prompt TEXT,\n          created_at DATETIME DEFAULT CURRENT_TIMESTAMP,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF NOT EXISTS conversations (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          title TEXT,\n          created_at DATETIME DEFAULT CURRENT_TIMESTAMP,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n  \n        CREATE TABLE IF NOT EXISTS collections (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          name TEXT,\n          description TEXT,\n          is_local INTEGER DEFAULT 0,\n          local_embedding_model TEXT,\n          type TEXT,\n          files TEXT, \n          created_at DATETIME DEFAULT CURRENT_TIMESTAMP,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF NOT EXISTS messages (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          conversation_id INTEGER,\n          user_id INTEGER,\n          role TEXT NOT NULL,\n          content TEXT NOT NULL,\n          reasoning_content TEXT DEFAULT NULL,\n          is_retrieval INTEGER DEFAULT 0,\n          collection_id INTEGER,\n          data_id INTEGER,\n          timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,\n          FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE,\n          FOREIGN KEY (collection_id) REFERENCES collections(id) ON DELETE CASCADE,\n          FOREIGN KEY (data_id) REFERENCES retrieved_data(id) ON DELETE SET NULL\n        );\n\n        CREATE TABLE IF NOT EXISTS retrieved_data (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          message_id INTEGER UNIQUE,\n          data_content TEXT NOT NULL,\n          FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF 
NOT EXISTS dev_api_keys (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          name TEXT NOT NULL,\n          user_id INTEGER,\n          key TEXT NOT NULL,\n          expiration DATETIME DEFAULT NULL,\n          created_at DATETIME DEFAULT CURRENT_TIMESTAMP,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF NOT EXISTS ollama_external (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          user_id INTEGER,\n          name TEXT NOT NULL,\n          endpoint TEXT NOT NULL,\n          api_key TEXT NOT NULL,\n          model TEXT NOT NULL,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF NOT EXISTS account (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          type TEXT NOT NULL,\n          provider TEXT NOT NULL,\n          providerAccountId TEXT NOT NULL,\n          refresh_token TEXT,\n          access_token TEXT,\n          expires_at INTEGER,\n          token_type TEXT,\n          scope TEXT,\n          id_token TEXT,\n          session_state TEXT,\n          refresh_token_expires_in INTEGER,\n          user_id INTEGER,\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n\n        CREATE TABLE IF NOT EXISTS verificationToken (\n          identifier TEXT PRIMARY KEY,\n          token TEXT NOT NULL,\n          expires DATETIME NOT NULL\n        );\n\n        CREATE TABLE IF NOT EXISTS web_user (\n          id INTEGER PRIMARY KEY AUTOINCREMENT,\n          email TEXT NOT NULL,\n          password TEXT NOT NULL,\n          username TEXT NOT NULL,\n          picture TEXT,\n          user_id INTEGER,\n          image TEXT,\n\n          created_at BIGINT DEFAULT (strftime('%s', 'now')),\n          updatedAt BIGINT DEFAULT (strftime('%s', 'now')),\n          FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n        );\n        \n      `);\n      console.log(\"Database initialized 
successfully\");\n      this.migrateSettingsTable();\n    } catch (error) {\n      console.error(\"Error initializing database:\", error);\n    }\n  };\n\n  migrateSettingsTable = () => {\n    try {\n      // Define expected schema\n      const expectedColumns = [\n        { name: \"id\", type: \"INTEGER\" },\n        { name: \"user_id\", type: \"INTEGER\" },\n        { name: \"model\", type: \"TEXT\" },\n        { name: \"promptId\", type: \"INTEGER\" },\n        { name: \"temperature\", type: \"FLOAT\" },\n        { name: \"provider\", type: \"TEXT\" },\n        { name: \"maxTokens\", type: \"INTEGER\" },\n        { name: \"vectorstore\", type: \"TEXT\" },\n        { name: \"modelDirectory\", type: \"TEXT\" },\n        { name: \"modelType\", type: \"TEXT\" },\n        { name: \"modelLocation\", type: \"TEXT\" },\n        { name: \"ollamaIntegration\", type: \"INTEGER\" },\n        { name: \"ollamaModel\", type: \"TEXT\" },\n        { name: \"baseUrl\", type: \"TEXT\" },\n        { name: \"selectedAzureId\", type: \"INTEGER\" },\n        { name: \"selectedCustomId\", type: \"INTEGER\" },\n        { name: \"cot\", type: \"INTEGER\" },\n        { name: \"webSearch\", type: \"INTEGER\" },\n        { name: \"reasoningEffort\", type: \"TEXT\" },\n        { name: \"selectedExternalOllamaId\", type: \"INTEGER\" },\n      ];\n      // Get current table info\n      const tableInfo = this.db\n        .prepare(\"PRAGMA table_info(settings)\")\n        .all() as { name: string; type: string }[];\n\n      // Check if schema matches exactly\n      const needsReset =\n        tableInfo.length !== expectedColumns.length ||\n        !tableInfo.every(\n          (col, i) =>\n            col.name === expectedColumns[i].name &&\n            col.type.toUpperCase().includes(expectedColumns[i].type)\n        );\n\n      if (needsReset) {\n        // Backup existing data\n        const existingData = this.db.prepare(\"SELECT * FROM settings\").all();\n\n        // Drop and recreate table 
with correct schema\n        this.db.exec(`\n          DROP TABLE IF EXISTS settings;\n          CREATE TABLE settings (\n            id INTEGER PRIMARY KEY AUTOINCREMENT,\n            user_id INTEGER,\n            model TEXT,\n            promptId INTEGER,\n            temperature FLOAT,\n            provider TEXT,\n            maxTokens INTEGER,\n            vectorstore TEXT,\n            modelDirectory TEXT,\n            modelType TEXT,\n            modelLocation TEXT,\n            ollamaIntegration INTEGER,\n            ollamaModel TEXT,\n            baseUrl TEXT,\n            selectedAzureId INTEGER,\n            selectedCustomId INTEGER,\n            cot INTEGER DEFAULT 0,\n            webSearch INTEGER DEFAULT 0,\n            reasoningEffort TEXT,\n            selectedExternalOllamaId INTEGER,\n            FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE\n          );\n        `);\n\n        // Attempt to restore data that matches the new schema\n        for (const row of existingData as {\n          user_id: number;\n          model: string;\n          promptId: number;\n          temperature: number;\n          provider: string;\n          maxTokens: number;\n          vectorstore: string;\n          modelDirectory: string;\n          modelType: string;\n          modelLocation: string;\n          ollamaIntegration: number;\n          ollamaModel: string;\n          baseUrl: string;\n          selectedAzureId: number;\n          selectedCustomId: number;\n          cot: number;\n          webSearch: number;\n          reasoningEffort: string;\n          selectedExternalOllamaId: number;\n        }[]) {\n          try {\n            // Check if user exists before restoring their settings\n            const userExists = this.db\n              .prepare(\"SELECT 1 FROM users WHERE id = ?\")\n              .get(row.user_id);\n            if (!userExists) continue;\n\n            this.db\n              .prepare(\n                `\n              
INSERT INTO settings (\n                user_id, model, promptId, temperature, provider, maxTokens,\n                vectorstore, modelDirectory, modelType, modelLocation,\n                ollamaIntegration, ollamaModel, baseUrl, selectedAzureId, selectedCustomId, cot, webSearch, reasoningEffort, selectedExternalOllamaId\n              ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n            `\n              )\n              .run(\n                row.user_id,\n                row.model,\n                row.promptId,\n                row.temperature,\n                row.provider,\n                row.maxTokens,\n                row.vectorstore,\n                row.modelDirectory,\n                row.modelType,\n                row.modelLocation,\n                row.ollamaIntegration,\n                row.ollamaModel,\n                row.baseUrl,\n                row.selectedAzureId,\n                row.selectedCustomId,\n                row.cot,\n                row.webSearch,\n                row.reasoningEffort,\n                row.selectedExternalOllamaId\n              );\n          } catch (error) {\n            console.error(\"Error restoring settings row:\", error);\n          }\n        }\n        console.log(\"Settings table reset and migrated to exact schema\");\n      }\n    } catch (error) {\n      console.error(\"Error checking/resetting settings table:\", error);\n    }\n  };\n\n  checkAndAddMissingColumns = () => {\n    try {\n      // Get table info for each table\n      interface TableInfo {\n        name: string;\n      }\n      const tables = this.db\n        .prepare(`SELECT name FROM sqlite_master WHERE type='table'`)\n        .all() as TableInfo[];\n\n      // Expected schema for each table\n      type TableName = keyof typeof expectedColumns;\n      const expectedColumns = {\n        tools: [\"id\", \"name\", \"description\"],\n        user_tools: [\"id\", \"user_id\", \"tool_id\", \"enabled\", \"docked\"],\n        openrouter_models: [\"id\", 
\"user_id\", \"model\"],\n        azure_openai_models: [\n          \"id\",\n          \"user_id\",\n          \"name\",\n          \"model\",\n          \"endpoint\",\n          \"api_key\",\n        ],\n        account: [\n          \"id\",\n          \"user_id\",\n          \"type\",\n          \"provider\",\n          \"providerAccountId\",\n          \"refresh_token\",\n          \"access_token\",\n          \"expires_at\",\n          \"token_type\",\n          \"scope\",\n          \"id_token\",\n          \"session_state\",\n          \"refresh_token_expires_in\",\n        ],\n        verificationToken: [\"identifier\", \"token\", \"expires\"],\n        web_user: [\n          \"id\",\n          \"email\",\n          \"password\",\n          \"username\",\n          \"user_id\",\n          \"picture\",\n          \"image\",\n          \"created_at\",\n          \"updatedAt\",\n        ],\n        custom_api: [\"id\", \"user_id\", \"name\", \"endpoint\", \"api_key\", \"model\"],\n        users: [\"id\", \"name\", \"created_at\"],\n        settings: [\n          \"id\",\n          \"user_id\",\n          \"model\",\n          \"promptId\",\n          \"temperature\",\n          \"provider\",\n          \"maxTokens\",\n          \"vectorstore\",\n          \"modelDirectory\",\n          \"modelType\",\n          \"modelLocation\",\n          \"ollamaIntegration\",\n          \"ollamaModel\",\n          \"baseUrl\",\n          \"selectedAzureId\",\n          \"selectedCustomId\",\n          \"cot\",\n          \"webSearch\",\n          \"reasoningEffort\",\n          \"selectedExternalOllamaId\",\n        ],\n        api_keys: [\"id\", \"user_id\", \"key\", \"provider\", \"created_at\"],\n        prompts: [\"id\", \"user_id\", \"name\", \"prompt\", \"created_at\"],\n        conversations: [\"id\", \"user_id\", \"title\", \"created_at\"],\n        collections: [\n          \"id\",\n          \"user_id\",\n          \"name\",\n          \"description\",\n          \"is_local\",\n          
\"local_embedding_model\",\n          \"type\",\n          \"files\",\n          \"created_at\",\n        ],\n        messages: [\n          \"id\",\n          \"conversation_id\",\n          \"user_id\",\n          \"role\",\n          \"content\",\n          \"reasoning_content\",\n          \"is_retrieval\",\n          \"collection_id\",\n          \"data_id\",\n          \"timestamp\",\n        ],\n        retrieved_data: [\"id\", \"message_id\", \"data_content\"],\n        ollama_external: [\"id\", \"user_id\", \"name\", \"endpoint\", \"api_key\", \"model\"],\n      } as const;\n\n      tables.forEach((table) => {\n        const tableName = table.name as TableName;\n        if (expectedColumns[tableName]) {\n          // Get current columns for the table\n          interface ColumnInfo {\n            name: string;\n            type: string;\n          }\n          const tableInfo = this.db\n            .prepare(`PRAGMA table_info(${tableName})`)\n            .all() as ColumnInfo[];\n          const currentColumns = tableInfo.map((col) => col.name);\n\n          // Find missing columns\n          const missingColumns = expectedColumns[tableName].filter(\n            (col) => !currentColumns.includes(col)\n          );\n\n          // Add missing columns\n          missingColumns.forEach((column) => {\n            try {\n              let columnDef = \"\";\n              // Define column types based on the original schema\n              switch (column) {\n                case \"id\":\n                  columnDef = \"INTEGER PRIMARY KEY AUTOINCREMENT\";\n                  break;\n                case \"created_at\":\n                case \"timestamp\":\n                  columnDef = \"DATETIME DEFAULT CURRENT_TIMESTAMP\";\n                  break;\n                case \"is_local\":\n                case \"is_retrieval\":\n                  columnDef = \"BOOLEAN DEFAULT FALSE\";\n                  break;\n                case \"user_id\":\n                case 
\"conversation_id\":\n                case \"collection_id\":\n                case \"data_id\":\n                case \"message_id\":\n                  columnDef = \"INTEGER\";\n                  break;\n                default:\n                  columnDef = \"TEXT\";\n              }\n\n              const alterQuery = `ALTER TABLE ${tableName} ADD COLUMN ${column} ${columnDef}`;\n              this.db.exec(alterQuery);\n            } catch (error) {\n              console.error(\n                `Error adding column ${column} to table ${tableName}:`,\n                error\n              );\n            }\n          });\n        }\n      });\n      console.log(\"Database columns checked and updated successfully\");\n    } catch (error) {\n      console.error(\"Error checking and adding columns:\", error);\n    }\n  };\n\n  init() {\n    this.initializeDBTables();\n    this.checkAndAddMissingColumns();\n    this.addInitialTools();\n  }\n\n  addInitialTools() {\n    initialTools.forEach((tool) => {\n      this.addTool(tool.name, tool.description);\n    });\n  }\n\n  getUsers() {\n    return this.db.prepare(\"SELECT * FROM users\").all() as {\n      id: number;\n      name: string;\n    }[];\n  }\n\n  getUserSettings(userId: string | number): Promise<UserSettings> {\n    const settings = this.db\n      .prepare(\"SELECT * FROM settings WHERE user_id = ?\")\n      .get(userId) as UserSettings;\n\n    return Promise.resolve(settings || {});\n  }\n\n  updateUserSettings(settings: UserSettings) {\n    // First, get the current settings\n    const currentSettings = this.db\n      .prepare(\"SELECT * FROM settings WHERE user_id = ?\")\n      .get(settings.userId) as UserSettings;\n    console.log(\"currentSettings\", currentSettings);\n    // Merge current settings with new settings, preserving non-null values\n    const updatedSettings = {\n      cot: settings.cot ?? currentSettings?.cot,\n      model: settings.model ?? 
currentSettings?.model,\n      promptId: settings.promptId ?? currentSettings?.promptId,\n      temperature: settings.temperature ?? currentSettings?.temperature,\n      provider: settings.provider ?? currentSettings?.provider,\n      maxTokens: settings.maxTokens ?? currentSettings?.maxTokens,\n      vectorstore: settings.vectorstore ?? currentSettings?.vectorstore,\n      modelDirectory:\n        settings.modelDirectory ?? currentSettings?.modelDirectory,\n      modelType: settings.modelType ?? currentSettings?.modelType,\n      modelLocation: settings.modelLocation ?? currentSettings?.modelLocation,\n      ollamaIntegration:\n        settings.ollamaIntegration ?? currentSettings?.ollamaIntegration,\n      ollamaModel: settings.ollamaModel ?? currentSettings?.ollamaModel,\n      baseUrl: settings.baseUrl ?? currentSettings?.baseUrl,\n      selectedAzureId:\n        settings.selectedAzureId ?? currentSettings?.selectedAzureId,\n      selectedCustomId:\n        settings.selectedCustomId ?? currentSettings?.selectedCustomId,\n      webSearch: settings.webSearch ?? currentSettings?.webSearch,\n      reasoningEffort:\n        settings.reasoningEffort ?? currentSettings?.reasoningEffort,\n      selectedExternalOllamaId:\n        settings.selectedExternalOllamaId ??\n        currentSettings?.selectedExternalOllamaId,\n    };\n    console.log(\"updatedSettings\", updatedSettings);\n    return this.db\n      .prepare(\n        \"UPDATE settings SET model = ?, promptId = ?, temperature = ?, provider = ?, maxTokens = ?, vectorstore = ?, modelDirectory = ?, modelType = ?, modelLocation = ?, ollamaIntegration = ?, ollamaModel = ?, baseUrl = ?, selectedAzureId = ?, selectedCustomId = ?, cot = ?, webSearch = ?, reasoningEffort = ?, selectedExternalOllamaId = ? 
WHERE user_id = ?\"\n      )\n      .run(\n        updatedSettings.model,\n        updatedSettings.promptId,\n        updatedSettings.temperature,\n        updatedSettings.provider,\n        updatedSettings.maxTokens,\n        updatedSettings.vectorstore,\n        updatedSettings.modelDirectory,\n        updatedSettings.modelType,\n        updatedSettings.modelLocation,\n        updatedSettings.ollamaIntegration,\n        updatedSettings.ollamaModel,\n        updatedSettings.baseUrl,\n        updatedSettings.selectedAzureId,\n        updatedSettings.selectedCustomId,\n        updatedSettings.cot,\n        updatedSettings.webSearch,\n        updatedSettings.reasoningEffort,\n        updatedSettings.selectedExternalOllamaId,\n        settings.userId\n      );\n  }\n\n  getUserPrompts(userId: number) {\n    const prompts = this.db\n      .prepare(\"SELECT * FROM prompts WHERE user_id = ?\")\n      .all(userId);\n    return prompts as UserPrompts[];\n  }\n\n  addUserPrompt(\n    userId: number,\n    name: string,\n    prompt: string\n  ): {\n    id: number;\n    name: string;\n    prompt: string;\n    userId: number;\n  } {\n    const result = this.db\n      .prepare(\"INSERT INTO prompts (user_id, name, prompt) VALUES (?, ?, ?)\")\n      .run(userId, name, prompt);\n    return {\n      id: result.lastInsertRowid as number,\n      name,\n      prompt,\n      userId,\n    };\n  }\n\n  addAPIKey(userId: number, key: string, provider: string) {\n    const existingKey = this.db\n      .prepare(\"SELECT * FROM api_keys WHERE user_id = ? AND provider = ?\")\n      .get(userId, provider) as { id: number };\n    if (existingKey) {\n      return this.db\n        .prepare(\"UPDATE api_keys SET key = ? 
WHERE id = ?\")\n        .run(key, existingKey.id);\n    } else {\n      return this.db\n        .prepare(\n          \"INSERT INTO api_keys (user_id, key, provider) VALUES (?, ?, ?)\"\n        )\n        .run(userId, key, provider);\n    }\n  }\n\n  updateUserPrompt(userId: number, id: number, name: string, prompt: string) {\n    return this.db\n      .prepare(\n        \"UPDATE prompts SET name = ?, prompt = ? WHERE id = ? AND user_id = ?\"\n      )\n      .run(name, prompt, id, userId);\n  }\n\n  isCollectionLocal(collectionId: number): boolean {\n    const collection = this.db\n      .prepare(\"SELECT is_local FROM collections WHERE id = ?\")\n      .get(collectionId) as { is_local: boolean };\n    return collection.is_local;\n  }\n\n  getCollectionLocalEmbeddingModel(collectionId: number): string {\n    const collection = this.db\n      .prepare(\"SELECT local_embedding_model FROM collections WHERE id = ?\")\n      .get(collectionId) as { local_embedding_model: string };\n    return collection.local_embedding_model;\n  }\n  createCollection(\n    userId: number,\n    name: string,\n    description: string,\n    type: string,\n    isLocal: number,\n    localEmbeddingModel: string\n  ) {\n    const checkIfExists = this.db\n      .prepare(\"SELECT * FROM collections WHERE user_id = ? 
AND name = ?\")\n      .get(userId, name);\n    if (checkIfExists) {\n      return {\n        error: \"Collection name already exists\",\n      };\n    }\n    const result = this.db\n      .prepare(\n        \"INSERT INTO collections (user_id, name, description, type, is_local, local_embedding_model) VALUES (?, ?, ?, ?, ?, ?)\"\n      )\n      .run(userId, name, description, type, isLocal, localEmbeddingModel);\n    return {\n      id: result.lastInsertRowid as number,\n      name,\n      description,\n      type,\n      userId,\n    };\n  }\n  addFileToCollection(userId: number, id: number, file: string) {\n    const collection = this.db\n      .prepare(\"SELECT * FROM collections WHERE id = ? AND user_id =?\")\n      .get(id, userId) as Collection;\n    if (collection) {\n      // files is a string and needs to be split into an array\n      const files = collection.files ? collection.files.split(\",\") : [];\n      files.push(file);\n      return this.db\n        .prepare(\n          \"UPDATE collections SET files = ? WHERE id = ? AND user_id = ?\"\n        )\n        .run(files.join(\",\"), id, userId);\n    }\n  }\n\n  deleteCollection(userId: number, id: number) {\n    return this.db\n      .prepare(\"DELETE FROM collections WHERE id = ? AND user_id = ?\")\n      .run(id, userId);\n  }\n  getCollection(collectionId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM collections WHERE id = ?\")\n      .get(collectionId) as Collection;\n  }\n  getCollectionName(collectionId: number) {\n    return this.db\n      .prepare(\"SELECT name FROM collections WHERE id = ?\")\n      .get(collectionId) as { name: string };\n  }\n  getFilesInCollection(userId: number, collectionId: number) {\n    return this.db\n      .prepare(\"SELECT files FROM collections WHERE id = ? 
AND user_id =?\")\n      .get(collectionId, userId) as { files: string };\n  }\n  getUserCollections(userId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM collections WHERE user_id = ?\")\n      .all(userId);\n  }\n\n  addUser(name: string): { id: number; name: string; error?: string } {\n    const existingUser = this.db\n      .prepare(\"SELECT * FROM users WHERE name = ?\")\n      .get(name);\n    if (existingUser) {\n      return {\n        id: -1,\n        name: \"\",\n        error: \"User already exists\",\n      };\n    }\n    const user = this.db\n      .prepare(\"INSERT INTO users (name) VALUES (?)\")\n      .run(name);\n    const defaultPrompt = \"You are a helpful assistant\";\n    const promptName = \"Default Prompt\";\n    const addDefaultPrompt = this.db\n      .prepare(\"INSERT INTO prompts (user_id, name, prompt) VALUES (?, ?, ?)\")\n      .run(user.lastInsertRowid, promptName, defaultPrompt);\n    const promptId = addDefaultPrompt.lastInsertRowid;\n    this.db\n      .prepare(\"INSERT INTO settings (user_id, promptId) VALUES (?, ?)\")\n      .run(user.lastInsertRowid, promptId);\n    return { id: user.lastInsertRowid as number, name };\n  }\n\n  getUserApiKeys(userId: number): Promise<ApiKey[]> {\n    const apiKeys = this.db\n      .prepare(\"SELECT * FROM api_keys WHERE user_id = ?\")\n      .all(userId);\n    return Promise.resolve(apiKeys as unknown as ApiKey[]);\n  }\n\n  getApiKey(userId: number, provider: string): string {\n    const apiKey = this.db\n      .prepare(\"SELECT * FROM api_keys WHERE user_id = ? 
AND provider = ?\")\n      .get(userId, provider) as { key: string };\n    return apiKey.key;\n  }\n\n  getUserConversations(userId: number) {\n    return this.db\n      .prepare(\n        `\n        SELECT DISTINCT c.* \n        FROM conversations c\n        INNER JOIN messages m ON c.id = m.conversation_id\n        WHERE c.user_id = ?\n        ORDER BY c.created_at DESC\n      `\n      )\n      .all(userId);\n  }\n\n  getUserConversationTitle(userId: number, conversationId: number) {\n    return this.db\n      .prepare(\"SELECT title FROM conversations WHERE id = ? AND user_id =?\")\n      .get(conversationId, userId) as string;\n  }\n\n  addUserConversation(userId: number, title: string) {\n    const result = this.db\n      .prepare(\"INSERT INTO conversations (user_id, title) VALUES (?, ?)\")\n      .run(userId, title);\n\n    return {\n      id: result.lastInsertRowid as number,\n      title,\n      userId,\n    };\n  }\n\n  deleteUserConversation(userId: number, id: number) {\n    return this.db\n      .prepare(\"DELETE FROM conversations WHERE id = ? AND user_id = ?\")\n      .run(id, userId);\n  }\n\n  getConversationMessages(userId: number, conversationId: number) {\n    return this.db\n      .prepare(\n        \"SELECT * FROM messages WHERE user_id = ? 
AND conversation_id = ?\"\n      )\n      .all(userId, conversationId);\n  }\n  addUserMessage(\n    userId: number,\n    conversationId: number,\n    role: string,\n    content: string,\n    reasoningContent?: string,\n    collectionId?: number,\n    dataId?: number\n  ) {\n    const timestamp = new Date().toISOString();\n    return this.db\n      .prepare(\n        \"INSERT INTO messages (user_id, conversation_id, role, content, reasoning_content, collection_id, data_id, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\"\n      )\n      .run(\n        userId,\n        conversationId,\n        role,\n        content,\n        reasoningContent || null,\n        collectionId || null,\n        dataId || null,\n        timestamp\n      );\n  }\n\n  addReasoningContent(messageId: number, reasoningContent: string) {\n    return this.db\n      .prepare(\"UPDATE messages SET reasoning_content = ? WHERE id = ?\")\n      .run(reasoningContent, messageId);\n  }\n\n  deleteUserMessage(userId: number, id: number) {\n    return this.db\n      .prepare(\"DELETE FROM messages WHERE id = ? AND user_id = ?\")\n      .run(id, userId);\n  }\n\n  getUserPrompt(userId: number, promptId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM prompts WHERE id = ? AND user_id =?\")\n      .get(promptId, userId) as { prompt: string };\n  }\n  updateMessageDataId(messageId: number, dataId: number) {\n    return this.db\n      .prepare(\"UPDATE messages SET data_id = ? 
WHERE id = ?\")\n      .run(dataId, messageId);\n  }\n  addRetrievedData(messageId: number, data: string): number {\n    const result = this.db\n      .prepare(\n        \"INSERT INTO retrieved_data (message_id, data_content) VALUES (?, ?)\"\n      )\n      .run(messageId, data);\n    const dataId = result.lastInsertRowid as number;\n    this.updateMessageDataId(messageId, dataId);\n    return dataId;\n  }\n  getConversationMessagesWithData(userId: number, conversationId: number) {\n    const messages = this.db\n      .prepare(\n        `\n        SELECT m.*, rd.data_content \n        FROM messages m\n        LEFT JOIN retrieved_data rd ON m.data_id = rd.id\n        WHERE m.user_id = ? AND m.conversation_id = ?\n      `\n      )\n      .all(userId, conversationId);\n    return messages;\n  }\n\n  addDevAPIKey(\n    userId: number,\n    name: string,\n    key: string,\n    expiration: string | null\n  ) {\n    return this.db\n      .prepare(\n        \"INSERT INTO dev_api_keys (user_id, name, key, expiration) VALUES (?, ?, ?, ?)\"\n      )\n      .run(userId, name, key, expiration);\n  }\n  getDevAPIKeys(userId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM dev_api_keys WHERE user_id = ?\")\n      .all(userId);\n  }\n  deleteDevAPIKey(userId: number, id: number) {\n    return this.db\n      .prepare(\"DELETE FROM dev_api_keys WHERE id = ? AND user_id = ?\")\n      .run(id, userId);\n  }\n\n  getOpenRouterModel(userId: number) {\n    return this.db\n      .prepare(\"SELECT model FROM openrouter_models WHERE user_id = ?\")\n      .get(userId) as { model: string };\n  }\n  addOpenRouterModel(userId: number, model: string) {\n    const existingModel = this.db\n      .prepare(\n        \"SELECT * FROM openrouter_models WHERE user_id = ? 
AND model = ?\"\n      )\n      .get(userId, model);\n    if (existingModel) {\n      return {\n        error: \"Model already exists\",\n      };\n    }\n    return this.db\n      .prepare(\"INSERT INTO openrouter_models (user_id, model) VALUES (?, ?)\")\n      .run(userId, model);\n  }\n  deleteOpenRouterModel(userId: number, id: number) {\n    return this.db\n      .prepare(\"DELETE FROM openrouter_models WHERE id = ? AND user_id = ?\")\n      .run(id, userId);\n  }\n  getOpenRouterModels(userId: number) {\n    const rows = this.db\n      .prepare(\"SELECT model FROM openrouter_models WHERE user_id = ?\")\n      .all(userId) as { model: string }[];\n    return rows.map((row) => row.model);\n  }\n\n  getAzureOpenAIModels(userId: number) {\n    const rows = this.db\n      .prepare(\"SELECT * FROM azure_openai_models WHERE user_id = ?\")\n      .all(userId) as {\n      id: number;\n      name: string;\n      model: string;\n      endpoint: string;\n      api_key: string;\n    }[];\n    return rows;\n  }\n  addAzureOpenAIModel(\n    userId: number,\n    name: string,\n    model: string,\n    endpoint: string,\n    api_key: string\n  ) {\n    const result = this.db\n      .prepare(\n        \"INSERT INTO azure_openai_models (user_id, name, model, endpoint, api_key) VALUES (?, ?, ?, ?, ?)\"\n      )\n      .run(userId, name, model, endpoint, api_key);\n    return result.lastInsertRowid as number;\n  }\n  deleteAzureOpenAIModel(userId: number, id: number) {\n    return this.db\n      .prepare(\"DELETE FROM azure_openai_models WHERE id = ? AND user_id = ?\")\n      .run(id, userId);\n  }\n  getAzureOpenAIModel(userId: number, id: number) {\n    return this.db\n      .prepare(\"SELECT * FROM azure_openai_models WHERE id = ? 
AND user_id =?\")\n      .get(id, userId) as {\n      name: string;\n      model: string;\n      endpoint: string;\n      api_key: string;\n    };\n  }\n  getCustomAPI(userId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM custom_api WHERE user_id = ?\")\n      .all(userId) as {\n      id: number;\n      user_id: number;\n      name: string;\n      endpoint: string;\n      api_key: string;\n      model: string;\n    }[];\n  }\n  getCustomAPIs(userId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM custom_api WHERE user_id = ?\")\n      .all(userId) as {\n      id: number;\n      user_id: number;\n      name: string;\n      endpoint: string;\n      api_key: string;\n      model: string;\n    }[];\n  }\n  deleteCustomAPI(userId: number, id: number) {\n    return this.db\n      .prepare(\"DELETE FROM custom_api WHERE id = ? AND user_id = ?\")\n      .run(id, userId);\n  }\n  addCustomAPI(\n    userId: number,\n    name: string,\n    endpoint: string,\n    api_key: string,\n    model: string\n  ) {\n    const result = this.db\n      .prepare(\n        \"INSERT INTO custom_api (user_id, name, endpoint, api_key, model) VALUES (?, ?, ?, ?, ?)\"\n      )\n      .run(userId, name, endpoint, api_key, model);\n    return result.lastInsertRowid as number;\n  }\n\n  // Tool Section\n\n  addTool(name: string, description: string) {\n    const existingTool = this.db\n      .prepare(\"SELECT * FROM tools WHERE name = ?\")\n      .get(name);\n    if (existingTool) {\n      return;\n    }\n    return this.db\n      .prepare(\"INSERT INTO tools ( name, description) VALUES ( ?, ?)\")\n      .run(name, description);\n  }\n\n  getUserTools(userId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM user_tools WHERE user_id = ?\")\n      .all(userId) as {\n      id: number;\n      user_id: number;\n      tool_id: number;\n      enabled: number;\n      docked: number;\n    }[];\n  }\n\n  addUserTool(userId: number, toolId: number, enabled: number, 
docked: number) {\n    const existingUserTool = this.db\n      .prepare(\"SELECT * FROM user_tools WHERE user_id = ? AND tool_id = ?\")\n      .get(userId, toolId);\n    if (existingUserTool) {\n      return;\n    }\n    const newToolId = this.db\n      .prepare(\n        \"INSERT INTO user_tools (user_id, tool_id, enabled, docked) VALUES (?, ?, ?, ?)\"\n      )\n      .run(userId, toolId, enabled, docked).lastInsertRowid;\n    return newToolId;\n  }\n\n  removeUserTool(userId: number, toolId: number) {\n    return this.db\n      .prepare(\"DELETE FROM user_tools WHERE user_id = ? AND tool_id = ?\")\n      .run(userId, toolId);\n  }\n\n  updateUserTool(\n    userId: number,\n    toolId: number,\n    enabled: number,\n    docked: number\n  ) {\n    const existingTool = this.db\n      .prepare(\"SELECT * FROM user_tools WHERE user_id = ? AND tool_id = ?\")\n      .get(userId, toolId);\n    if (!existingTool) {\n      const addedTool = this.addUserTool(userId, toolId, enabled, docked);\n      return addedTool;\n    }\n    return this.db\n      .prepare(\n        \"UPDATE user_tools SET enabled = ?, docked = ? WHERE user_id = ? AND tool_id = ?\"\n      )\n      .run(enabled, docked, userId, toolId);\n  }\n\n  getTools() {\n    return this.db.prepare(\"SELECT * FROM tools\").all();\n  }\n\n  addExternalOllama(\n    userId: number,\n    name: string,\n    endpoint: string,\n    api_key: string,\n    model: string\n  ) {\n    const existingOllama = this.db\n      .prepare(\"SELECT * FROM ollama_external WHERE user_id = ? 
AND name = ?\")\n      .get(userId, name);\n    if (existingOllama) {\n      return {\n        error: \"Ollama already exists\",\n      };\n    }\n    const lastInsertRowid = this.db\n      .prepare(\n        \"INSERT INTO ollama_external (user_id, name, endpoint, api_key, model) VALUES (?, ?, ?, ?, ?)\"\n      )\n      .run(userId, name, endpoint, api_key, model).lastInsertRowid;\n    return lastInsertRowid;\n  }\n  getExternalOllama(userId: number) {\n    return this.db\n      .prepare(\"SELECT * FROM ollama_external WHERE user_id = ?\")\n      .all(userId) as ExternalOllama[];\n  }\n}\n\nexport default new DatabaseService();\n"
  },
  {
    "path": "Frontend/src/electron/embedding/cancelEmbed.ts",
    "content": "import { getToken } from \"../authentication/token.js\";\n\nexport async function cancelEmbed(payload: {\n  userId: number;\n}): Promise<Response> {\n  try {\n    const token = await getToken({ userId: payload.userId.toString() });\n    return await fetch(\"http://localhost:47372/cancel-embed\", {\n      method: \"POST\",\n      headers: {\n        Authorization: `Bearer ${token}`,\n      },\n    });\n  } catch (error) {\n    console.error(\"Error canceling embed:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/embedding/vectorstoreQuery.ts",
    "content": "import { getToken } from \"../authentication/token.js\";\nimport db from \"../db.js\";\n\nexport async function vectorstoreQuery(payload: {\n  query: string;\n  userId: number;\n  userName: string;\n  collectionId: number;\n  collectionName: string;\n}) {\n  let apiKey = null;\n  try {\n    apiKey = db.getApiKey(payload.userId, \"openai\");\n  } catch {\n    apiKey = null;\n  }\n  let isLocal = false;\n  let localEmbeddingModel = \"\";\n\n  if (!apiKey) {\n    isLocal = true;\n    localEmbeddingModel =\n      \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\";\n  }\n  if (payload.collectionId) {\n    if (db.isCollectionLocal(payload.collectionId)) {\n      isLocal = true;\n      localEmbeddingModel = db.getCollectionLocalEmbeddingModel(\n        payload.collectionId\n      );\n    }\n  }\n  const token = await getToken({ userId: payload.userId.toString() });\n  const response = await fetch(`http://localhost:47372/vector-query`, {\n    method: \"POST\",\n    headers: {\n      \"Content-Type\": \"application/json\",\n      Authorization: `Bearer ${token}`,\n    },\n\n    body: JSON.stringify({\n      query: payload.query,\n      collection: payload.collectionId,\n      collection_name: payload.collectionName,\n      user: payload.userId,\n      api_key: apiKey,\n      top_k: 5,\n      is_local: isLocal,\n      local_embedding_model: localEmbeddingModel,\n    }),\n  });\n\n  const data = await response.json();\n  return data;\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/azureHandlers.ts",
    "content": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport db from \"../db.js\";\n\nexport async function setupAzureOpenAI() {\n  ipcMainDatabaseHandle(\"addAzureOpenAIModel\", async (payload) => {\n    const id = await db.addAzureOpenAIModel(\n      payload.userId,\n      payload.name,\n      payload.model,\n      payload.endpoint,\n      payload.api_key\n    );\n    return {\n      id,\n      userId: payload.userId,\n      name: payload.name,\n      model: payload.model,\n      endpoint: payload.endpoint,\n      api_key: payload.api_key\n    };\n  });\n\n  ipcMainDatabaseHandle(\"deleteAzureOpenAIModel\", async (payload) => {\n    await db.deleteAzureOpenAIModel(payload.userId, payload.id);\n    return { userId: payload.userId, id: payload.id };\n  });\n\n  ipcMainDatabaseHandle(\"getAzureOpenAIModels\", async (payload) => {\n    const models = await db.getAzureOpenAIModels(payload.userId);\n    return { userId: payload.userId, models };\n  });\n\n  ipcMainDatabaseHandle(\"getAzureOpenAIModel\", async (payload) => {\n    const model = await db.getAzureOpenAIModel(payload.userId, payload.id);\n    return { ...model, userId: payload.userId, id: payload.id };\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/chatHandlers.ts",
    "content": "import { ipcMain } from \"electron\";\nimport { chatRequest } from \"../llms/llms.js\";\nimport { keyValidation } from \"../llms/keyValidation.js\";\nimport { BrowserWindow } from \"electron\";\nconst activeRequests = new Map();\n\nexport function setupChatHandlers(mainWindow: BrowserWindow) {\n  ipcMain.handle(\"keyValidation\", async (event, { apiKey, inputProvider }) => {\n    return keyValidation({ apiKey, inputProvider });\n  });\n  ipcMain.handle(\n    \"chatRequest\",\n    async (\n      event,\n      { messages, activeUser, conversationId, requestId, title, collectionId }\n    ) => {\n      const controller = new AbortController();\n      activeRequests.set(requestId, controller);\n      try {\n        const result = await chatRequest(\n          messages,\n          activeUser,\n          mainWindow,\n          conversationId,\n          title,\n          collectionId,\n          controller.signal\n        );\n        activeRequests.delete(requestId);\n        return {\n          messages: result.messages,\n          id: result.id,\n          title: result.title,\n          collectionId: collectionId || undefined,\n        };\n      } catch (error) {\n        activeRequests.delete(requestId);\n        if (error instanceof Error && error.name === \"AbortError\") {\n          return { error: \"Request was aborted\" };\n        }\n        throw error;\n      }\n    }\n  );\n\n  ipcMain.on(\"abortChatRequest\", (event, requestId) => {\n    const controller = activeRequests.get(requestId);\n    if (controller) {\n      controller.abort();\n      activeRequests.delete(requestId);\n    }\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/closeEventHandler.ts",
    "content": "import { app, BrowserWindow } from \"electron\";\n\nexport function handleCloseEvents(mainWindow: BrowserWindow) {\n  let willClose = false;\n  mainWindow.on(\"close\", (e) => {\n    if (willClose) return;\n    e.preventDefault();\n    mainWindow.hide();\n    if (app.dock) app.dock.hide();\n  });\n  app.on(\"before-quit\", () => (willClose = true));\n  mainWindow.on(\"show\", () => (willClose = false));\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/collectionHandlers.ts",
    "content": "import { vectorstoreQuery } from \"../embedding/vectorstoreQuery.js\";\nimport { websiteFetch } from \"../storage/websiteFetch.js\";\nimport { youtubeIngest } from \"../youtube/youtubeIngest.js\";\nimport { ipcMainDatabaseHandle } from \"../util.js\";\nimport { webcrawl } from \"../crawl/webcrawl.js\";\nimport { cancelWebcrawl } from \"../crawl/cancelWebcrawl.js\";\nimport { cancelEmbed } from \"../embedding/cancelEmbed.js\";\nimport { addFileToCollection } from \"../storage/newFile.js\";\nimport { getFilesInCollection } from \"../storage/getFiles.js\";\nimport { openCollectionFolder } from \"../storage/openCollectionFolder.js\";\nimport { deleteCollection } from \"../storage/deleteCollection.js\";\nimport db from \"../db.js\";\n\nexport function setupCollectionHandlers() {\n  ipcMainDatabaseHandle(\"openCollectionFolder\", async (payload) => {\n    try {\n      openCollectionFolder(payload.filepath);\n      return { filepath: payload.filepath };\n    } catch (error) {\n      console.error(\"Error opening collection folder:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\n    \"deleteCollection\",\n    async (payload: { userId: number; id: number; collectionName: string }) => {\n      try {\n        deleteCollection(payload.id, payload.collectionName, payload.userId);\n        db.deleteCollection(payload.userId, payload.id);\n        return {\n          userId: payload.userId,\n          id: payload.id,\n          collectionName: payload.collectionName,\n        };\n      } catch (error) {\n        console.error(\"Error deleting collection:\", error);\n        throw error;\n      }\n    }\n  );\n\n  ipcMainDatabaseHandle(\"getFilesInCollection\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        collectionId: payload.collectionId,\n        files: await getFilesInCollection(payload.userId, payload.collectionId),\n      };\n    } catch (error) {\n      console.error(\"Error getting files 
in collection:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"addFileToCollection\", async (payload: {\n    userId: number;\n    userName: string;\n    collectionId: number;\n    collectionName: string;\n    fileName: string;\n    fileContent: string;\n  }) => {\n    try {\n      const result = await addFileToCollection(\n        payload.userId,\n        payload.userName,\n        payload.collectionId,\n        payload.collectionName,\n        payload.fileName,\n        payload.fileContent\n      );\n\n      if (result.success) {\n        return {\n          userId: payload.userId,\n          userName: payload.userName,\n          collectionId: payload.collectionId,\n          collectionName: payload.collectionName,\n          fileName: payload.fileName,\n          fileContent: payload.fileContent,\n          result\n        };\n      } else {\n        throw new Error(result.error || 'Unknown error occurred');\n      }\n    } catch (error) {\n      console.error(\"Error adding file to collection:\", error);\n      \n      if (error instanceof Error && (error.message.includes('terminated') || error.message.includes('socket'))) {\n        throw new Error('Connection to processing server was lost. 
Please try again.');\n      }\n      \n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"cancelEmbed\", async (payload) => {\n    try {\n      const response = await cancelEmbed(payload);\n      const result = await response.json();\n      return result;\n    } catch (error) {\n      console.error(\"Error canceling embed:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"vectorstoreQuery\", async (payload) => {\n    try {\n      const result = await vectorstoreQuery(payload);\n      return result;\n    } catch (error) {\n      console.error(\"Error querying vectorstore:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"webcrawl\", async (payload) => {\n    try {\n      await webcrawl(payload);\n      return payload;\n    } catch (error) {\n      console.error(\"[WEBCRAWL] Error:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"cancelWebcrawl\", async (payload) => {\n    try {\n      await cancelWebcrawl(payload);\n      return payload;\n    } catch (error) {\n      console.error(\"[CANCEL WEBCRAWL] Error:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"websiteFetch\", async (payload) => {\n    try {\n      const result = await websiteFetch(payload);\n      return {\n        ...result,\n        userId: payload.userId,\n        userName: payload.userName,\n        collectionId: payload.collectionId,\n        collectionName: payload.collectionName,\n      };\n    } catch (error) {\n      console.error(\"Error fetching website:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"youtubeIngest\", async (payload) => {\n    try {\n      await youtubeIngest(payload);\n      return payload;\n    } catch (error) {\n      console.error(\"Error ingesting youtube:\", error);\n      throw error;\n    }\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/customApiHandlers.ts",
    "content": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport db from \"../db.js\";\n\nexport async function setupCustomApiHandlers() {\n  ipcMainDatabaseHandle(\"addCustomAPI\", async (payload) => {\n    const apiId = await db.addCustomAPI(\n      payload.userId,\n      payload.name,\n      payload.endpoint,\n      payload.api_key,\n      payload.model\n    );\n    return {\n      userId: payload.userId,\n      name: payload.name,\n      endpoint: payload.endpoint,\n      api_key: payload.api_key,\n      model: payload.model,\n      id: apiId,\n    };\n  });\n  ipcMainDatabaseHandle(\"deleteCustomAPI\", async (payload) => {\n    await db.deleteCustomAPI(payload.userId, payload.id);\n    return payload;\n  });\n  ipcMainDatabaseHandle(\"getCustomAPI\", async (payload) => {\n    const apis = await db.getCustomAPI(payload.userId);\n    return { userId: payload.userId, api: apis };\n  });\n  ipcMainDatabaseHandle(\"getCustomAPIs\", async (payload) => {\n    const apis = await db.getCustomAPIs(payload.userId);\n    return { userId: payload.userId, api: apis };\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/dbHandlers.ts",
    "content": "import db from \"../db.js\";\nimport { ipcMainHandle, ipcMainDatabaseHandle } from \"../util.js\";\nimport { getDevApiKey } from \"../authentication/devApi.js\";\nexport function setupDbHandlers() {\n  ipcMainDatabaseHandle(\"getUserSettings\", async (payload) => {\n    try {\n      const userSettings = await db.getUserSettings(payload.userId);\n      return { userId: payload.userId, ...userSettings };\n    } catch (error) {\n      console.error(\"Error getting user settings:\", error);\n      throw error;\n    }\n  });\n  ipcMainHandle(\"addUser\", async (_, { name }) => {\n    try {\n      const result = await db.addUser(name as string);\n      if (result.error) {\n        return { name: result.name, error: result.error };\n      }\n      return { name: result.name };\n    } catch (error) {\n      console.error(\"Error adding user:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"addUserConversation\", async (payload) => {\n    try {\n      let title: string | undefined;\n      const conversation = await db.addUserConversation(\n        payload.userId,\n        title ?? \"New Conversation\"\n      );\n      return {\n        userId: conversation.userId,\n        id: Number(conversation.id),\n        input: payload.input,\n        title: conversation.title ?? 
\"New Conversation\",\n      };\n    } catch (error) {\n      console.error(\"Error adding user conversation:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"getUserCollections\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        collections: await db.getUserCollections(payload.userId),\n      };\n    } catch (error) {\n      console.error(\"Error getting user collections:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"updateUserSettings\", async (payload) => {\n    try {\n      await db.updateUserSettings(payload);\n      return payload;\n    } catch (error) {\n      console.error(\"Error updating user settings:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"getUserApiKeys\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        apiKeys: await db.getUserApiKeys(payload.userId),\n      };\n    } catch (error) {\n      console.error(\"Error getting user api keys:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"getUserPrompts\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        prompts: await db.getUserPrompts(payload.userId),\n      };\n    } catch (error) {\n      console.error(\"Error getting user prompts:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"getUsers\", async () => {\n    try {\n      const users = await db.getUsers();\n      return {\n        users: users.map((user: { name: string; id: number }) => ({\n          name: user.name,\n          id: user.id,\n        })),\n      };\n    } catch (error) {\n      console.error(\"Error getting users:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"createCollection\", async (payload) => {\n    try {\n      const createdCollection = await db.createCollection(\n        payload.userId,\n        payload.name,\n        payload.description,\n        
payload.type,\n        payload.isLocal ? 1 : 0,\n        payload.localEmbeddingModel\n      );\n\n      return {\n        userId: payload.userId,\n        name: payload.name,\n        description: payload.description,\n        type: payload.type,\n        id: createdCollection.id,\n        isLocal: Boolean(payload.isLocal),\n        localEmbeddingModel: payload.localEmbeddingModel,\n      };\n    } catch (error) {\n      console.error(\"Error creating collection:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"deleteConversation\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        conversationId: payload.conversationId,\n        result: await db.deleteUserConversation(\n          payload.userId,\n          payload.conversationId\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error deleting conversation:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"getConversationMessagesWithData\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        conversationId: payload.conversationId,\n        messages: await db.getConversationMessagesWithData(\n          payload.userId,\n          payload.conversationId\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error getting conversation messages:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"getConversationMessages\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        conversationId: payload.conversationId,\n        messages: await db.getConversationMessages(\n          payload.userId,\n          payload.conversationId\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error getting conversation messages:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"addUserPrompt\", async (payload) => {\n    try {\n      const promptAdded = await db.addUserPrompt(\n        
payload.userId,\n        payload.name,\n        payload.prompt\n      );\n      return {\n        userId: promptAdded.userId,\n        name: promptAdded.name,\n        prompt: promptAdded.prompt,\n        id: promptAdded.id,\n      };\n    } catch (error) {\n      console.error(\"Error adding user prompt:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"updateUserPrompt\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        id: payload.id,\n        name: payload.name,\n        prompt: payload.prompt,\n        result: await db.updateUserPrompt(\n          payload.userId,\n          payload.id,\n          payload.name,\n          payload.prompt\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error updating user prompt:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"addAPIKey\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        key: payload.key,\n        provider: payload.provider,\n        result: await db.addAPIKey(\n          payload.userId,\n          payload.key,\n          payload.provider\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error adding API key:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"getUserConversations\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        conversations: await db.getUserConversations(payload.userId),\n      };\n    } catch (error) {\n      console.error(\"Error getting user conversations:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"addDevAPIKey\", async (payload) => {\n    try {\n      const newApiKey = await getDevApiKey({\n        userId: payload.userId.toString(),\n        expiration: payload.expiration,\n      });\n      const result = await db.addDevAPIKey(\n        payload.userId,\n        payload.name,\n        newApiKey,\n        payload.expiration ?? 
null\n      );\n      return {\n        userId: payload.userId,\n        name: payload.name,\n        key: newApiKey,\n        expiration: payload.expiration,\n        id: result.lastInsertRowid,\n      } as Keys;\n    } catch (error) {\n      console.error(\"Error adding dev API key:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"getDevAPIKeys\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        keys: await db.getDevAPIKeys(payload.userId),\n      };\n    } catch (error) {\n      console.error(\"Error getting dev API keys:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"deleteDevAPIKey\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        id: payload.id,\n        result: await db.deleteDevAPIKey(payload.userId, payload.id),\n      };\n    } catch (error) {\n      console.error(\"Error deleting dev API key:\", error);\n      throw error;\n    }\n  });\n\n  // Tools Section\n\n  ipcMainDatabaseHandle(\"getUserTools\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        tools: await db.getUserTools(payload.userId),\n      };\n    } catch (error) {\n      console.error(\"Error getting tools:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"addUserTool\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        toolId: payload.toolId,\n        enabled: payload.enabled,\n        docked: payload.docked,\n        result: await db.addUserTool(\n          payload.userId,\n          payload.toolId,\n          payload.enabled,\n          payload.docked\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error adding user tool:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"removeUserTool\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        toolId: payload.toolId,\n        
result: await db.removeUserTool(payload.userId, payload.toolId),\n      };\n    } catch (error) {\n      console.error(\"Error removing user tool:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"updateUserTool\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        toolId: payload.toolId,\n        enabled: payload.enabled,\n        docked: payload.docked,\n        result: await db.updateUserTool(\n          payload.userId,\n          payload.toolId,\n          payload.enabled,\n          payload.docked\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error updating user tool:\", error);\n      throw error;\n    }\n  });\n  ipcMainHandle(\"getTools\", async () => {\n    try {\n      return {\n        tools: await db.getTools(),\n      };\n    } catch (error) {\n      console.error(\"Error getting tools:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"addExternalOllama\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        name: payload.name,\n        endpoint: payload.endpoint,\n        api_key: payload.api_key,\n        model: payload.model,\n        id: await db.addExternalOllama(\n          payload.userId,\n          payload.name,\n          payload.endpoint,\n          payload.api_key,\n          payload.model\n        ),\n      };\n    } catch (error) {\n      console.error(\"Error adding external ollama:\", error);\n      throw error;\n    }\n  });\n  ipcMainDatabaseHandle(\"getExternalOllama\", async (payload) => {\n    try {\n      return {\n        userId: payload.userId,\n        ollama: await db.getExternalOllama(payload.userId),\n      };\n    } catch (error) {\n      console.error(\"Error getting external ollama:\", error);\n      throw error;\n    }\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/fileHandlers.ts",
    "content": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport { openCollectionFolderFromFileExplorer } from \"../storage/openCollectionFolder.js\";\nimport { getUserCollectionFiles } from \"../storage/getUserFiles.js\";\nimport { removeFileorFolder } from \"../storage/removeFileorFolder.js\";\nimport { renameFile } from \"../storage/renameFile.js\";\n\nexport function setupFileHandlers() {\n  ipcMainDatabaseHandle(\"getUserCollectionFiles\", async (payload) => {\n    const result = await getUserCollectionFiles(payload);\n    return result;\n  });\n\n  ipcMainDatabaseHandle(\n    \"openCollectionFolderFromFileExplorer\",\n    async (payload) => {\n      const result = await openCollectionFolderFromFileExplorer(\n        payload.filepath\n      );\n      return result;\n    }\n  );\n\n  ipcMainDatabaseHandle(\"removeFileorFolder\", async (payload) => {\n    const result = await removeFileorFolder(payload);\n    return result;\n  });\n\n  ipcMainDatabaseHandle(\"renameFile\", async (payload) => {\n    const result = await renameFile(payload);\n    return result;\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/handlers.test.ts",
    "content": "import { test, expect, vi, Mock } from \"vitest\";\nimport { ipcMain } from \"electron\";\n\n// Mock electron IPC\nvi.mock(\"electron\", () => ({\n  ipcMain: {\n    handle: vi.fn(),\n    on: vi.fn(),\n    removeHandler: vi.fn(),\n  },\n}));\n\n// Example handler function to test\nconst exampleHandler = async (event: Electron.Event, ...args: unknown[]) => {\n  return { success: true, data: args[0] };\n};\n\ntest(\"IPC handler registration\", () => {\n  // Register handler\n  ipcMain.handle(\"example-channel\", exampleHandler);\n\n  // Verify handler was registered\n  expect(ipcMain.handle).toHaveBeenCalledWith(\n    \"example-channel\",\n    expect.any(Function)\n  );\n\n  // Get the registered handler\n  const registeredHandler = (ipcMain.handle as Mock).mock.calls.find(\n    (\n      call: unknown[]\n    ): call is [string, (...args: unknown[]) => Promise<unknown>] =>\n      Array.isArray(call) && call.length === 2 && typeof call[0] === \"string\"\n  )?.[1];\n\n  expect(registeredHandler).toBeDefined();\n});\n\ntest(\"IPC handler execution\", async () => {\n  const mockData = { test: \"data\" };\n  // Execute handler\n  const result = await exampleHandler(\n    {\n      preventDefault: () => {},\n      defaultPrevented: false,\n    },\n    mockData\n  );\n\n  // Verify result\n  expect(result).toEqual({\n    success: true,\n    data: mockData,\n  });\n});\n\ntest(\"IPC handler error handling\", async () => {\n  const errorHandler = async () => {\n    throw new Error(\"Test error\");\n  };\n\n  // Register error handler\n  ipcMain.handle(\"error-channel\", errorHandler);\n\n  // Execute handler and expect it to throw\n  await expect(errorHandler()).rejects.toThrow(\"Test error\");\n});\n"
  },
  {
    "path": "Frontend/src/electron/handlers/ipcHandlers.ts",
    "content": "import { BrowserWindow, ipcMain, dialog } from \"electron\";\nimport { ipcMainHandle, ipcMainOn, isDev } from \"../util.js\";\nimport { getStaticData } from \"../resourceManager.js\";\n\nexport function setupIpcHandlers(mainWindow: BrowserWindow) {\n  ipcMain.on(\"resetAppState\", (event) => {\n    event.reply(\"stateResetComplete\");\n  });\n\n  ipcMainHandle(\"getStaticData\", async () => await getStaticData());\n\n  ipcMainHandle(\"openDirectory\", async () => {\n    const result = await dialog.showOpenDialog(mainWindow, {\n      properties: ['openDirectory']\n    });\n    return result.filePaths[0];\n  });\n\n  ipcMainOn(\"resizeWindow\", ({ width, height }) => {\n    if (mainWindow) {\n      mainWindow.setSize(width, height);\n    }\n  });\n\n  ipcMainOn(\"openDevTools\", () => {\n    if (isDev()) {\n      mainWindow.webContents.openDevTools();\n    }\n  });\n\n  ipcMainOn(\"frameWindowAction\", (payload) => {\n    switch (payload) {\n      case \"close\":\n        mainWindow.close();\n        break;\n      case \"minimize\":\n        mainWindow.minimize();\n        break;\n      case \"maximize\":\n        mainWindow.maximize();\n        break;\n      case \"unmaximize\":\n        mainWindow.unmaximize();\n        break;\n    }\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/localModelHandlers.ts",
    "content": "import { ipcMainDatabaseHandle } from \"../util.js\";\n\nimport { getDirModels } from \"../localLLMs/getDirModels.js\";\nimport { unloadModel } from \"../localLLMs/unloadModel.js\";\nimport { loadModel } from \"../localLLMs/loadModel.js\";\nimport { modelInfo } from \"../localLLMs/modelInfo.js\";\nimport * as fs from \"fs\";\nimport * as path from \"path\";\nimport { BrowserWindow, app } from \"electron\";\nimport { platform } from \"os\";\n\nlet currentDownloadController: AbortController | null = null;\n\nfunction getModelsPath() {\n  const home = app.getPath(\"home\");\n  if (platform() === \"darwin\") {\n    return path.join(\n      home,\n      \"Library/Application Support/Notate/embeddings_models\"\n    );\n  } else if (platform() === \"linux\") {\n    return path.join(home, \".local/share/Notate/embeddings_models\");\n  } else {\n    return path.join(home, \".notate/embeddings_models\");\n  }\n}\n\nasync function downloadModel(payload: {\n  modelId: string;\n  dirPath: string;\n  hfToken?: string;\n}) {\n  const windows = BrowserWindow.getAllWindows();\n  const mainWindow = windows[0];\n  const sendProgress = (data: DownloadProgress) => {\n    mainWindow?.webContents.send(\"download-model-progress\", data);\n  };\n\n  // Calculate download speed\n  const calculateSpeed = (bytes: number, timeInMs: number) => {\n    const bytesPerSecond = (bytes / timeInMs) * 1000;\n    if (bytesPerSecond > 1024 * 1024) {\n      return `${(bytesPerSecond / (1024 * 1024)).toFixed(2)} MB/s`;\n    } else if (bytesPerSecond > 1024) {\n      return `${(bytesPerSecond / 1024).toFixed(2)} KB/s`;\n    }\n    return `${bytesPerSecond.toFixed(2)} B/s`;\n  };\n\n  try {\n    // Check if directory exists and has files\n    if (fs.existsSync(payload.dirPath)) {\n      const existingFiles = fs.readdirSync(payload.dirPath);\n      if (existingFiles.length > 0) {\n        // Check for common model files\n        const hasModelFiles = existingFiles.some(\n          (file) =>\n  
          file.endsWith(\".gguf\") ||\n            file.endsWith(\".bin\") ||\n            file.endsWith(\".safetensors\") ||\n            file === \"config.json\" ||\n            file === \"tokenizer.json\"\n        );\n\n        if (hasModelFiles) {\n          sendProgress({\n            type: \"progress\",\n            data: {\n              message: \"Model already exists\",\n              totalProgress: 100,\n              currentStep: \"complete\",\n            },\n          });\n          return payload;\n        }\n      }\n    }\n\n    // Create directory if it doesn't exist\n    fs.mkdirSync(payload.dirPath, { recursive: true });\n\n    // Get repository contents from Hugging Face\n    const headers: { [key: string]: string } = {\n      Accept: \"application/json\",\n    };\n    if (payload.hfToken) {\n      headers[\"Authorization\"] = `Bearer ${payload.hfToken}`;\n    }\n\n    sendProgress({\n      type: \"progress\",\n      data: {\n        message: \"Fetching model information...\",\n        totalProgress: 0,\n        currentStep: \"init\",\n      },\n    });\n\n    currentDownloadController = new AbortController();\n    const signal = currentDownloadController.signal;\n\n    // Extract the model name and path\n    const modelPath = payload.modelId.split('/');\n    const repoOwner = modelPath[0];\n    const repoName = modelPath[1];\n    const subPath = modelPath.slice(2).join('/');\n\n    // Construct the API URL with the correct path\n    const apiUrl = `https://huggingface.co/api/models/${repoOwner}/${repoName}/tree/main/${subPath}`;\n    const response = await fetch(apiUrl, { headers, signal });\n\n    if (!response.ok) {\n      throw new Error(`Failed to fetch model info: ${response.statusText}`);\n    }\n\n    const files = (await response.json()) as { path: string; size: number }[];\n\n    // Filter out zero-size files and sort by size (largest first)\n    const downloadableFiles = files\n      .filter((file) => file.size > 0)\n      .sort((a, 
b) => b.size - a.size);\n\n    // Calculate total size\n    const totalSize = downloadableFiles.reduce(\n      (acc, file) => acc + file.size,\n      0\n    );\n    let downloadedSize = 0;\n\n    // Format size to human readable\n    const formatSize = (bytes: number) => {\n      const units = [\"B\", \"KB\", \"MB\", \"GB\"];\n      let size = bytes;\n      let unitIndex = 0;\n      while (size >= 1024 && unitIndex < units.length - 1) {\n        size /= 1024;\n        unitIndex++;\n      }\n      return `${size.toFixed(2)} ${units[unitIndex]}`;\n    };\n\n    // Keep track of failed files\n    const failedFiles: string[] = [];\n\n    // Download each file\n    for (const [index, file] of downloadableFiles.entries()) {\n      const fileName = file.path;\n      const downloadUrl = `https://huggingface.co/${repoOwner}/${repoName}/resolve/main/${fileName}`;\n      const filePath = path.join(payload.dirPath, path.basename(fileName));\n\n      // Create subdirectories if needed\n      fs.mkdirSync(path.dirname(filePath), { recursive: true });\n\n      sendProgress({\n        type: \"progress\",\n        data: {\n          message: `Downloading file ${index + 1} of ${\n            downloadableFiles.length\n          }`,\n          fileName,\n          fileNumber: index + 1,\n          totalFiles: downloadableFiles.length,\n          fileProgress: 0,\n          totalProgress: Math.round((downloadedSize / totalSize) * 100),\n          currentSize: formatSize(downloadedSize),\n          totalSize: formatSize(totalSize),\n          currentStep: \"downloading\",\n          speed: \"Starting...\",\n        },\n      });\n\n      try {\n        const fileResponse = await fetch(downloadUrl, { headers, signal });\n\n        if (!fileResponse.ok) {\n          console.warn(\n            `Failed to download ${fileName}: ${fileResponse.statusText}`\n          );\n          failedFiles.push(fileName);\n          continue;\n        }\n\n        if (!fileResponse.body) {\n          
console.warn(`No data received for ${fileName}`);\n          failedFiles.push(fileName);\n          continue;\n        }\n\n        // Stream the file to disk with progress tracking\n        const fileStream = fs.createWriteStream(filePath);\n        const reader = fileResponse.body.getReader();\n        let receivedLength = 0;\n        let lastUpdate = Date.now();\n        let lastBytes = 0;\n\n        while (true) {\n          try {\n            const { done, value } = await reader.read();\n\n            // Add check for abort signal\n            if (signal.aborted) {\n              reader.cancel();\n              fileStream.destroy();\n              throw new Error(\"Download cancelled\");\n            }\n\n            if (done) break;\n\n            receivedLength += value.length;\n            downloadedSize += value.length;\n            fileStream.write(value);\n\n            // Calculate speed every 500ms\n            const now = Date.now();\n            const timeDiff = now - lastUpdate;\n            if (timeDiff >= 500) {\n              const bytesDiff = receivedLength - lastBytes;\n              const speed = calculateSpeed(bytesDiff, timeDiff);\n              lastUpdate = now;\n              lastBytes = receivedLength;\n\n              // Update progress\n              const totalProgress = Math.round(\n                (downloadedSize / totalSize) * 100\n              );\n              const fileProgress = Math.round(\n                (receivedLength / file.size) * 100\n              );\n\n              sendProgress({\n                type: \"progress\",\n                data: {\n                  message: `Downloading file ${index + 1} of ${\n                    downloadableFiles.length\n                  }`,\n                  fileName,\n                  fileNumber: index + 1,\n                  totalFiles: downloadableFiles.length,\n                  fileProgress,\n                  totalProgress,\n                  currentSize: 
formatSize(downloadedSize),\n                  totalSize: formatSize(totalSize),\n                  currentStep: \"downloading\",\n                  speed,\n                },\n              });\n            }\n          } catch (error) {\n            reader.cancel();\n            fileStream.destroy();\n            throw error;\n          }\n        }\n\n        fileStream.end();\n      } catch (error) {\n        console.warn(`Error downloading ${fileName}:`, error);\n        failedFiles.push(fileName);\n        continue;\n      }\n    }\n\n    // If all files failed, throw error and remove the directory\n    if (failedFiles.length === downloadableFiles.length) {\n      fs.rmSync(payload.dirPath, { recursive: true, force: true });\n      throw new Error(\"Failed to download any files from the model\");\n    }\n\n    // If some files failed but not all, show warning\n    if (failedFiles.length > 0) {\n      console.warn(\"Some files failed to download:\", failedFiles);\n      sendProgress({\n        type: \"progress\",\n        data: {\n          message: `Download completed with ${failedFiles.length} skipped files`,\n          totalProgress: 100,\n          currentStep: \"complete\",\n          currentSize: formatSize(downloadedSize),\n          totalSize: formatSize(totalSize),\n        },\n      });\n    } else {\n      sendProgress({\n        type: \"progress\",\n        data: {\n          message: \"Download completed successfully\",\n          totalProgress: 100,\n          currentStep: \"complete\",\n          currentSize: formatSize(totalSize),\n          totalSize: formatSize(totalSize),\n        },\n      });\n    }\n\n    return payload;\n  } catch (error) {\n    console.error(\"Error downloading model:\", error);\n    // Clean up partially downloaded files on error\n    if (fs.existsSync(payload.dirPath)) {\n      fs.rmSync(payload.dirPath, { recursive: true, force: true });\n    }\n\n    // Add specific handling for cancellation\n    if (error 
instanceof Error && error.message === \"Download cancelled\") {\n      throw new Error(\"Download cancelled by user\");\n    }\n    throw error;\n  } finally {\n    currentDownloadController = null;\n  }\n}\n\nexport function setupLocalModelHandlers() {\n  ipcMainDatabaseHandle(\"getModelsPath\", async () => {\n    const modelsPath = getModelsPath();\n    fs.mkdirSync(modelsPath, { recursive: true });\n    return modelsPath;\n  });\n\n  ipcMainDatabaseHandle(\"getEmbeddingsModels\", async () => {\n    const models = await getDirModels({ dirPath: getModelsPath() });\n    return { models };\n  });\n\n  ipcMainDatabaseHandle(\"getDirModels\", async (payload) => {\n    const models = await getDirModels(payload);\n    return { dirPath: payload.dirPath, models };\n  });\n\n  ipcMainDatabaseHandle(\"loadModel\", async (payload) => {\n    try {\n      const result = await loadModel(payload);\n      return result;\n    } catch (error) {\n      console.error(\"Error loading model:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"unloadModel\", async (payload) => {\n    try {\n      const result = await unloadModel(payload);\n      return result;\n    } catch (error) {\n      console.error(\"Error unloading model:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"downloadModel\", async (payload) => {\n    try {\n      const result = await downloadModel(payload);\n      return result;\n    } catch (error) {\n      console.error(\"Error downloading model:\", error);\n      throw error;\n    }\n  });\n\n  ipcMainDatabaseHandle(\"cancelDownload\", async () => {\n    if (currentDownloadController) {\n      currentDownloadController.abort();\n      currentDownloadController = null;\n      return { success: true };\n    }\n    return { success: false };\n  });\n\n  ipcMainDatabaseHandle(\"getModelInfo\", async (payload) => {\n    try {\n      const result = await modelInfo(payload);\n      return result;\n    } catch (error) {\n      
console.error(\"Error getting model info:\", error);\n      throw error;\n    }\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/menuHandlers.ts",
    "content": "import { ipcMain, app, BrowserWindow } from \"electron\";\n\nexport function setupMenuHandlers(mainWindow: BrowserWindow) {\n  ipcMain.handle(\"changeUser\", async () => {\n    mainWindow.webContents.send(\"resetUserState\");\n    await new Promise(resolve => setTimeout(resolve, 100));\n    mainWindow.webContents.send(\"changeView\", \"SelectAccount\");\n  });\n\n  ipcMain.handle(\"quit\", async () => {\n    app.quit();\n  });\n\n  ipcMain.handle(\"undo\", async () => {\n    mainWindow.webContents.undo();\n  });\n\n  ipcMain.handle(\"redo\", async () => {\n    mainWindow.webContents.redo();\n  });\n\n  ipcMain.handle(\"cut\", async () => {\n    mainWindow.webContents.cut();\n  });\n\n  ipcMain.handle(\"copy\", async () => {\n    mainWindow.webContents.copy();\n  });\n\n  ipcMain.handle(\"paste\", async () => {\n    mainWindow.webContents.paste();\n  });\n\n  ipcMain.handle(\"delete\", async () => {\n    mainWindow.webContents.delete();\n  });\n\n  ipcMain.handle(\"selectAll\", async () => {\n    mainWindow.webContents.selectAll();\n  });\n\n  ipcMain.handle(\"chat\", async () => {\n    mainWindow.webContents.send(\"changeView\", \"Chat\");\n  });\n\n  ipcMain.handle(\"history\", async () => {\n    mainWindow.webContents.send(\"changeView\", \"History\");\n  });\n\n  ipcMain.handle(\"toggleDevTools\", async () => {\n    mainWindow.webContents.toggleDevTools();\n  });\n\n  ipcMain.handle(\"openDevTools\", async () => {\n    mainWindow.webContents.openDevTools();\n  });\n}"
  },
  {
    "path": "Frontend/src/electron/handlers/ollamaHandlers.ts",
    "content": "import { ipcMainHandle } from \"../util.js\";\nimport { fetchOllamaModels } from \"../ollama/fetchLocalModels.js\";\nimport { systemSpecs } from \"../specs/systemSpecs.js\";\nimport { runOllama } from \"../ollama/runOllama.js\";\nimport { pullModel } from \"../ollama/pullModel.js\";\nimport { checkOllama } from \"../ollama/checkOllama.js\";\nimport db from \"../db.js\";\n/* import log from \"electron-log\";\n */\nexport async function setupOllamaHandlers() {\n  ipcMainHandle(\"getPlatform\", async () => {\n    return { platform: process.platform as \"win32\" | \"darwin\" | \"linux\" };\n  });\n\n  ipcMainHandle(\"systemSpecs\", async () => {\n    try {\n      const { cpu, vram, GPU_Manufacturer } = await systemSpecs();\n      return { cpu, vram, GPU_Manufacturer };\n    } catch (error) {\n      console.error(\"Error in systemSpecs:\", error);\n      return { cpu: \"Unknown\", vram: \"Unknown\", GPU_Manufacturer: \"Unknown\" };\n    }\n  });\n\n  ipcMainHandle(\"checkOllama\", async () => {\n    try {\n      const isOllamaRunning = await checkOllama();\n      return { isOllamaRunning };\n    } catch (error) {\n      console.error(\"Error checking Ollama:\", error);\n      return { isOllamaRunning: false };\n    }\n  });\n\n  ipcMainHandle(\"fetchOllamaModels\", async () => {\n    try {\n      const models = await fetchOllamaModels();\n      return { models };\n    } catch (error) {\n      console.error(\"Error in fetchOllamaModels:\", error);\n      return { models: [] };\n    }\n  });\n\n  ipcMainHandle(\"pullModel\", async (_, { model }: { model: string }) => {\n    await pullModel(model);\n    return { model };\n  });\n\n  ipcMainHandle(\n    \"runOllama\",\n    async (\n      _event,\n      { model, user }: { model: string; user: User }\n    ): Promise<{ model: string; user: User }> => {\n      try {\n        await checkOllama();\n        await runOllama({ model });\n\n        db.updateUserSettings({ ...user, provider: \"ollama\" });\n        
db.updateUserSettings({ ...user, model });\n        return { model, user };\n      } catch (error) {\n        const errorMessage =\n          error instanceof Error ? error.message : \"Unknown error occurred\";\n        console.error(\"Error running Ollama:\", error);\n        throw new Error(errorMessage);\n      }\n    }\n  );\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/openRouterHandlers.ts",
    "content": "import { ipcMainDatabaseHandle } from \"../util.js\";\nimport db from \"../db.js\";\nimport { OpenRouterProviderAPIKeyCheck } from \"../llms/apiCheckProviders/openrouter.js\";\n\nexport async function setupOpenRouterHandlers() {\n  ipcMainDatabaseHandle(\"getOpenRouterModel\", async (payload) => {\n    const model = await db.getOpenRouterModel(payload.userId);\n    return {\n      userId: payload.userId,\n      model,\n    };\n  });\n\n  ipcMainDatabaseHandle(\"addOpenRouterModel\", async (payload) => {\n    const userApiKey = await db.getApiKey(payload.userId, \"openrouter\");\n    const checkModel = await OpenRouterProviderAPIKeyCheck(\n      userApiKey,\n      payload.model\n    );\n    if (checkModel.error) {\n      throw new Error(checkModel.error);\n    }\n    await db.addOpenRouterModel(payload.userId, payload.model);\n    return {\n      userId: payload.userId,\n      model: payload.model,\n    };\n  });\n\n  ipcMainDatabaseHandle(\"deleteOpenRouterModel\", async (payload) => {\n    await db.deleteOpenRouterModel(payload.userId, payload.id);\n    return {\n      userId: payload.userId,\n      id: payload.id,\n    };\n  });\n\n  ipcMainDatabaseHandle(\"getOpenRouterModels\", async (payload) => {\n    const models = await db.getOpenRouterModels(payload.userId);\n    return {\n      userId: payload.userId,\n      models,\n    };\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/handlers/voiceHandlers.ts",
    "content": "import { ipcMainHandle } from \"../util.js\";\nimport * as fs from \"fs\";\nimport * as path from \"path\";\nimport { app } from \"electron\";\nimport ffmpegStatic from \"ffmpeg-static\";\nimport log from \"electron-log\";\nimport { spawn } from \"child_process\";\nimport { audioTranscription } from \"../voice/audioTranscription.js\";\n\nlog.transports.file.level = \"info\";\nlog.transports.file.resolvePathFn = () =>\n  path.join(app.getPath(\"userData\"), \"logs/main.log\");\n\nexport function setupVttHandlers() {\n  // Set FFMPEG_PATH environment variable for Python to use\n  let ffmpegPath = ffmpegStatic as unknown as string | null;\n\n  // In production, use the bundled FFmpeg\n  if (app.isPackaged) {\n    ffmpegPath = path.join(process.resourcesPath, \"ffmpeg\");\n    if (process.platform === \"win32\") {\n      ffmpegPath += \".exe\";\n    }\n  }\n\n  if (ffmpegPath) {\n    process.env.FFMPEG_PATH = ffmpegPath;\n    log.info(`Setting FFMPEG_PATH to: ${ffmpegPath}`);\n  }\n\n  ipcMainHandle(\"checkIfFFMPEGInstalled\", async () => {\n    const checkResult = { success: false, message: false };\n\n    if (!ffmpegPath) {\n      log.error(\"FFmpeg binary not found\");\n      return checkResult;\n    }\n\n    if (!fs.existsSync(ffmpegPath)) {\n      log.error(`FFmpeg binary not found at path: ${ffmpegPath}`);\n      return checkResult;\n    }\n\n    return new Promise((resolve) => {\n      const ffmpeg = spawn(ffmpegPath!, [\"-version\"]);\n\n      ffmpeg.on(\"error\", (err: Error) => {\n        log.error(`Error executing FFmpeg: ${err}`);\n        resolve(checkResult);\n      });\n\n      ffmpeg.on(\"close\", (code: number) => {\n        const success = code === 0;\n        log.info(`FFmpeg check completed with code ${code}`);\n        resolve({ success, message: success });\n      });\n    }).catch(() => checkResult);\n  });\n\n  ipcMainHandle<\n    \"transcribeAudio\",\n    { audioData: Buffer; userId: number },\n    TranscribeAudioOutput\n  
>(\"transcribeAudio\", async (_event, { audioData, userId }) => {\n    const data = await audioTranscription(audioData, userId);\n    if (!data) {\n      return {\n        success: false,\n        error: \"Error transcribing audio\",\n      };\n    }\n    return {\n      success: true,\n      transcription: data.transcription,\n      language: data.language,\n    };\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/helpers/spawnAsync.ts",
    "content": "import { spawn } from \"child_process\";\nimport log from \"electron-log\";\nimport { updateLoadingStatus } from \"../loadingWindow.js\";\n\nexport function spawnAsync(\n  command: string,\n  args: string[],\n  options: { env?: NodeJS.ProcessEnv; stdio?: \"inherit\" | \"pipe\" } = {}\n): Promise<string> {\n  return new Promise((resolve, reject) => {\n    const process = spawn(command, args, options);\n    let stdout = \"\";\n    let stderr = \"\";\n\n    process.stdout?.on(\"data\", (data) => {\n      stdout += data.toString();\n      log.info(`[Installation Output] ${data.toString().trim()}`);\n      updateLoadingStatus(data.toString().trim(), -1);\n    });\n\n    process.stderr?.on(\"data\", (data) => {\n      stderr += data.toString();\n      log.warn(`[Installation Error] ${data.toString().trim()}`);\n    });\n\n    process.on(\"close\", (code) => {\n      if (code === 0) {\n        resolve(stdout);\n      } else {\n        reject(\n          new Error(`Process exited with code ${code}\\nStderr: ${stderr}`)\n        );\n      }\n    });\n\n    process.on(\"error\", (err) => {\n      reject(err);\n    });\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/anthropicAgent.ts",
    "content": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { BrowserWindow } from \"electron\";\nimport { Anthropic } from \"@anthropic-ai/sdk\";\nimport { z } from \"zod\";\nimport { webSearch } from \"./tools/websearch.js\";\n\nexport async function anthropicAgent(\n  anthropic: Anthropic,\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  signal?: AbortSignal,\n  mainWindow: BrowserWindow | null = null\n): Promise<{\n  content: string;\n  webSearchResult: WebSearchResult | null;\n}> {\n  sendMessageChunk(\"[Agent]: \", mainWindow);\n  const sysPrompt: ChatCompletionMessageParam = {\n    role: \"system\",\n    content: `You are an AI Agent with the ability to visit websites and extract text and metadata.\n    Your task is to analyze if the user is DIRECTLY requesting to visit or check a specific website.\n    \n    ONLY use web search or news search if the user explicitly asks to visit, check, or get information from a specific URL or website or websearch or news search.\n    Do not infer or assume web search would be helpful unless directly requested asking what is on a website is a valid web search. 
\n    \n    If the user directly requests web search, respond with EXACTLY this JSON format:\n    {\n      \"webUrl\": 1,\n      \"url\": \"full_url_here\"\n    }\n    \n    For all other queries, even if web search might be helpful, respond with EXACTLY:\n    {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n    \n    example:\n    user: \"What is on the google news page?\"\n    agent: {\n      \"webUrl\": 1,\n      \"url\": \"https://news.google.com\"\n    }\n    \n\n    user: \"What is the capital of France?\"\n    agent: {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n\n    Only respond with one of these two JSON formats, nothing else.\n    Make sure the URL is a complete, valid URL starting with http:// or https://\n    Do not include any explanation or additional text in your response.`,\n  };\n\n  const AgentActions = z.object({\n    webUrl: z.number(),\n    url: z.string(),\n  });\n\n  const response = await anthropic.messages.create(\n    {\n      model: \"claude-3-sonnet-20240229\",\n      max_tokens: maxOutputTokens,\n      system: sysPrompt.content,\n      messages: messages.map((msg) => ({\n        role: msg.role === \"assistant\" ? \"assistant\" : \"user\",\n        content: msg.content as string,\n      })),\n    },\n    { signal }\n  );\n\n  let agentActions;\n  try {\n    const responseText =\n      response.content[0].type === \"text\" ? 
response.content[0].text : \"\";\n    agentActions = AgentActions.parse(JSON.parse(responseText.trim()));\n  } catch (error) {\n    console.error(\"Failed to parse agent response:\", error);\n    // Fallback to no web search if parsing fails\n    agentActions = { webUrl: 0, url: \"\" };\n  }\n\n  let webSearchResult = null;\n  if (agentActions.webUrl === 1 && agentActions.url) {\n    try {\n      webSearchResult = (await webSearch({\n        url: agentActions.url,\n      })) as WebSearchResult;\n      sendMessageChunk(\n        \"[REASONING]: Visiting website: \" + agentActions.url + \"\\n\",\n        mainWindow\n      );\n    } catch (error) {\n      console.error(\"Web search failed:\", error);\n      sendMessageChunk(\n        \"[REASONING]: Failed to visit website: \" + agentActions.url + \"\\n\",\n        mainWindow\n      );\n    }\n  }\n\n  // Prepare final response\n  const finalResponse = {\n    content: webSearchResult\n      ? `Retrieved content from: ${agentActions.url}`\n      : \"No web search was needed or the search failed\",\n    webSearchResult,\n  };\n\n  return finalResponse;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/geminiAgent.ts",
    "content": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { BrowserWindow } from \"electron\";\nimport { z } from \"zod\";\nimport { webSearch } from \"./tools/websearch.js\";\nimport { GoogleGenerativeAI } from \"@google/generative-ai\";\n\nexport async function geminiAgent(\n  gemini: GoogleGenerativeAI,\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  userSettings: UserSettings,\n  signal?: AbortSignal,\n  mainWindow: BrowserWindow | null = null\n): Promise<{\n  content: string;\n  webSearchResult: WebSearchResult | null;\n}> {\n  console.log(\"geminiAgent\");\n  sendMessageChunk(\"[Agent]: \", mainWindow);\n  const sysPrompt = `You are an AI Agent with the ability to visit websites and extract text and metadata.\n    Your task is to analyze if the user is DIRECTLY requesting to visit or check a specific website.\n    \n    ONLY use web search or news search if the user explicitly asks to visit, check, or get information from a specific URL or website or websearch or news search.\n    Do not infer or assume web search would be helpful unless directly requested asking what is on a website is a valid web search. 
\n    \n    If the user directly requests web search, respond with EXACTLY this JSON format:\n    {\n      \"webUrl\": 1,\n      \"url\": \"full_url_here\"\n    }\n    \n    For all other queries, even if web search might be helpful, respond with EXACTLY:\n    {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n    \n    example:\n    user: \"What is on the google news page?\"\n    agent: {\n      \"webUrl\": 1,\n      \"url\": \"https://news.google.com\"\n    }\n    \n\n    user: \"What is the capital of France?\"\n    agent: {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n\n    Only respond with one of these two JSON formats, nothing else.\n    Make sure the URL is a complete, valid URL starting with http:// or https://\n    Do not include any explanation or additional text in your response.`;\n\n  const AgentActions = z.object({\n    webUrl: z.number(),\n    url: z.string(),\n  });\n\n  const model = gemini.getGenerativeModel({\n    model: userSettings.model as string,\n  });\n\n  const chat = model.startChat({\n    history: [],\n    generationConfig: {\n      temperature: Number(userSettings.temperature),\n      maxOutputTokens: maxOutputTokens,\n    },\n  });\n\n  const result = await chat.sendMessage(\n    sysPrompt + \"\\n\\n\" + messages[messages.length - 1].content,\n    { signal }\n  );\n  const responseText = result.response.text();\n  let agentActions;\n  try {\n    // Clean up markdown formatting if present\n    const cleanedResponse = responseText\n      .replace(/```json\\n?/g, \"\") // Remove ```json\n      .replace(/```\\n?/g, \"\") // Remove closing ```\n      .trim(); // Remove extra whitespace\n\n    agentActions = AgentActions.parse(JSON.parse(cleanedResponse));\n  } catch (error) {\n    console.error(\"Failed to parse agent response:\", error);\n    // Fallback to no web search if parsing fails\n    agentActions = { webUrl: 0, url: \"\" };\n  }\n\n  let webSearchResult = null;\n  if (agentActions.webUrl === 1 && agentActions.url) {\n    
try {\n      webSearchResult = (await webSearch({\n        url: agentActions.url,\n      })) as WebSearchResult;\n    } catch (error) {\n      console.error(\"Web search failed:\", error);\n      sendMessageChunk(\n        \"[REASONING]: Failed to visit website: \" + agentActions.url + \"\\n\",\n        mainWindow\n      );\n    }\n  }\n\n  // Prepare final response\n  const finalResponse = {\n    content: webSearchResult\n      ? `Retrieved content from: ${agentActions.url}`\n      : \"No web search was needed or the search failed\",\n    webSearchResult,\n  };\n\n  return finalResponse;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/ollamaAgent.ts",
    "content": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { BrowserWindow } from \"electron\";\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\nimport ollama from \"ollama\";\nimport { z } from \"zod\";\nimport { webSearch } from \"./tools/websearch.js\";\n\nexport async function ollamaAgent(\n  messages: ChatCompletionMessageParam[],\n  userSettings: UserSettings,\n  mainWindow: BrowserWindow | null = null\n): Promise<{\n  content: string;\n  webSearchResult: WebSearchResult | null;\n}> {\n  console.log(\"ollamaAgent\");\n  sendMessageChunk(\"[Agent]: \", mainWindow);\n  const sysPrompt: ChatCompletionMessageParam = {\n    role: \"system\",\n    content: `You are an AI Agent with the ability to visit websites and extract text and metadata.\n    Your task is to analyze if the user is DIRECTLY requesting to visit or check a specific website.\n    \n    ONLY use web search or news search if the user explicitly asks to visit, check, or get information from a specific URL or website or websearch or news search.\n    Do not infer or assume web search would be helpful unless directly requested asking what is on a website is a valid web search. 
\n    \n    If the user directly requests web search, respond with EXACTLY this JSON format:\n    {\n      \"webUrl\": 1,\n      \"url\": \"full_url_here\"\n    }\n    \n    For all other queries, even if web search might be helpful, respond with EXACTLY:\n    {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n    \n    example:\n    user: \"What is on the google news page?\"\n    agent: {\n      \"webUrl\": 1,\n      \"url\": \"https://news.google.com\"\n    }\n    \n\n    user: \"What is the capital of France?\"\n    agent: {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n\n    Only respond with one of these two JSON formats, nothing else.\n    Make sure the URL is a complete, valid URL starting with http:// or https://\n    Do not include any explanation or additional text in your response.`,\n  };\n\n  const AgentActions = z.object({\n    webUrl: z.number(),\n    url: z.string(),\n  });\n\n  type OllamaMessage = { role: string; content: string };\n  const convertToOllamaMessages = (\n    msgs: ChatCompletionMessageParam[]\n  ): OllamaMessage[] =>\n    msgs.map((msg) => ({\n      role: msg.role,\n      content: msg.content\n        ? typeof msg.content === \"string\"\n          ? msg.content\n          : Array.isArray(msg.content) &&\n            msg.content[0] &&\n            \"text\" in msg.content[0]\n          ? 
msg.content[0].text\n          : \"\"\n        : \"\",\n    }));\n\n  const response = await ollama.chat({\n    model: userSettings.model || \"llama2\",\n    messages: convertToOllamaMessages([sysPrompt, ...messages]),\n    format: zodToJsonSchema(AgentActions),\n  });\n\n  const agentActions = AgentActions.parse(JSON.parse(response.message.content));\n\n  let webSearchResult;\n  if (agentActions.webUrl === 1) {\n    webSearchResult = (await webSearch({\n      url: agentActions.url,\n    })) as WebSearchResult;\n  }\n  console.log(\"agentActions\", agentActions);\n  sendMessageChunk(\n    \"[REASONING]: \" + \"Visiting website: \" + agentActions.url,\n    mainWindow\n  );\n  sendMessageChunk(\"[Agent]: \" + JSON.stringify(webSearchResult), mainWindow);\n  return {\n    content: \"Visiting website: \" + agentActions.url,\n    webSearchResult: webSearchResult || null,\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/openAiAgent.ts",
    "content": "import { OpenAI } from \"openai\";\nimport { webSearch } from \"./tools/websearch.js\";\nimport { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { BrowserWindow } from \"electron\";\n\nexport async function openAiAgent(\n  provider: OpenAI,\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  userSettings: UserSettings,\n  signal?: AbortSignal,\n  mainWindow: BrowserWindow | null = null\n): Promise<{\n  content: string;\n  webSearchResult: WebSearchResult | null;\n}> {\n  sendMessageChunk(\"[Agent]: \", mainWindow);\n  const sysPrompt: ChatCompletionMessageParam = {\n    role: \"system\",\n    content: `You are an AI Agent with the ability to visit websites and extract text and metadata.\n    Your task is to analyze if the user is DIRECTLY requesting to visit or check a specific website.\n    \n    ONLY use web search or news search if the user explicitly asks to visit, check, or get information from a specific URL or website or websearch or news search.\n    Do not infer or assume web search would be helpful unless directly requested asking what is on a website is a valid web search. 
\n    \n    If the user directly requests web search, respond with EXACTLY this JSON format:\n    {\n      \"webUrl\": 1,\n      \"url\": \"full_url_here\"\n    }\n    \n    For all other queries, even if web search might be helpful, respond with EXACTLY:\n    {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n    \n    example:\n    user: \"What is on the google news page?\"\n    agent: {\n      \"webUrl\": 1,\n      \"url\": \"https://news.google.com\"\n    }\n    \n\n    user: \"What is the capital of France?\"\n    agent: {\n      \"webUrl\": 0,\n      \"url\": \"\"\n    }\n\n    Only respond with one of these two JSON formats, nothing else.\n    Make sure the URL is a complete, valid URL starting with http:// or https://\n    Do not include any explanation or additional text in your response.`,\n  };\n\n  const response = await provider.chat.completions.create(\n    {\n      model: userSettings.model as string,\n      messages: [sysPrompt, ...messages],\n      response_format: { type: \"json_object\" },\n    },\n    { signal }\n  );\n\n  const content = response.choices[0]?.message?.content || \"{}\";\n\n  const contentObj = JSON.parse(content) as { webUrl: number; url: string };\n\n  let webSearchResult: {\n    metadata: {\n      title: string;\n      source: string;\n      description: string;\n      author: string;\n      keywords: string;\n      ogImage: string;\n    };\n    textContent: string;\n  } | null = null;\n\n  if (contentObj.webUrl === 1) {\n    webSearchResult = (await webSearch({\n      url: contentObj.url,\n    })) as WebSearchResult;\n  }\n  sendMessageChunk(\"[REASONING]: \" + content, mainWindow);\n  sendMessageChunk(\"[Agent]: \" + JSON.stringify(webSearchResult), mainWindow);\n  return { content, webSearchResult };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/agentLayer/tools/websearch.ts",
    "content": "import { chromium } from \"playwright\";\n\nexport async function webSearch(payload: { url: string }) {\n  const browser = await chromium.launch({\n    headless: true,\n    executablePath:\n      process.platform === \"win32\"\n        ? \"C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"\n        : process.platform === \"linux\"\n        ? \"/usr/bin/google-chrome\"\n        : \"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome\",\n  });\n\n  const context = await browser.newContext({\n    userAgent:\n      \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36\",\n    viewport: { width: 1920, height: 1080 },\n    deviceScaleFactor: 1,\n    isMobile: false,\n    hasTouch: false,\n    javaScriptEnabled: true,\n    bypassCSP: true,\n    ignoreHTTPSErrors: true,\n  });\n\n  const page = await context.newPage();\n\n  try {\n    await page.goto(payload.url, {\n      waitUntil: \"networkidle\",\n      timeout: 3000,\n    });\n\n    await page.waitForSelector(\"body\");\n\n    const metadata = await page.evaluate((targetUrl: string) => {\n      const getMetaContent = (name: string): string => {\n        const element = document.querySelector(\n          `meta[name=\"${name}\"], meta[property=\"${name}\"]`\n        );\n        return element ? 
(element as HTMLMetaElement).content : \"\";\n      };\n      return {\n        title: document.title,\n        source: targetUrl,\n        description:\n          getMetaContent(\"description\") || getMetaContent(\"og:description\"),\n        author: getMetaContent(\"author\"),\n        keywords: getMetaContent(\"keywords\"),\n        ogImage: getMetaContent(\"og:image\"),\n      };\n    }, payload.url);\n\n    const textContent = await page.evaluate(() => {\n      // Remove scripts and styles before getting text content\n      const scripts = document.getElementsByTagName(\"script\");\n      const styles = document.getElementsByTagName(\"style\");\n      Array.from(scripts).forEach((script) => script.remove());\n      Array.from(styles).forEach((style) => style.remove());\n      return document.body.innerText;\n    });\n    console.log(\"Length of textContent\", textContent.length);\n    let content = textContent;\n    if (textContent.length > 2000) {\n      content = textContent.slice(0, 2000);\n    }\n    console.log(\"Length of content\", content.length);\n    return {\n      metadata,\n      textContent: content,\n    };\n  } catch {\n    return {\n      metadata: null,\n      textContent: \"The URL is invalid or the page is not accessible\",\n    };\n  } finally {\n    await context.close();\n    await browser.close();\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/anthropic.ts",
    "content": "import Anthropic from \"@anthropic-ai/sdk\";\nimport log from \"electron-log\";\nexport async function AnthropicProviderAPIKeyCheck(apiKey: string): Promise<{\n  error?: string;\n  success?: boolean;\n}> {\n  if (!apiKey) {\n    log.error(\"Anthropic API key not found for the active user\");\n    throw new Error(\"Anthropic API key not found for the active user\");\n  }\n  const anthropic = new Anthropic({ apiKey });\n\n  const response = await anthropic.messages.create({\n    model: \"claude-3-5-sonnet-20240620\",\n    messages: [{ role: \"user\", content: \"Hello, world!\" }],\n    max_tokens: 10,\n  });\n  log.info(`Response: ${JSON.stringify(response)}`);    \n  if (response.content) {\n    return {\n      success: true,\n    };\n  }\n\n  return {\n    error: \"Anthropic API key is invalid\",\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/deepseek.ts",
    "content": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nexport async function DeepSeekProviderAPIKeyCheck(\n  apiKey: string,\n  model?: string\n): Promise<{\n  error?: string;\n  success?: boolean;\n}> {\n  if (!apiKey) {\n    log.error(\"DeepSeek API key not found for the active user\");\n    throw new Error(\"DeepSeek API key not found for the active user\");\n  }\n  const openai = new OpenAI({\n    apiKey,\n    baseURL: \"https://api.deepseek.com\",\n  });\n\n  const response = await openai.chat.completions.create({\n    model: model || \"deepseek-chat\",\n    messages: [{ role: \"user\", content: \"Hello, world!\" }],\n    max_tokens: 10,\n  });\n  log.info(`Response: ${JSON.stringify(response)}`);\n  if (response.choices[0]?.message?.content) {\n    return {\n      success: true,\n    };\n  }\n\n  return {\n    error: \"OpenRouter API key is invalid\",\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/gemini.ts",
    "content": "import { GoogleGenerativeAI } from \"@google/generative-ai\";\nimport log from \"electron-log\"; \nlet genAI: GoogleGenerativeAI;\nasync function initializeGemini(apiKey: string) {\n  genAI = new GoogleGenerativeAI(apiKey);\n}\n\nexport async function GeminiProviderAPIKeyCheck(apiKey: string): Promise<{\n  error?: string;\n  success?: boolean;\n}> {\n  if (!apiKey) {\n    log.error(\"Gemini API key not found for the active user\");\n    throw new Error(\"Gemini API key not found for the active user\");\n  }\n  await initializeGemini(apiKey);\n\n  if (!genAI) {\n    log.error(\"Gemini instance not initialized\");\n    throw new Error(\"Gemini instance not initialized\");\n  }\n\n  const model = genAI.getGenerativeModel({ model: \"gemini-pro\" });\n  const result = await model.generateContent(\"Hello, world!\");\n  const response = await result.response;\n  log.info(`Response: ${JSON.stringify(response)}`);\n  if (response.text()) {\n    return {\n      success: true,\n    };\n  }\n\n  return {\n    error: \"Gemini API key is invalid\",\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/openai.ts",
    "content": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nexport async function OpenAIProviderAPIKeyCheck(apiKey: string): Promise<{\n  error?: string;\n  success?: boolean;\n}> {\n  if (!apiKey) {\n    log.error(\"OpenAI API key not found for the active user\");\n    throw new Error(\"OpenAI API key not found for the active user\");\n  }\n  const openai = new OpenAI({ apiKey });\n\n  const response = await openai.chat.completions.create({\n    model: \"gpt-3.5-turbo\",\n    messages: [{ role: \"user\", content: \"Hello, world!\" }],\n    max_tokens: 10,\n  });\n  log.info(`Response: ${JSON.stringify(response)}`);\n  if (response.choices[0]?.message?.content) {\n    return {\n      success: true,\n    };\n  }\n\n  return {\n    error: \"OpenAI API key is invalid\",\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/openrouter.ts",
    "content": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nexport async function OpenRouterProviderAPIKeyCheck(\n  apiKey: string,\n  model?: string\n): Promise<{\n  error?: string;\n  success?: boolean;\n}> {\n  if (!apiKey) {\n    log.error(\"OpenRouter API key not found for the active user\");\n    throw new Error(\"OpenRouter API key not found for the active user\");\n  }\n  const openai = new OpenAI({\n    apiKey,\n    baseURL: \"https://openrouter.ai/api/v1\",\n  });\n\n  const response = await openai.chat.completions.create({\n    model: model || \"openai/gpt-3.5-turbo\",\n    messages: [{ role: \"user\", content: \"Hello, world!\" }],\n    max_tokens: 10,\n  });\n  log.info(`Response: ${JSON.stringify(response)}`);\n  if (response.choices[0]?.message?.content) {\n    return {\n      success: true,\n    };\n  }\n\n  return {\n    error: \"OpenRouter API key is invalid\",\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/apiCheckProviders/xai.ts",
    "content": "import OpenAI from \"openai\";\nimport log from \"electron-log\";\nlet openai: OpenAI;\n\nasync function initializeXAI(apiKey: string) {\n  openai = new OpenAI({ apiKey, baseURL: \"https://api.x.ai/v1\" });\n}\n\nexport async function XAIProviderAPIKeyCheck(apiKey: string): Promise<{\n  error?: string;\n  success?: boolean;\n}> {\n  if (!apiKey) {\n    log.error(\"XAI API key not found for the active user\");\n    throw new Error(\"XAI API key not found for the active user\");\n  }\n  await initializeXAI(apiKey);\n\n  if (!openai) {\n    log.error(\"XAI instance not initialized\");\n    throw new Error(\"XAI instance not initialized\");\n  }\n\n  const response = await openai.chat.completions.create({\n    model: \"grok-beta\",\n    messages: [{ role: \"user\", content: \"Hello, world!\" }],\n    max_tokens: 10,\n  });\n  log.info(`Response: ${JSON.stringify(response)}`);\n  if (response.choices[0]?.message?.content) {\n    return {\n      success: true,\n    };\n  }\n\n  return {\n    error: \"XAI API key is invalid\",\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/chatCompletion.ts",
    "content": "import OpenAI from \"openai\";\nimport db from \"../db.js\";\nimport { openAiChainOfThought } from \"./reasoningLayer/openAiChainOfThought.js\";\nimport { prepMessages } from \"./llmHelpers/prepMessages.js\";\nimport { returnSystemPrompt } from \"./llmHelpers/returnSystemPrompt.js\";\nimport { sendMessageChunk } from \"./llmHelpers/sendMessageChunk.js\";\nimport { truncateMessages } from \"./llmHelpers/truncateMessages.js\";\nimport { openAiAgent } from \"./agentLayer/openAiAgent.js\";\n\nexport async function chatCompletion(\n  openai: OpenAI,\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const {\n    messages,\n    userSettings,\n    collectionId,\n    data,\n    signal,\n    conversationId,\n    currentTitle,\n    prompt,\n    mainWindow,\n  } = params;\n\n  const maxOutputTokens = (userSettings.maxTokens as number) || 4096;\n  const userId = params.activeUser.id;\n  const userTools = db.getUserTools(userId);\n  let agentActions = null;\n  let webSearchResult = null;\n  // If the user has Web Search enabled, we need to do web search first\n  if (userTools.find((tool) => tool.tool_id === 1)?.enabled === 1) {\n    const { content: actions, webSearchResult: webResults } = await openAiAgent(\n      openai,\n      messages,\n      maxOutputTokens,\n      userSettings,\n      signal\n    );\n    agentActions = actions;\n    webSearchResult = webResults;\n  }\n\n  const newMessages = await prepMessages(messages);\n\n  let dataCollectionInfo;\n\n  if (collectionId) {\n    dataCollectionInfo = db.getCollection(Number(collectionId)) as Collection;\n  }\n\n  // If the user has COT enabled, we need to do reasoning second\n  let reasoning;\n\n  if (userSettings.cot) {\n    // Do reasoning first\n    reasoning = await openAiChainOfThought(\n      openai,\n      newMessages,\n      maxOutputTokens,\n      userSettings,\n      data ? data : null,\n      dataCollectionInfo ? 
dataCollectionInfo : null,\n      String(JSON.stringify(agentActions)),\n      webSearchResult ? webSearchResult : undefined,\n      signal,\n      mainWindow\n    );\n\n    // Send end of reasoning marker\n    if (mainWindow) {\n      mainWindow.webContents.send(\"reasoningEnd\");\n    }\n  }\n\n  const newSysPrompt = await returnSystemPrompt(\n    prompt,\n    dataCollectionInfo,\n    reasoning ? reasoning : null,\n    webSearchResult ? webSearchResult : undefined,\n    data\n  );\n  // Truncate messages to fit within token limits while preserving max output tokens\n  const truncatedMessages = truncateMessages(newMessages, maxOutputTokens);\n  truncatedMessages.unshift(newSysPrompt);\n  let stream;\n  if (userSettings.model === \"o3-mini-2025-01-31\") {\n    stream = await openai.chat.completions.create(\n      {\n        model: userSettings.model as string,\n        messages: truncatedMessages,\n        stream: true,\n        reasoning_effort: userSettings.reasoningEffort as ReasoningEffort,\n      },\n      { signal }\n    );\n  } else {\n    stream = await openai.chat.completions.create(\n      {\n        model: userSettings.model as string,\n        messages: truncatedMessages,\n        stream: true,\n        temperature: Number(userSettings.temperature),\n        max_tokens: Number(maxOutputTokens),\n      },\n      { signal }\n    );\n  }\n  const newMessage: Message = {\n    role: \"assistant\",\n    content: \"\",\n    timestamp: new Date(),\n    data_content: data ? 
JSON.stringify(data) : undefined,\n  };\n\n  try {\n    for await (const chunk of stream) {\n      if (signal?.aborted) {\n        throw new Error(\"AbortError\");\n      }\n      const content = chunk.choices[0]?.delta?.content || \"\";\n      newMessage.content += content;\n      sendMessageChunk(content, mainWindow);\n    }\n\n    if (mainWindow) {\n      mainWindow.webContents.send(\"streamEnd\");\n    }\n\n    return {\n      id: conversationId,\n      messages: [...messages, newMessage],\n      reasoning: reasoning || \"\",\n      title: currentTitle,\n      content: newMessage.content,\n      aborted: false,\n    };\n  } catch (error) {\n    if (\n      signal?.aborted ||\n      (error instanceof Error && error.message === \"AbortError\")\n    ) {\n      return {\n        id: conversationId,\n        messages: messages,\n        reasoning: reasoning || \"\",\n        title: currentTitle,\n        content: \"\",\n        aborted: true,\n      };\n    }\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/generateTitle.ts",
    "content": "import Anthropic from \"@anthropic-ai/sdk\";\nimport db from \"../db.js\";\nimport { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\n\nimport { GoogleGenerativeAI } from \"@google/generative-ai\";\nimport { providerInitialize } from \"./llmHelpers/providerInit.js\";\n\nasync function chatCompletionTitle(\n  input: string,\n  user: User,\n  provider: string,\n  model: string\n) {\n  const openai = await providerInitialize(provider, user);\n  let llmTitleRequest;\n  if (model === \"o3-mini-2025-01-31\") {\n    llmTitleRequest = await openai.chat.completions.create({\n      model: model,\n      messages: titleMessages(input),\n      max_completion_tokens: 20,\n    });\n  } else {\n    llmTitleRequest = await openai.chat.completions.create({\n      model: model,\n      messages: titleMessages(input),\n      max_tokens: 20,\n    });\n  }\n  return llmTitleRequest.choices[0]?.message?.content?.trim();\n}\n\nconst titleMessages = (input: string): ChatCompletionMessageParam[] => [\n  {\n    role: \"system\" as const,\n    content:\n      \"Generate a short, concise title (5 words or less) for a conversation based on the following message: Return the Title only and nothing else example response: 'Meeting with John' Return: 'Meeting with John'\",\n  },\n  { role: \"user\" as const, content: input },\n];\n\nasync function generateTitleOpenRouter(input: string, user: User) {\n  return chatCompletionTitle(input, user, \"openrouter\", \"openai/gpt-3.5-turbo\");\n}\n\nasync function generateTitleDeepSeek(input: string, user: User) {\n  return chatCompletionTitle(input, user, \"deepseek\", \"deepseek-chat\");\n}\n\nasync function generateTitleCustom(\n  input: string,\n  user: User,\n  userSettings: UserSettings\n) {\n  if (!userSettings.selectedCustomId) {\n    throw new Error(\"Custom API not found\");\n  }\n  const openai = await providerInitialize(\"custom\", user);\n  const stream = await openai.chat.completions.create({\n    model: 
userSettings.model || \"\",\n    messages: titleMessages(input),\n    stream: true,\n    temperature: 0.7,\n    max_tokens: 20,\n    top_p: 0.95,\n    presence_penalty: 0.1,\n    frequency_penalty: 0.1,\n  });\n  let generatedTitle = \"\";\n  for await (const chunk of stream) {\n    const content = chunk.choices[0]?.delta?.content || \"\";\n    generatedTitle += content;\n  }\n  return generatedTitle;\n}\n\nasync function generateTitleAnthropic(input: string, user: User) {\n  let apiKey = \"\";\n  try {\n    apiKey = db.getApiKey(user.id, \"anthropic\");\n  } catch (error) {\n    console.error(\"Error getting API key:\", error);\n  }\n  if (!apiKey) {\n    throw new Error(\"Anthropic API key not found for the active user\");\n  }\n  const anthropic = new Anthropic({ apiKey });\n  const llmTitleRequest = (await anthropic.messages.create({\n    model: \"claude-3-sonnet-20240229\",\n    max_tokens: 20,\n    system:\n      \"Generate a short, concise title (5 words or less) for a conversation based on the following message: Return the Title only and nothing else example response: 'Meeting with John' Return: 'Meeting with John'\",\n    messages: [\n      {\n        role: \"user\",\n        content: input,\n      },\n    ],\n  })) as unknown as {\n    content: { text: string }[];\n  };\n\n  const generatedTitle = llmTitleRequest.content[0].text;\n  return generatedTitle || \"New Conversation\";\n}\n\nasync function generateTitleGemini(input: string, user: User) {\n  let apiKey = \"\";\n  try {\n    apiKey = db.getApiKey(user.id, \"gemini\");\n  } catch (error) {\n    console.error(\"Error getting API key:\", error);\n  }\n  if (!apiKey) {\n    throw new Error(\"Gemini API key not found for the active user\");\n  }\n  const genAI = new GoogleGenerativeAI(apiKey);\n\n  const model = genAI.getGenerativeModel({ model: \"gemini-1.5-flash\" });\n  const titleResult = await model.generateContent(\n    \"Generate a short, concise title (5 words or less) for a conversation based 
on the following message: Return the Title only and nothing else example response: 'Meeting with John' Return: 'Meeting with John'\\n\\n\" +\n      input\n  );\n  const generatedTitle = titleResult.response.text().trim();\n\n  return generatedTitle ?? \"New Conversation\";\n}\n\nasync function generateTitleXAI(input: string, user: User) {\n  return chatCompletionTitle(input, user, \"xai\", \"grok-beta\");\n}\n\nasync function generateOllamaTitle(input: string, model: string) {\n  try {\n    const messages = titleMessages(input);\n    const response = await fetch(\"http://localhost:11434/api/chat\", {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n      },\n      body: JSON.stringify({\n        model: model,\n        messages: messages,\n        stream: false, // Disable streaming to get a single response\n      }),\n    });\n\n    if (!response.ok) {\n      throw new Error(\n        `Ollama API error: ${response.status} ${response.statusText}`\n      );\n    }\n\n    const text = await response.text();\n    // Ollama returns one JSON object per line\n    const lines = text.split(\"\\n\").filter((line) => line.trim());\n    const lastLine = lines[lines.length - 1];\n    const lastResponse = JSON.parse(lastLine);\n    if (!lastResponse.message?.content) {\n      console.warn(\"Empty response from Ollama:\", lastResponse);\n      return \"New Conversation\";\n    }\n\n    return lastResponse.message.content.trim() || \"New Conversation\";\n  } catch (error) {\n    console.error(\"Error generating title:\", error);\n    return \"New Conversation\";\n  }\n}\n\nasync function generateTitleOpenAI(input: string, user: User) {\n  return chatCompletionTitle(input, user, \"openai\", \"gpt-4o\");\n}\n\nasync function generateTitleAzureOpenAI(input: string, user: User) {\n  return chatCompletionTitle(input, user, \"azure open ai\", \"gpt-4o\");\n}\n\nasync function generateTitleLocalOpenAI(\n  input: string,\n  user: User,\n  
userSettings: UserSettings\n) {\n  const openai = await providerInitialize(\"local\", user);\n  const stream = await openai.chat.completions.create({\n    model: userSettings.model || \"\",\n    messages: titleMessages(input),\n    stream: true,\n    temperature: 0.7,\n    max_tokens: 20,\n    top_p: 0.95,\n    presence_penalty: 0.1,\n    frequency_penalty: 0.1,\n  });\n  let generatedTitle = \"\";\n  for await (const chunk of stream) {\n    const content = chunk.choices[0]?.delta?.content || \"\";\n    generatedTitle += content;\n  }\n  return generatedTitle;\n}\n\nexport async function generateTitle(input: string, user: User) {\n  const userSettings = await db.getUserSettings(user.id);\n  switch (userSettings.provider?.toLowerCase()) {\n    case \"openai\":\n      return generateTitleOpenAI(input, user);\n    case \"openrouter\":\n      return generateTitleOpenRouter(input, user);\n    case \"azure open ai\":\n      return generateTitleAzureOpenAI(input, user);\n    case \"anthropic\":\n      return generateTitleAnthropic(input, user);\n    case \"gemini\":\n      return generateTitleGemini(input, user);\n    case \"xai\":\n      return generateTitleXAI(input, user);\n    case \"local\":\n      return generateTitleLocalOpenAI(input, user, userSettings);\n    case \"ollama\":\n      return generateOllamaTitle(input, userSettings.model || \"llama3.2\");\n    case \"custom\":\n      return generateTitleCustom(input, user, userSettings);\n    case \"deepseek\":\n      return generateTitleDeepSeek(input, user);\n    default:\n      return \"New Conversation\";\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/keyValidation.ts",
    "content": "import { OpenAIProviderAPIKeyCheck } from \"./apiCheckProviders/openai.js\";\nimport { AnthropicProviderAPIKeyCheck } from \"./apiCheckProviders/anthropic.js\";\nimport { GeminiProviderAPIKeyCheck } from \"./apiCheckProviders/gemini.js\";\nimport { XAIProviderAPIKeyCheck } from \"./apiCheckProviders/xai.js\";\nimport { OpenRouterProviderAPIKeyCheck } from \"./apiCheckProviders/openrouter.js\";\nimport log from \"electron-log\";\nimport { DeepSeekProviderAPIKeyCheck } from \"./apiCheckProviders/deepseek.js\";\nexport async function keyValidation({\n  apiKey,\n  inputProvider,\n}: {\n  apiKey: string;\n  inputProvider: string;\n}): Promise<{\n  error?: string;\n  success?: boolean;\n}> {\n  try {\n    let provider;\n    log.info(`Input provider: ${inputProvider}`);\n    switch (inputProvider.toLowerCase()) {\n      case \"deepseek\":\n        provider = DeepSeekProviderAPIKeyCheck;\n        break;\n      case \"openai\":\n        provider = OpenAIProviderAPIKeyCheck;\n        break;\n      case \"openrouter\":\n        provider = OpenRouterProviderAPIKeyCheck;\n        break;\n      case \"anthropic\":\n        provider = AnthropicProviderAPIKeyCheck;\n        break;\n      case \"gemini\":\n        provider = GeminiProviderAPIKeyCheck;\n        break;\n      case \"xai\":\n        provider = XAIProviderAPIKeyCheck;\n        break;\n      default:\n        throw new Error(\n          \"No AI provider selected. Please open Settings (top right) make sure you add an API key and select a provider under the 'AI Provider' tab.\"\n        );\n    }\n\n    const result = await provider(apiKey);\n    log.info(`Result: ${JSON.stringify(result)}`);\n    return {\n      ...result,\n    };\n  } catch (error) {\n    log.error(\"Error in chat request:\", error);\n    return {\n      error: \"Error in chat request\",\n    };\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/addAssistantMessage.ts",
    "content": "import db from \"../../db.js\";\n\nexport async function addAssistantMessage(\n  activeUser: User,\n  conversationId: bigint | number,\n  result: ProviderResponse,\n  collectionId?: number,\n  data?: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null\n) {\n  const assistantMessageId = db.addUserMessage(\n    activeUser.id,\n    Number(conversationId),\n    \"assistant\",\n    result.content,\n    result.reasoning,\n    collectionId ? Number(collectionId) : undefined\n  ).lastInsertRowid;\n  if (data !== null) {\n    db.addRetrievedData(Number(assistantMessageId), JSON.stringify(data));\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/addUserMessage.ts",
    "content": "import db from \"../../db.js\";\n\nexport async function addUserMessage(\n  activeUser: User,\n  conversationId: number,\n  messages: Message[]\n) {\n  db.addUserMessage(\n    activeUser.id,\n    Number(conversationId),\n    \"user\",\n    messages[messages.length - 1].content\n  );\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/collectionData.ts",
    "content": "import db from \"../../db.js\";\nimport { vectorstoreQuery } from \"../../embedding/vectorstoreQuery.js\";\nimport log from \"electron-log\";\nimport os from \"os\";\n\nexport async function ifCollection(\n  messages: Message[],\n  activeUser: User,\n  collectionId: bigint | number,\n  platform: string\n) {\n  let data: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null = null;\n\n  const collectionName = await db.getCollectionName(Number(collectionId));\n  log.info(`Collection name: ${collectionName}`);\n  try {\n    const vectorstoreData = await vectorstoreQuery({\n      query: messages[messages.length - 1].content,\n      userId: activeUser.id,\n      userName: activeUser.name,\n      collectionId: Number(collectionId),\n      collectionName: collectionName.name,\n    });\n\n    if (vectorstoreData.status === \"error\") {\n      if (vectorstoreData.message === \"Unauthorized\") {\n        const newMessage = {\n          role: \"assistant\",\n          content:\n            `There is an issue with the SECRET_KEY not being in sync across the front/backend.\\n\\n` +\n            `Please try the following steps:\\n` +\n            `1. Restart your PC\\n` +\n            `2. If the issue persists, check your logs at:\\n` +\n            `   ${\n              platform === \"darwin\"\n                ? os.homedir() + \"/Library/Application Support/notate/main.log\"\n                : platform === \"win32\"\n                ? os.homedir() + \"/AppData/Roaming/notate/main.log\"\n                : os.homedir() + \"~/.config/notate/main.log\"\n            }\\n\\n` +\n            `3. 
Open a GitHub issue at https://github.com/CNTRLAI/notate and include your logs`,\n          timestamp: new Date(),\n          data_content: undefined,\n        } as Message;\n        return {\n          id: -1,\n          messages: [...messages, newMessage],\n          title: \"Need API Key\",\n        };\n      }\n    }\n    if (vectorstoreData) {\n      data = {\n        top_k: vectorstoreData.results.length,\n        results: vectorstoreData.results,\n      };\n    }\n  } catch (error) {\n    const newMessage = {\n      role: \"assistant\",\n      content: `Error in vectorstore query: ${error}`,\n      timestamp: new Date(),\n      data_content: undefined,\n    } as Message;\n    log.error(`Error in vectorstore query: ${error}`);\n    return {\n      id: -1,\n      messages: [...messages, newMessage],\n      title: \"Error in vectorstore query\",\n    };\n  }\n  return { collectionData: data };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/countMessageTokens.ts",
    "content": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { encoding_for_model } from \"@dqbd/tiktoken\";\n\n// Helper function to count tokens in a message\nexport function countMessageTokens(\n  message: ChatCompletionMessageParam\n): number {\n  const encoder = encoding_for_model(\"gpt-3.5-turbo\");\n  const content = typeof message.content === \"string\" ? message.content : \"\";\n  const tokens = encoder.encode(content);\n  encoder.free(); // Free up memory\n  return tokens.length + 4; // 4 tokens for message format\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/getUserPrompt.ts",
    "content": "import db from \"../../db.js\";\n\nexport async function getUserPrompt(\n  activeUser: User,\n  userSettings: UserSettings,\n  prompt: string | undefined\n) {\n  const getPrompt = await db.getUserPrompt(\n    activeUser.id,\n    Number(userSettings.promptId)\n  );\n  if (getPrompt) {\n    prompt = getPrompt.prompt;\n  } else {\n    prompt = \"You are a helpful assistant.\";\n  }\n  return prompt;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/ifNewConvo.ts",
    "content": "import log from \"electron-log\";\nimport db from \"../../db.js\";\nimport { generateTitle } from \"../generateTitle.js\";\n\nexport async function ifNewConversation(messages: Message[], activeUser: User) {\n  try {\n    const newTitle = await generateTitle(\n      messages[messages.length - 1].content,\n      activeUser\n    );\n    \n    const addConversation = await db.addUserConversation(\n      activeUser.id,\n      newTitle\n    );\n    return { cId: addConversation.id, title: newTitle };\n  } catch (error) {\n    log.error(\"Error in ifNewConversation:\", error);\n    return { conversationId: null, title: null };\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/prepMessages.ts",
    "content": "import type { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\n\nexport async function prepMessages(messages: Message[]) {\n  // Sort messages by timestamp to ensure proper chronological order\n  const sortedMessages = [...messages].sort((a, b) => {\n    const timeA = a.timestamp ? new Date(a.timestamp).getTime() : 0;\n    const timeB = b.timestamp ? new Date(b.timestamp).getTime() : 0;\n    return timeA - timeB; // Oldest first\n  });\n\n  // Add timestamp context to messages\n  const newMessages: ChatCompletionMessageParam[] = sortedMessages.map(\n    (msg, index) => {\n      const isLastMessage = index === sortedMessages.length - 1;\n      const timeStr = msg.timestamp\n        ? new Date(msg.timestamp).toLocaleTimeString()\n        : \"\";\n      let content = msg.content;\n\n      // Only add context to user messages\n      if (msg.role === \"user\") {\n        content = `[${timeStr}] ${content}${\n          isLastMessage ? \" (most recent message)\" : \"\"\n        }`;\n      }\n\n      return {\n        role: msg.role as \"user\" | \"assistant\" | \"system\",\n        content: content,\n      };\n    }\n  );\n  return newMessages;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/providerInit.ts",
    "content": "import OpenAI, { AzureOpenAI } from \"openai\";\nimport db from \"../../db.js\";\nimport { getToken } from \"../../authentication/token.js\";\n\nexport async function providerInitialize(\n  providerName: string,\n  activeUser: User\n): Promise<OpenAI | AzureOpenAI> {\n  console.log(\"Initializing provider\", providerName);\n  if (providerName === \"ollama external\") {\n    console.log(\"Initializing External Ollama\");\n    return initializeExternalOllama(activeUser);\n  }\n  if (providerName === \"openrouter\") {\n    console.log(\"Initializing OpenRouter\");\n    return initializeOpenRouter(activeUser);\n  }\n  if (providerName === \"azure open ai\") {\n    console.log(\"Initializing Azure OpenAI\");\n    return initializeAzureOpenAI(activeUser);\n  }\n  if (providerName === \"deepseek\") {\n    console.log(\"Initializing DeepSeek\");\n    return initializeDeepSeek(activeUser);\n  }\n  if (providerName === \"xai\") {\n    console.log(\"Initializing XAI\");\n    return initializeXAI(activeUser);\n  }\n  if (providerName === \"custom\") {\n    console.log(\"Initializing Custom\");\n    return initializeCustom(activeUser);\n  }\n  if (providerName === \"local\") {\n    console.log(\"Initializing Local OpenAI\");\n    return initializeLocalOpenAI(activeUser);\n  }\n  console.log(\"Initializing OpenAI\");\n  const apiKey = db.getApiKey(activeUser.id, providerName);\n  const provider = new OpenAI({ apiKey });\n\n  if (!provider) {\n    throw new Error(`${providerName} instance not initialized`);\n  }\n  return provider;\n}\n\nasync function initializeExternalOllama(activeUser: User) {\n  const userSettings = await db.getUserSettings(activeUser.id);\n  if (!userSettings) {\n    throw new Error(\"User settings not found for the active user\");\n  }\n  if (!userSettings.selectedExternalOllamaId) {\n    throw new Error(\"External Ollama model not found for the active user\");\n  }\n  const externalOllama = db.getExternalOllama(activeUser.id);\n  const 
selectedExternalOllama = externalOllama.find(\n    (ollama) => ollama.id === userSettings.selectedExternalOllamaId\n  );\n  if (!selectedExternalOllama) {\n    throw new Error(\"External Ollama model not found for the active user\");\n  }\n  const openai = new OpenAI({\n    apiKey: selectedExternalOllama.api_key || \"ollama\",\n    baseURL: selectedExternalOllama.endpoint,\n  });\n  return openai;\n}\n\nasync function initializeOpenRouter(activeUser: User) {\n  const apiKey = db.getApiKey(activeUser.id, \"openrouter\");\n  const openai = new OpenAI({\n    apiKey,\n    baseURL: \"https://openrouter.ai/api/v1\",\n    defaultHeaders: {\n      \"HTTP-Referer\": \"https://notate.hairetsu.com\",\n      \"X-Title\": \"Notate\",\n    },\n  });\n  return openai;\n}\n\nasync function initializeAzureOpenAI(activeUser: User) {\n  const userSettings = await db.getUserSettings(activeUser.id);\n  if (!userSettings) {\n    throw new Error(\"User settings not found for the active user\");\n  }\n  if (!userSettings.selectedAzureId) {\n    throw new Error(\"Azure OpenAI model not found for the active user\");\n  }\n  const azureModel = db.getAzureOpenAIModel(\n    activeUser.id,\n    Number(userSettings.selectedAzureId)\n  );\n  if (!azureModel) {\n    throw new Error(\"Azure OpenAI model not found for the active user\");\n  }\n  const openai = new AzureOpenAI({\n    baseURL: azureModel.endpoint,\n    apiKey: azureModel.api_key,\n    deployment: azureModel.model,\n    apiVersion: \"2024-05-01-preview\",\n  });\n  return openai;\n}\n\nasync function initializeDeepSeek(activeUser: User) {\n  const apiKey = db.getApiKey(activeUser.id, \"deepseek\");\n  const openai = new OpenAI({\n    apiKey,\n    baseURL: \"https://api.deepseek.com\",\n    defaultHeaders: {\n      \"HTTP-Referer\": \"https://notate.hairetsu.com\",\n      \"X-Title\": \"Notate\",\n    },\n  });\n  return openai;\n}\n\nasync function initializeCustom(activeUser: User) {\n  let customAPIs;\n  const userSettings = await 
db.getUserSettings(activeUser.id);\n  if (userSettings.provider == \"custom\") {\n    customAPIs = db.getCustomAPI(activeUser.id);\n    if (customAPIs.length == 0 || userSettings.selectedCustomId == null) {\n      throw new Error(\"No custom API selected\");\n    }\n    const customAPI = customAPIs.find(\n      (api) => api.id == userSettings.selectedCustomId\n    );\n    if (!customAPI) {\n      throw new Error(\"Custom API not found\");\n    }\n    const openai = new OpenAI({\n      apiKey: customAPI.api_key,\n      baseURL: customAPI.endpoint,\n    });\n    return openai;\n  }\n  throw new Error(\"Custom API not found\");\n}\n\nasync function initializeLocalOpenAI(activeUser: User) {\n  const apiKey = await getToken({ userId: activeUser.id.toString() });\n  const openai = new OpenAI({\n    baseURL: \"http://127.0.0.1:47372\",\n    apiKey: apiKey,\n  });\n  return openai;\n}\n\nasync function initializeXAI(activeUser: User) {\n  const apiKey = db.getApiKey(activeUser.id, \"xai\");\n  const openai = new OpenAI({ apiKey, baseURL: \"https://api.x.ai/v1\" });\n  return openai;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/providersMap.ts",
    "content": "import { OpenAIProvider } from \"../providers/openai.js\";\nimport { AzureOpenAIProvider } from \"../providers/azureOpenAI.js\";\nimport { OpenRouterProvider } from \"../providers/openrouter.js\";\nimport { AnthropicProvider } from \"../providers/anthropic.js\";\nimport { GeminiProvider } from \"../providers/gemini.js\";\nimport { XAIProvider } from \"../providers/xai.js\";\nimport { LocalModelProvider } from \"../providers/localModel.js\";\nimport { OllamaProvider } from \"../providers/ollama.js\";\nimport { CustomProvider } from \"../providers/customEndpoint.js\";\nimport { DeepSeekProvider } from \"../providers/deepseek.js\";\nimport { ExternalOllamaProvider } from \"../providers/externalOllama.js\";\n\nexport const providersMap = {\n  openai: OpenAIProvider,\n  openrouter: OpenRouterProvider,\n  \"azure open ai\": AzureOpenAIProvider,\n  anthropic: AnthropicProvider,\n  gemini: GeminiProvider,\n  xai: XAIProvider,\n  local: LocalModelProvider,\n  ollama: OllamaProvider,\n  custom: CustomProvider,\n  deepseek: DeepSeekProvider,\n  \"ollama external\": ExternalOllamaProvider,\n};\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/returnReasoningPrompt.ts",
    "content": "export async function returnReasoningPrompt(\n  data: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null,\n  dataCollectionInfo: Collection | null\n) {\n  const reasoningPrompt =\n    \"You are a reasoning engine. Your task is to analyze the question and outline your step-by-step reasoning process for how to answer it. Keep your reasoning concise and focused on the key logical steps. Only return the reasoning process, do not provide the final answer.\" +\n    (data\n      ? \"The following is the data that the user has provided via their custom data collection: \" +\n        `\\n\\n${JSON.stringify(data)}` +\n        `\\n\\nCollection/Store Name: ${dataCollectionInfo?.name}` +\n        `\\n\\nCollection/Store Files: ${dataCollectionInfo?.files}` +\n        `\\n\\nCollection/Store Description: ${dataCollectionInfo?.description}` +\n        `\\n\\n*** THIS IS THE END OF THE DATA COLLECTION ***`\n      : \"\");\n  return reasoningPrompt;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/returnSystemPrompt.ts",
    "content": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\n\nexport async function returnSystemPrompt(\n  prompt: string | undefined,\n  dataCollectionInfo?: Collection | null,\n  reasoning?: string | null,\n  webSearchResult?: WebSearchResult | null,\n  data?: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null\n) {\n  const sysPrompt: ChatCompletionMessageParam = {\n    role: \"system\",\n    content:\n      \"When asked about previous messages, only consider messages marked as '(most recent message)' as the last message. \" +\n      prompt +\n      (reasoning\n        ? \"\\n\\nUse this reasoning process to answer the question (Reasoning has already been provided, DO NOT RE-REASON): \" +\n          reasoning +\n          \"\\n\\n\"\n        : \"\") +\n      (webSearchResult\n        ? \"\\n\\n If you get asked to visit or go to a web url or web action you already have and this is the following web results from the agent web tool used in the reasoning: \" +\n          JSON.stringify(webSearchResult) +\n          \"\\n\\n\"\n        : \"\") +\n      (data\n        ? \"The following is the data that the user has provided via their custom data collection: \" +\n          `\\n\\n${JSON.stringify(data)}` +\n          `\\n\\nCollection/Store Name: ${dataCollectionInfo?.name}` +\n          `\\n\\nCollection/Store Files: ${dataCollectionInfo?.files}` +\n          `\\n\\nCollection/Store Description: ${dataCollectionInfo?.description}`\n        : \"\") +\n      `\\n\\n Please provide a visually pleasing and easy to read response. the output can display markdown and code blocks.`,\n  };\n\n  return sysPrompt;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/sendMessageChunk.ts",
    "content": "import { BrowserWindow } from \"electron\";\n\nexport function sendMessageChunk(\n  content: string,\n  mainWindow: BrowserWindow | null\n) {\n  if (mainWindow) {\n    mainWindow.webContents.send(\"messageChunk\", content);\n  } else {\n    console.log(\"This no work cause Chunk not chunky\");\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llmHelpers/truncateMessages.ts",
    "content": "import { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { countMessageTokens } from \"./countMessageTokens.js\";\n\n// Helper function to truncate messages to fit within token limit\nexport function truncateMessages(\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  maxTotalTokens: number = 4096,\n  model?: string\n): ChatCompletionMessageParam[] {\n  const reservedTokens = 3; // Few tokens reserved for formatting\n\n  let totalTokens = messages.reduce(\n    (sum, msg) => sum + countMessageTokens(msg),\n    0\n  );\n\n  // Calculate available tokens for conversation\n  const availableTokens = maxTotalTokens - maxOutputTokens / 2 - reservedTokens;\n  const currentTokens = totalTokens;\n\n  // If we're under the limit and it's not deepseek-reasoner, return all messages unchanged\n  if (\n    currentTokens <= availableTokens &&\n    !model?.includes(\"deepseek-reasoner\")\n  ) {\n    return messages;\n  }\n\n  // Create a copy of messages for truncation\n  let truncatedMessages = [...messages];\n\n  // For deepseek-reasoner, ensure first message is from user\n  if (model?.includes(\"deepseek-reasoner\") && truncatedMessages.length > 0) {\n    // Find the first user message\n    const firstUserMsgIndex = truncatedMessages.findIndex(\n      (msg) => msg.role === \"user\"\n    );\n    if (firstUserMsgIndex > 0) {\n      // If we found a user message and it's not first, slice from there\n      truncatedMessages = truncatedMessages.slice(firstUserMsgIndex);\n    } else if (firstUserMsgIndex === -1) {\n      // If no user message found, we can't proceed with deepseek-reasoner\n      throw new Error(\"DeepSeek Reasoner requires at least one user message\");\n    }\n  }\n\n  // Only truncate if we're over the total token limit\n  while (currentTokens > availableTokens && truncatedMessages.length > 3) {\n    // Remove oldest messages first, keeping at least the last 3 messages\n    const removed = 
truncatedMessages.shift();\n    if (removed) {\n      totalTokens -= countMessageTokens(removed);\n    }\n  }\n\n  // Special case: if this is the first message, just return it\n  if (messages.length === 1 && messages[0].role === \"user\") {\n    return messages;\n  }\n\n  // For all other cases, ensure we have valid message pattern (user -> assistant -> user)\n  if (truncatedMessages.length >= 3) {\n    // Find last user message\n    const lastUserIndex = truncatedMessages.length - 1;\n    const secondLastUserIndex = truncatedMessages\n      .slice(0, -1)\n      .findLastIndex((msg) => msg.role === \"user\");\n    const assistantAfterSecondLastUser = truncatedMessages\n      .slice(secondLastUserIndex + 1)\n      .find((msg) => msg.role === \"assistant\");\n\n    if (\n      truncatedMessages[lastUserIndex].role !== \"user\" ||\n      !assistantAfterSecondLastUser\n    ) {\n      // If pattern is invalid, keep only the last 3 messages that match the pattern\n      const lastMessages = truncatedMessages.slice(-3);\n      if (\n        lastMessages[0].role === \"user\" &&\n        lastMessages[1].role === \"assistant\" &&\n        lastMessages[2].role === \"user\"\n      ) {\n        truncatedMessages = lastMessages;\n      } else {\n        throw new Error(\n          \"Cannot create a valid message pattern with user -> assistant -> user\"\n        );\n      }\n    }\n\n    // Trim from start to ensure first message is user\n    while (\n      truncatedMessages.length > 0 &&\n      truncatedMessages[0].role !== \"user\"\n    ) {\n      truncatedMessages.shift();\n    }\n  } else if (truncatedMessages.length !== 1) {\n    throw new Error(\n      \"Need at least 3 messages to maintain user -> assistant -> user pattern\"\n    );\n  }\n\n  return truncatedMessages;\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/llms.ts",
    "content": "import db from \"../db.js\";\nimport log from \"electron-log\";\nimport os from \"os\";\nimport { BrowserWindow } from \"electron\";\nimport { ifCollection } from \"./llmHelpers/collectionData.js\";\nimport { ifNewConversation } from \"./llmHelpers/ifNewConvo.js\";\nimport { getUserPrompt } from \"./llmHelpers/getUserPrompt.js\";\nimport { providersMap } from \"./llmHelpers/providersMap.js\";\nimport { addUserMessage } from \"./llmHelpers/addUserMessage.js\";\nimport { addAssistantMessage } from \"./llmHelpers/addAssistantMessage.js\";\n\nexport async function chatRequest(\n  messages: Message[],\n  activeUser: User,\n  mainWindow: BrowserWindow,\n  conversationId?: number,\n  title?: string,\n  collectionId?: bigint | number,\n  signal?: AbortSignal\n): Promise<ChatRequestResult> {\n  const platform = os.platform();\n\n  try {\n    const userSettings = await db.getUserSettings(activeUser.id);\n    let data;\n    if ((!title && conversationId) || (title === undefined && conversationId)) {\n      title = await db.getUserConversationTitle(conversationId, activeUser.id);\n    }\n\n    if (!conversationId) {\n      const { cId, title: newTitle } = await ifNewConversation(\n        messages,\n        activeUser\n      );\n      conversationId = cId;\n      title = newTitle;\n    }\n\n    if (collectionId) {\n      const { collectionData } = await ifCollection(\n        messages,\n        activeUser,\n        Number(collectionId),\n        platform\n      );\n      data = collectionData;\n    }\n\n    let prompt;\n    if (!prompt) {\n      prompt = await getUserPrompt(activeUser, userSettings, prompt);\n    } else {\n      prompt = \"You are a helpful assistant.\";\n    }\n\n    const provider =\n      providersMap[\n        userSettings?.provider?.toLowerCase() as keyof typeof providersMap\n      ];\n    if (!provider) {\n      throw new Error(\n        \"No AI provider selected. 
Please open Settings (top right) make sure you add an API key and select a provider under the 'AI Provider' tab.\"\n      );\n    }\n\n    /* Fallback Settings last ditch effort to save from a failure */\n    if (!title) {\n      title = messages[messages.length - 1].content.substring(0, 20);\n    }\n    if (!userSettings.temperature) {\n      userSettings.temperature = 0.5;\n    }\n    if (!conversationId) {\n      throw new Error(\"Conversation ID is required\");\n    }\n\n    const result = (await provider({\n      messages,\n      activeUser,\n      userSettings,\n      prompt,\n      conversationId,\n      mainWindow,\n      currentTitle: title,\n      collectionId: collectionId ? Number(collectionId) : undefined,\n      data: data ? data : undefined,\n      signal,\n    })) as ProviderResponse;\n\n    try {\n      await addUserMessage(activeUser, conversationId, messages);\n      await addAssistantMessage(\n        activeUser,\n        conversationId,\n        result,\n        collectionId ? Number(collectionId) : undefined,\n        data ? data : null\n      );\n    } catch (error) {\n      log.error(\"Error adding messages:\", error);\n    }\n\n    log.info(`Returning result`);\n\n    return {\n      ...result,\n      messages: result.messages.map((msg) => ({\n        ...msg,\n        reasoning_content:\n          msg.role === \"assistant\" ? result.reasoning : undefined,\n      })),\n      data_content: data ? JSON.stringify(data) : undefined,\n      reasoning_content: result.reasoning ? 
result.reasoning : undefined,\n      title: title || messages[messages.length - 1].content.substring(0, 20),\n    };\n  } catch (error) {\n    log.error(\"Error in chat request:\", error);\n\n    let errorMessage = \"An unexpected error occurred.\";\n\n    if (error instanceof Error) {\n      // Handle API key related errors\n      if (\n        error.message.includes(\"API key\") ||\n        error.message.includes(\"provider\")\n      ) {\n        errorMessage =\n          \"Please add an API key and select an AI Model in Settings.\";\n      }\n      // Handle aborted requests\n      else if (error.message.includes(\"aborted\")) {\n        errorMessage = \"The request was cancelled.\";\n      }\n      // Use the actual error message for other cases\n      else {\n        errorMessage = error.message;\n      }\n    }\n\n    const newMessage = {\n      role: \"assistant\",\n      content: errorMessage,\n      timestamp: new Date(),\n      data_content: undefined,\n    } as Message;\n\n    log.info(`New message: ${newMessage}`);\n\n    return {\n      id: -1,\n      messages: [...messages, newMessage],\n      title: \"Error\",\n    };\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/anthropic.ts",
    "content": "import db from \"../../db.js\";\nimport Anthropic from \"@anthropic-ai/sdk\";\nimport { BrowserWindow } from \"electron\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { truncateMessages } from \"../llmHelpers/truncateMessages.js\";\nimport type { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { returnSystemPrompt } from \"../llmHelpers/returnSystemPrompt.js\";\nimport { returnReasoningPrompt } from \"../llmHelpers/returnReasoningPrompt.js\";\nimport { anthropicAgent } from \"../agentLayer/anthropicAgent.js\";\n\nasync function chainOfThought(\n  anthropic: Anthropic,\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  userSettings: UserSettings,\n  data: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null,\n  dataCollectionInfo: Collection | null,\n  signal?: AbortSignal,\n  mainWindow: BrowserWindow | null = null\n) {\n  const reasoningPrompt = await returnReasoningPrompt(data, dataCollectionInfo);\n\n  const truncatedMessages = truncateMessages(\n    messages as Message[],\n    maxOutputTokens\n  );\n\n  const stream = await anthropic.messages.stream(\n    {\n      messages: truncatedMessages.map((msg) => ({\n        role: msg.role === \"assistant\" ? \"assistant\" : \"user\",\n        content: msg.content as string,\n      })),\n      system: reasoningPrompt,\n      model: userSettings.model as string,\n      max_tokens: Number(maxOutputTokens),\n      temperature: Number(userSettings.temperature),\n    },\n    { signal }\n  );\n\n  let reasoningContent = \"\";\n  try {\n    for await (const chunk of stream) {\n      if (signal?.aborted) {\n        throw new Error(\"AbortError\");\n      }\n      if (chunk.type === \"content_block_delta\") {\n        const content = \"text\" in chunk.delta ? 
chunk.delta.text : \"\";\n        reasoningContent += content;\n        sendMessageChunk(\"[REASONING]: \" + content, mainWindow);\n      }\n    }\n  } catch (error) {\n    if (\n      signal?.aborted ||\n      (error instanceof Error && error.message === \"AbortError\")\n    ) {\n      throw error;\n    }\n  }\n\n  return reasoningContent;\n}\n\nexport async function AnthropicProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const {\n    messages,\n    activeUser,\n    userSettings,\n    prompt,\n    conversationId,\n    mainWindow,\n    currentTitle,\n    collectionId,\n    data,\n    signal,\n  } = params;\n\n  const apiKey = db.getApiKey(activeUser.id, \"anthropic\");\n  if (!apiKey) {\n    throw new Error(\"Anthropic API key not found for the active user\");\n  }\n\n  const anthropic = new Anthropic({ apiKey });\n\n  const userTools = db.getUserTools(activeUser.id);\n\n  let agentActions = null;\n  let webSearchResult = null;\n  const maxOutputTokens = (userSettings.maxTokens as number) || 4096;\n\n  // If the user has Web Search enabled, we need to do web search first\n  if (userTools.find((tool) => tool.tool_id === 1)?.enabled === 1) {\n    const { content: actions, webSearchResult: webResults } =\n      await anthropicAgent(\n        anthropic,\n        messages,\n        maxOutputTokens,\n        signal,\n        mainWindow\n      );\n    agentActions = actions;\n    webSearchResult = webResults;\n  }\n  console.log(\"agentActions\", agentActions);\n  const newMessage: Message = {\n    role: \"assistant\",\n    content: \"\",\n    timestamp: new Date(),\n    data_content: data ? 
JSON.stringify(data) : undefined,\n  };\n\n  const newMessages = messages.map((msg) => ({\n    role: msg.role as \"user\" | \"assistant\" | \"system\",\n    content: msg.content,\n  })) as ChatCompletionMessageParam[];\n\n  let dataCollectionInfo;\n  if (collectionId) {\n    dataCollectionInfo = db.getCollection(collectionId) as Collection;\n  }\n\n  let reasoning: string | undefined;\n\n  if (userSettings.cot) {\n    // Do reasoning first\n    reasoning = await chainOfThought(\n      anthropic,\n      newMessages,\n      maxOutputTokens,\n      userSettings,\n      data ? data : null,\n      dataCollectionInfo ? dataCollectionInfo : null,\n      signal,\n      mainWindow\n    );\n\n    // Send end of reasoning marker\n    if (mainWindow) {\n      mainWindow.webContents.send(\"reasoningEnd\");\n    }\n  }\n\n  const sysPrompt = await returnSystemPrompt(\n    prompt,\n    dataCollectionInfo,\n    reasoning || null,\n    webSearchResult || undefined,\n    data\n  );\n  // Truncate messages to fit within token limits\n  const truncatedMessages = truncateMessages(newMessages, maxOutputTokens);\n\n  const stream = await anthropic.messages.stream(\n    {\n      temperature: Number(userSettings.temperature),\n      system: sysPrompt.content,\n      messages: truncatedMessages.map((msg) => ({\n        role: msg.role === \"assistant\" ? \"assistant\" : \"user\",\n        content: msg.content as string,\n      })),\n      model: userSettings.model as string,\n      max_tokens: Number(maxOutputTokens),\n    },\n    { signal }\n  );\n\n  try {\n    for await (const chunk of stream) {\n      if (signal?.aborted) {\n        throw new Error(\"AbortError\");\n      }\n      if (chunk.type === \"content_block_delta\") {\n        const content = \"text\" in chunk.delta ? 
chunk.delta.text : \"\";\n        newMessage.content += content;\n        sendMessageChunk(content, mainWindow);\n      }\n    }\n\n    if (mainWindow) {\n      mainWindow.webContents.send(\"streamEnd\");\n    }\n\n    return {\n      id: conversationId,\n      messages: [...messages, newMessage],\n      title: currentTitle,\n      content: newMessage.content,\n      reasoning: reasoning || \"\",\n      aborted: false,\n    };\n  } catch (error) {\n    if (\n      signal?.aborted ||\n      (error instanceof Error && error.message === \"AbortError\")\n    ) {\n      if (mainWindow) {\n        mainWindow.webContents.send(\"streamEnd\");\n      }\n      return {\n        id: conversationId,\n        messages: [...messages, { ...newMessage }],\n        title: currentTitle,\n        content: newMessage.content,\n        reasoning: reasoning || \"\",\n        aborted: true,\n      };\n    }\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/azureOpenAI.ts",
    "content": "import { chatCompletion } from \"../chatCompletion.js\";\nimport { providerInitialize } from \"../llmHelpers/providerInit.js\";\nexport async function AzureOpenAIProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const openai = await providerInitialize(\"azure open ai\", params.activeUser);\n\n  return chatCompletion(openai, params);\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/customEndpoint.ts",
    "content": "import { chatCompletion } from \"../chatCompletion.js\";\nimport { providerInitialize } from \"../llmHelpers/providerInit.js\";\n\nexport async function CustomProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const openai = await providerInitialize(\"custom\", params.activeUser);\n  return chatCompletion(openai, params);\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/deepseek.ts",
    "content": "import OpenAI from \"openai\";\nimport db from \"../../db.js\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { truncateMessages } from \"../llmHelpers/truncateMessages.js\";\nimport { returnSystemPrompt } from \"../llmHelpers/returnSystemPrompt.js\";\nimport { prepMessages } from \"../llmHelpers/prepMessages.js\";\nimport { openAiChainOfThought } from \"../reasoningLayer/openAiChainOfThought.js\";\nimport { providerInitialize } from \"../llmHelpers/providerInit.js\";\nimport { openAiAgent } from \"../agentLayer/openAiAgent.js\";\n\ninterface DeepSeekDelta\n  extends OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta {\n  reasoning_content?: string;\n}\n\nexport async function DeepSeekProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const {\n    messages,\n    activeUser,\n    userSettings,\n    prompt,\n    conversationId,\n    mainWindow,\n    currentTitle,\n    collectionId,\n    data,\n    signal,\n  } = params;\n\n  const openai = await providerInitialize(\"deepseek\", activeUser);\n\n  const maxOutputTokens = (userSettings.maxTokens as number) || 4096;\n  const newMessages = await prepMessages(messages);\n\n  const userTools = db.getUserTools(activeUser.id);\n\n  let agentActions = null;\n  let webSearchResult = null;\n  // If the user has Web Search enabled, we need to do web search first\n  if (userTools.find((tool) => tool.tool_id === 1)?.enabled === 1) {\n    const { content: actions, webSearchResult: webResults } = await openAiAgent(\n      openai,\n      messages,\n      maxOutputTokens,\n      userSettings,\n      signal\n    );\n    agentActions = actions;\n    webSearchResult = webResults;\n  }\n\n  let dataCollectionInfo;\n  if (collectionId) {\n    dataCollectionInfo = db.getCollection(collectionId) as Collection;\n  }\n\n  let reasoning;\n  // Only do manual CoT if not using deepseek-reasoner\n  if (userSettings.cot && 
!userSettings.model?.includes(\"deepseek-reasoner\")) {\n    // Do reasoning first\n    reasoning = await openAiChainOfThought(\n      openai,\n      newMessages,\n      maxOutputTokens,\n      userSettings,\n      data ? data : null,\n      dataCollectionInfo ? dataCollectionInfo : null,\n      String(agentActions),\n      webSearchResult ? webSearchResult : undefined,\n      signal,\n      mainWindow\n    );\n\n    // Send end of reasoning marker\n    if (mainWindow) {\n      mainWindow.webContents.send(\"reasoningEnd\");\n    }\n  }\n  const newSysPrompt = await returnSystemPrompt(\n    prompt,\n    dataCollectionInfo,\n    reasoning ? reasoning : null,\n    webSearchResult ? webSearchResult : undefined,\n    data\n  );\n\n  // Truncate messages to fit within token limits while preserving max output tokens\n  const truncatedMessages = truncateMessages(newMessages, maxOutputTokens);\n  truncatedMessages.unshift(newSysPrompt);\n  const stream = await openai.chat.completions.create(\n    {\n      model: userSettings.model as string,\n      messages: truncatedMessages,\n      stream: true,\n      temperature: Number(userSettings.temperature),\n      max_tokens: maxOutputTokens,\n    },\n    { signal }\n  );\n\n  const newMessage: Message = {\n    role: \"assistant\",\n    content: \"\",\n    timestamp: new Date(),\n    data_content: data ? 
JSON.stringify(data) : undefined,\n  };\n\n  let reasoningContent = \"\";\n\n  try {\n    for await (const chunk of stream) {\n      if (signal?.aborted) {\n        throw new Error(\"AbortError\");\n      }\n\n      const delta = chunk.choices[0]?.delta as DeepSeekDelta;\n\n      if (delta?.reasoning_content) {\n        reasoningContent += delta.reasoning_content;\n        sendMessageChunk(\"[REASONING]:\" + delta.reasoning_content, mainWindow);\n      } else if (delta?.content) {\n        const content = delta.content;\n        newMessage.content += content;\n        sendMessageChunk(content, mainWindow);\n      }\n    }\n\n    if (mainWindow) {\n      mainWindow.webContents.send(\"streamEnd\");\n    }\n\n    return {\n      id: conversationId,\n      messages: [...messages, { ...newMessage, content: newMessage.content }],\n      reasoning: reasoningContent || reasoning, // Use either deepseek-reasoner content or manual CoT reasoning\n      title: currentTitle,\n      content: newMessage.content,\n      aborted: false,\n    };\n  } catch (error) {\n    if (\n      signal?.aborted ||\n      (error instanceof Error && error.message === \"AbortError\")\n    ) {\n      return {\n        id: conversationId,\n        messages: messages,\n        reasoning: reasoningContent,\n        title: currentTitle,\n        content: \"\",\n        aborted: true,\n      };\n    }\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/externalOllama.ts",
    "content": "import { chatCompletion } from \"../chatCompletion.js\";\nimport { providerInitialize } from \"../llmHelpers/providerInit.js\";\n\nexport async function ExternalOllamaProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const openai = await providerInitialize(\"ollama external\", params.activeUser);\n  return chatCompletion(openai, params);\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/gemini.ts",
    "content": "import {\n  GoogleGenerativeAI,\n  GenerativeModel,\n  ChatSession,\n  Content,\n} from \"@google/generative-ai\";\nimport db from \"../../db.js\";\nimport { BrowserWindow } from \"electron\";\nimport { truncateMessages } from \"../llmHelpers/truncateMessages.js\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport type { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { returnSystemPrompt } from \"../llmHelpers/returnSystemPrompt.js\";\nimport { geminiAgent } from \"../agentLayer/geminiAgent.js\";\n\nlet genAI: GoogleGenerativeAI;\n\nasync function initializeGemini(apiKey: string) {\n  genAI = new GoogleGenerativeAI(apiKey);\n}\n\nasync function chainOfThought(\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  userSettings: UserSettings,\n  prompt: string,\n  data: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null,\n  dataCollectionInfo: Collection | null,\n  signal?: AbortSignal,\n  mainWindow: BrowserWindow | null = null\n) {\n  // Use reasoning-specific system prompt\n  const sysPromptContent =\n    \"You are a reasoning engine. Your task is to analyze the question and outline your step-by-step reasoning process for how to answer it. Keep your reasoning concise and focused on the key logical steps. Only return the reasoning process, do not provide the final answer.\" +\n    (data\n      ? 
\"\\n\\nThe following is the data that the user has provided via their custom data collection: \" +\n        `\\n\\n${JSON.stringify(data)}` +\n        `\\n\\nCollection/Store Name: ${dataCollectionInfo?.name}` +\n        `\\n\\nCollection/Store Files: ${dataCollectionInfo?.files}` +\n        `\\n\\nCollection/Store Description: ${dataCollectionInfo?.description}` +\n        `\\n\\n*** THIS IS THE END OF THE DATA COLLECTION ***`\n      : \"\");\n\n  const truncatedMessages = truncateMessages(messages, maxOutputTokens);\n\n  // Create a separate array for reasoning messages\n  const reasoningMessages = [...truncatedMessages];\n  if (reasoningMessages.length > 0) {\n    const firstMsg = reasoningMessages[0];\n    firstMsg.content = `${sysPromptContent}\\n\\n${firstMsg.content}`;\n  }\n\n  const chat = genAI\n    .getGenerativeModel({\n      model: userSettings.model as string,\n    })\n    .startChat({\n      history: reasoningMessages\n        .filter((msg) => msg.role !== \"system\")\n        .map((msg) => ({\n          role: msg.role === \"assistant\" ? 
\"model\" : \"user\",\n          parts: [{ text: msg.content as string }],\n        })),\n      generationConfig: {\n        temperature: Number(userSettings.temperature),\n        maxOutputTokens: maxOutputTokens,\n      },\n    });\n\n  let reasoningContent = \"\";\n  const result = await chat.sendMessageStream(\n    messages[messages.length - 1].content as string,\n    { signal }\n  );\n\n  for await (const chunk of result.stream) {\n    if (signal?.aborted) {\n      throw new Error(\"AbortError\");\n    }\n    let content = \"\";\n\n    if (typeof chunk.text === \"function\") {\n      content = chunk.text();\n    } else if (chunk.candidates && chunk.candidates.length > 0) {\n      const candidate = chunk.candidates[0];\n      if (candidate.content && candidate.content.parts) {\n        content = candidate.content.parts\n          .filter((part) => part.text)\n          .map((part) => part.text)\n          .join(\"\");\n      }\n    }\n\n    if (content) {\n      reasoningContent += content;\n      sendMessageChunk(\"[REASONING]: \" + content, mainWindow);\n    }\n  }\n\n  return reasoningContent;\n}\n\nexport async function GeminiProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const {\n    messages,\n    activeUser,\n    userSettings,\n    prompt,\n    conversationId,\n    mainWindow,\n    currentTitle,\n    collectionId,\n    data,\n    signal,\n  } = params;\n  const apiKey = db.getApiKey(activeUser.id, \"gemini\");\n  if (!apiKey) {\n    throw new Error(\"Gemini API key not found for the active user\");\n  }\n  await initializeGemini(apiKey);\n\n  if (!genAI) {\n    throw new Error(\"Gemini instance not initialized\");\n  }\n\n  let dataCollectionInfo;\n  if (collectionId) {\n    dataCollectionInfo = db.getCollection(collectionId) as Collection;\n  }\n\n  const model: GenerativeModel = genAI.getGenerativeModel({\n    model: userSettings.model as string,\n  });\n\n  const maxOutputTokens = (userSettings.maxTokens as number) || 
4096;\n  let webSearchResult;\n  let agentActions;\n  const userTools = db.getUserTools(activeUser.id);\n  if (\n    userTools.length > 0 &&\n    userTools.some(\n      (tool: { tool_id: number; enabled: number }) =>\n        tool.tool_id === 1 && tool.enabled === 1\n    )\n  ) {\n    const { content, webSearchResult: webSearchResultFromAgent } =\n      await geminiAgent(\n        genAI,\n        messages,\n        maxOutputTokens,\n        userSettings,\n        signal,\n        mainWindow\n      );\n    webSearchResult = webSearchResultFromAgent;\n    agentActions = content;\n  }\n  console.log(agentActions);\n  const newMainMessages = messages.map((msg) => ({\n    role: msg.role as \"user\" | \"assistant\" | \"system\",\n    content: msg.content,\n  })) as ChatCompletionMessageParam[];\n\n  const newReasoningMessages = messages.map((msg) => ({\n    role: msg.role as \"user\" | \"assistant\" | \"system\",\n    content: msg.content,\n  })) as ChatCompletionMessageParam[];\n\n  // Truncate messages to fit within token limits\n  const truncatedMessages = truncateMessages(\n    newReasoningMessages,\n    maxOutputTokens\n  );\n\n  const temperature = Number(userSettings.temperature);\n\n  let reasoning;\n  if (userSettings.cot) {\n    // Do reasoning first\n    reasoning = await chainOfThought(\n      truncatedMessages,\n      maxOutputTokens,\n      userSettings,\n      prompt || \"You are a helpful assistant.\",\n      data ? data : null,\n      dataCollectionInfo ? 
dataCollectionInfo : null,\n      signal,\n      mainWindow\n    );\n\n    // Send end of reasoning marker\n    if (mainWindow) {\n      mainWindow.webContents.send(\"reasoningEnd\");\n    }\n  }\n\n  const newSysPrompt = await returnSystemPrompt(\n    prompt,\n    dataCollectionInfo,\n    reasoning || null,\n    webSearchResult || undefined,\n    data\n  );\n  // Create a fresh copy of messages for the main response\n  const mainMessages = [...newMainMessages];\n  // Add system prompt as first message if messages array is empty, otherwise update first message\n  if (mainMessages.length === 0) {\n    mainMessages.push({\n      role: \"user\",\n      content: JSON.stringify(newSysPrompt),\n    });\n  } else {\n    mainMessages[0] = {\n      ...mainMessages[0],\n      content: `${JSON.stringify(newSysPrompt)}\\n\\n${mainMessages[0].content}`,\n    };\n  }\n  const chat: ChatSession = model.startChat({\n    history: mainMessages\n      .filter((msg) => msg.role !== \"system\")\n      .map((msg) => ({\n        role: msg.role === \"assistant\" ? \"model\" : \"user\",\n        parts: [{ text: msg.content as string }],\n      })) as Content[],\n    generationConfig: {\n      temperature: temperature,\n      maxOutputTokens: maxOutputTokens,\n      topP: reasoning ? 0.1 : Number(userSettings.topP || 1),\n    },\n  });\n\n  const newMessage: Message = {\n    role: \"assistant\",\n    content: \"\",\n    timestamp: new Date(),\n    data_content: data ? 
JSON.stringify(data) : undefined,\n  };\n\n  try {\n    const result = await chat.sendMessageStream(\n      messages[messages.length - 1].content,\n      { signal }\n    );\n\n    let buffer = \"\";\n    for await (const chunk of result.stream) {\n      if (signal?.aborted) {\n        throw new Error(\"AbortError\");\n      }\n      let content = \"\";\n\n      if (typeof chunk.text === \"function\") {\n        content = chunk.text();\n      } else if (chunk.candidates && chunk.candidates.length > 0) {\n        const candidate = chunk.candidates[0];\n        if (candidate.content && candidate.content.parts) {\n          content = candidate.content.parts\n            .filter((part) => part.text)\n            .map((part) => part.text)\n            .join(\"\");\n        }\n      }\n\n      if (content) {\n        buffer += content;\n        while (buffer.length >= 1) {\n          const chunkToSend = buffer.slice(0, 1);\n          buffer = buffer.slice(1);\n          newMessage.content += chunkToSend;\n          sendMessageChunk(chunkToSend, mainWindow);\n        }\n      }\n    }\n\n    if (buffer.length > 0) {\n      newMessage.content += buffer;\n      sendMessageChunk(buffer, mainWindow);\n    }\n\n    if (mainWindow) {\n      mainWindow.webContents.send(\"streamEnd\");\n    }\n\n    return {\n      id: conversationId,\n      messages: [...messages, newMessage],\n      reasoning: reasoning || \"\",\n      title: currentTitle,\n      content: newMessage.content,\n      aborted: false,\n    };\n  } catch (error) {\n    if (\n      signal?.aborted ||\n      (error instanceof Error && error.message === \"AbortError\")\n    ) {\n      if (mainWindow) {\n        mainWindow.webContents.send(\"streamEnd\");\n      }\n      return {\n        id: conversationId,\n        messages: [...messages, { ...newMessage }],\n        reasoning: reasoning || \"\",\n        title: currentTitle,\n        content: newMessage.content,\n        aborted: true,\n      };\n    }\n    throw 
error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/localModel.ts",
    "content": "import { providerInitialize } from \"../llmHelpers/providerInit.js\";\nimport { chatCompletion } from \"../chatCompletion.js\";\n\nexport async function LocalModelProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const openai = await providerInitialize(\"local\", params.activeUser);\n  return chatCompletion(openai, params);\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/ollama.ts",
    "content": "import { BrowserWindow } from \"electron\";\nimport db from \"../../db.js\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { truncateMessages } from \"../llmHelpers/truncateMessages.js\";\nimport type { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { returnSystemPrompt } from \"../llmHelpers/returnSystemPrompt.js\";\nimport { prepMessages } from \"../llmHelpers/prepMessages.js\";\nimport { ollamaAgent } from \"../agentLayer/ollamaAgent.js\";\nimport ollama from \"ollama\";\n\nexport async function OllamaProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const {\n    messages,\n    userSettings,\n    prompt,\n    conversationId,\n    mainWindow,\n    currentTitle,\n    collectionId,\n    data,\n    signal,\n  } = params;\n  let dataCollectionInfo;\n  if (collectionId) {\n    dataCollectionInfo = db.getCollection(collectionId) as Collection;\n  }\n\n  // Truncate messages to fit within token limits\n  const maxOutputTokens = (userSettings.maxTokens as number) || 4096;\n  const newMessages = await prepMessages(messages);\n\n  const userTools = await db.getUserTools(params.activeUser.id);\n  let agentActions = null;\n  let agentsResults = null;\n  if (\n    userTools.some(\n      (tool) => tool.tool_id === 1 && tool.enabled === 1 && tool.enabled === 1\n    )\n  ) {\n    const { content, webSearchResult } = await ollamaAgent(\n      newMessages,\n      userSettings,\n      mainWindow\n    );\n    agentActions = content;\n    agentsResults = webSearchResult;\n  }\n\n  let reasoning;\n  if (userSettings.cot) {\n    const {\n      reasoning: reasoningContent,\n      actions,\n      results,\n    } = await chainOfThought(\n      newMessages,\n      maxOutputTokens,\n      userSettings,\n      \"\",\n      data ? data : null,\n      dataCollectionInfo ? 
dataCollectionInfo : null,\n      signal,\n      mainWindow,\n      agentActions,\n      agentsResults\n    );\n\n    reasoning = reasoningContent;\n    agentActions = actions;\n    agentsResults = results;\n    if (mainWindow) {\n      mainWindow.webContents.send(\"reasoningEnd\");\n    }\n  }\n  const newSysPrompt = await returnSystemPrompt(\n    prompt,\n    dataCollectionInfo,\n    reasoning || null,\n    agentsResults ? agentsResults : undefined,\n    data\n  );\n\n  const truncatedMessages = truncateMessages(newMessages, maxOutputTokens);\n  truncatedMessages.unshift(newSysPrompt);\n\n  const response = await ollama.chat({\n    model: userSettings.model || \"llama2\",\n    messages: truncatedMessages.map((msg) => ({\n      role: msg.role,\n      content: msg.content as string,\n    })),\n    stream: true,\n  });\n\n  const newMessage: Message = {\n    role: \"assistant\",\n    content: \"\",\n    timestamp: new Date(),\n    data_content: data ? JSON.stringify(data) : undefined,\n  };\n\n  for await (const part of response) {\n    sendMessageChunk(part.message.content, mainWindow);\n    newMessage.content += part.message.content;\n  }\n\n  try {\n    if (mainWindow) {\n      mainWindow.webContents.send(\"streamEnd\");\n    }\n\n    // Only return message if we have content and weren't aborted\n    if (newMessage.content) {\n      return {\n        id: conversationId,\n        messages: [...messages, newMessage],\n        title: currentTitle,\n        content: newMessage.content,\n        aborted: false,\n      };\n    }\n\n    return {\n      id: conversationId,\n      messages: messages,\n      title: currentTitle,\n      content: \"\",\n      reasoning: reasoning || \"\",\n      aborted: false,\n    };\n  } catch (error) {\n    if (mainWindow) {\n      mainWindow.webContents.send(\"streamEnd\");\n    }\n\n    if (\n      signal?.aborted ||\n      (error instanceof Error && error.message === \"AbortError\")\n    ) {\n      if (mainWindow) {\n        
mainWindow.webContents.send(\"streamEnd\");\n      }\n      return {\n        id: conversationId,\n        messages: [...messages, { ...newMessage }],\n        title: currentTitle,\n        content: newMessage.content,\n        reasoning: reasoning || \"\",\n        aborted: true,\n      };\n    }\n    throw error;\n  }\n}\n\nasync function chainOfThought(\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  userSettings: UserSettings,\n  prompt: string,\n  data: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null,\n  dataCollectionInfo: Collection | null,\n  signal?: AbortSignal,\n  mainWindow: BrowserWindow | null = null,\n  agentActions: string | null = null,\n  agentsResults: {\n    metadata: {\n      title: string;\n      source: string;\n      description: string;\n      author: string;\n      keywords: string;\n      ogImage: string;\n    };\n    textContent: string;\n  } | null = null\n) {\n  const sysPrompt: ChatCompletionMessageParam = {\n    role: \"system\",\n    content:\n      \"You are a reasoning engine. Your task is to analyze the question and outline your step-by-step reasoning process for how to answer it. Keep your reasoning concise and focused on the key logical steps. Only return the reasoning process, do not provide the final answer.\" +\n      (agentActions\n        ? \"The following is the agent actions that the user has provided: \" +\n          `\\n\\n${agentActions}` +\n          `\\n\\nThe following is the web search results that the user has provided: ` +\n          `\\n\\n${JSON.stringify(agentsResults)}` +\n          `\\n\\n*** THIS IS THE END OF THE AGENT ACTIONS ***`\n        : \"\") +\n      (data\n        ? 
\"The following is the data that the user has provided via their custom data collection: \" +\n          `\\n\\n${JSON.stringify(data)}` +\n          `\\n\\nCollection/Store Name: ${dataCollectionInfo?.name}` +\n          `\\n\\nCollection/Store Files: ${dataCollectionInfo?.files}` +\n          `\\n\\nCollection/Store Description: ${dataCollectionInfo?.description}` +\n          `\\n\\n*** THIS IS THE END OF THE DATA COLLECTION ***`\n        : \"\"),\n  };\n\n  const truncatedMessages = truncateMessages(messages, maxOutputTokens);\n  const newMessages = [sysPrompt, ...truncatedMessages];\n\n  const response = await ollama.chat({\n    model: userSettings.model || \"llama2\",\n    messages: newMessages.map((msg) => ({\n      role: msg.role,\n      content: msg.content as string,\n    })),\n    stream: true,\n  });\n\n  let reasoningContent = \"\";\n  for await (const part of response) {\n    sendMessageChunk(\"[REASONING]: \" + part.message.content, mainWindow);\n    reasoningContent += part.message.content;\n  }\n\n  return {\n    reasoning: reasoningContent,\n    actions: agentActions,\n    results: agentsResults,\n  };\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/openai.ts",
    "content": "import { chatCompletion } from \"../chatCompletion.js\";\nimport { providerInitialize } from \"../llmHelpers/providerInit.js\";\n\nexport async function OpenAIProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const openai = await providerInitialize(\"openai\", params.activeUser);\n  return chatCompletion(openai, params);\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/openrouter.ts",
    "content": "import { providerInitialize } from \"../llmHelpers/providerInit.js\";\nimport { chatCompletion } from \"../chatCompletion.js\";\n\nexport async function OpenRouterProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const openai = await providerInitialize(\"openrouter\", params.activeUser);\n  return chatCompletion(openai, params);\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/providers/xai.ts",
    "content": "import { chatCompletion } from \"../chatCompletion.js\";\nimport { providerInitialize } from \"../llmHelpers/providerInit.js\";\n\nexport async function XAIProvider(\n  params: ProviderInputParams\n): Promise<ProviderResponse> {\n  const openai = await providerInitialize(\"xai\", params.activeUser);\n  return chatCompletion(openai, params);\n}\n"
  },
  {
    "path": "Frontend/src/electron/llms/reasoningLayer/openAiChainOfThought.ts",
    "content": "import { OpenAI } from \"openai\";\nimport { ChatCompletionMessageParam } from \"openai/resources/chat/completions\";\nimport { truncateMessages } from \"../llmHelpers/truncateMessages.js\";\nimport { sendMessageChunk } from \"../llmHelpers/sendMessageChunk.js\";\nimport { BrowserWindow } from \"electron\";\n\nexport async function openAiChainOfThought(\n  provider: OpenAI,\n  messages: ChatCompletionMessageParam[],\n  maxOutputTokens: number,\n  userSettings: UserSettings,\n  data: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  } | null,\n  dataCollectionInfo: Collection | null,\n  agentActions?: string,\n  webSearchResult?: WebSearchResult | null,\n  signal?: AbortSignal,\n  mainWindow: BrowserWindow | null = null\n) {\n  const sysPrompt: ChatCompletionMessageParam = {\n    role: \"system\",\n    content:\n      `You are a reasoning engine. Your task is to analyze the question and outline your step-by-step reasoning process for how\n      to answer it. Only return the reasoning process including important information from the agent's actions and web search results, \n      do not provide the final answer. The agent's actions are: ${agentActions}` +\n      (webSearchResult\n        ? `\\n\\nThe following is the web search results from the agent please include them in your reasoning process: ${JSON.stringify(\n            webSearchResult\n          )}`\n        : \"\") +\n      (data\n        ? 
\"The following is the data that the user has provided via their custom data collection: \" +\n          `\\n\\n${JSON.stringify(data)}` +\n          `\\n\\nCollection/Store Name: ${dataCollectionInfo?.name}` +\n          `\\n\\nCollection/Store Files: ${dataCollectionInfo?.files}` +\n          `\\n\\nCollection/Store Description: ${dataCollectionInfo?.description}` +\n          `\\n\\n*** THIS IS THE END OF THE DATA COLLECTION ***`\n        : \"\"),\n  };\n  const truncatedMessages = truncateMessages(messages, maxOutputTokens);\n  const newMessages = [sysPrompt, ...truncatedMessages];\n  let reasoning;\n\n  if (userSettings.model === \"o3-mini-2025-01-31\") {\n    reasoning = await provider.chat.completions.create(\n      {\n        model: userSettings.model as string,\n        messages: newMessages,\n        stream: true,\n      },\n      { signal }\n    );\n  } else {\n    reasoning = await provider.chat.completions.create(\n      {\n        model: userSettings.model as string,\n        messages: newMessages,\n        stream: true,\n        temperature: Number(userSettings.temperature),\n      },\n      { signal }\n    );\n  }\n\n  let reasoningContent = \"\";\n  for await (const chunk of reasoning) {\n    if (signal?.aborted) {\n      throw new Error(\"AbortError\");\n    }\n    const content = chunk.choices[0]?.delta?.content || \"\";\n    reasoningContent += content;\n    sendMessageChunk(\"[REASONING]: \" + content, mainWindow);\n  }\n\n  return webSearchResult\n    ? webSearchResult + reasoningContent\n    : reasoningContent;\n}\n"
  },
  {
    "path": "Frontend/src/electron/loadingWindow.ts",
    "content": "import { app, BrowserWindow, ipcMain, shell } from \"electron\";\nimport path from \"path\";\nimport { isDev } from \"./util.js\";\nimport fs from \"fs\";\nimport log from \"electron-log\";\nimport { fileURLToPath } from \"url\";\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\nlog.transports.file.level = \"info\";\nlog.transports.file.resolvePathFn = () =>\n  path.join(app.getPath(\"userData\"), \"logs/main.log\");\n\nlet loadingWindow: BrowserWindow | null = null;\n\nexport function createLoadingWindow(icon?: Electron.NativeImage) {\n  const windowOptions: Electron.BrowserWindowConstructorOptions = {\n    width: 800,\n    height: 600,\n    frame: true,\n    webPreferences: {\n      nodeIntegration: true,\n      contextIsolation: false,\n    },\n    roundedCorners: true,\n    center: true,\n    title: app.getName(),\n    icon: icon || path.join(__dirname, \"../assets/icon.png\"),\n  };\n\n  loadingWindow = new BrowserWindow(windowOptions);\n\n  const appPath = app.getAppPath();\n  log.info(\"App Path:\", appPath);\n\n  // In production, loading.html should be in dist-react\n  const loadingPath = isDev()\n    ? 
`file://${path.join(path.dirname(__dirname), \"src\", \"loading.html\")}`\n    : `file://${path.join(appPath, \"dist-react\", \"src\", \"loading.html\")}`;\n\n  log.info(\"Loading Path:\", loadingPath);\n  log.info(\"Current directory:\", __dirname);\n  const dirPath = path.dirname(loadingPath.replace(\"file://\", \"\"));\n  try {\n    log.info(\"Files in s directory:\", fs.readdirSync(dirPath));\n    log.info(\"Files in directory:\", fs.readdirSync(__dirname));\n  } catch (error) {\n    log.error(\"Error reading directory:\", error);\n  }\n\n  // Use loadingPath directly instead of constructing a new path\n  loadingWindow.loadURL(loadingPath);\n\n  loadingWindow.once(\"ready-to-show\", () => {\n    if (loadingWindow) {\n      loadingWindow.show();\n    }\n  });\n\n  // Add IPC handlers for the loading window\n  ipcMain.on(\"open-logs\", () => {\n    const logPath = log.transports.file.getFile().path;\n    shell.showItemInFolder(logPath);\n  });\n\n  ipcMain.on(\"open-github-issue\", () => {\n    shell.openExternal(\n      \"https://github.com/CNTRLAI/Notate/issues/new?template=bug_report.md\"\n    );\n  });\n\n  // Add test failure handler\n  ipcMain.on(\"test-failure\", () => {\n    updateLoadingStatus(\"Test failure message\", 100, true);\n  });\n\n  return loadingWindow;\n}\n\nexport function updateLoadingText(text: string) {\n  if (loadingWindow) {\n    loadingWindow.webContents.send(\"update-status\", { text, progress: 0 });\n  }\n}\n\nexport function updateLoadingStatus(\n  text: string,\n  progress: number,\n  failed: boolean = false\n) {\n  if (loadingWindow && !loadingWindow.isDestroyed()) {\n    loadingWindow.webContents.send(\"update-status\", { text, progress, failed });\n  }\n}\n\nexport function closeLoadingWindow() {\n  if (loadingWindow && !loadingWindow.isDestroyed()) {\n    loadingWindow.close();\n    loadingWindow = null;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/localLLMs/getDirModels.ts",
    "content": "import fs from \"fs\";\nimport path from \"path\";\n\nexport async function getDirModels(payload: {\n  dirPath: string;\n}): Promise<Model[]> {\n  const { dirPath } = payload;\n\n  if (!fs.existsSync(dirPath)) {\n    console.log(\"Directory does not exist\");\n    return [];\n  }\n\n  const models: Model[] = [];\n\n  // List contents of the directory to debug\n  const contents = fs.readdirSync(dirPath);\n  // First try to handle as regular model directory\n  if (contents.length > 0) {\n    try {\n      // Process each publisher/author directory\n      for (const publisher of contents) {\n        if (publisher.startsWith(\".\")) continue;\n\n        const publisherPath = path.join(dirPath, publisher);\n        if (!fs.statSync(publisherPath).isDirectory()) continue;\n\n        // First check for GGUF files directly in the publisher directory\n        const publisherFiles = fs.readdirSync(publisherPath);\n        for (const file of publisherFiles) {\n          if (file.endsWith(\".gguf\")) {\n            const modelPath = path.join(publisherPath, file);\n            const stats = fs.statSync(modelPath);\n\n            // Remove the .gguf extension for the model name\n            const modelName = file.replace(\".gguf\", \"\");\n\n            models.push({\n              name: `${publisher}/${modelName}`,\n              type: \"llama.cpp\",\n              model_location: modelPath,\n              modified_at: stats.mtime.toISOString(),\n              size: stats.size,\n              digest: \"\",\n            });\n          }\n        }\n\n        // Then check subdirectories for other model types\n        for (const item of publisherFiles) {\n          if (item.startsWith(\".\")) continue;\n\n          const itemPath = path.join(publisherPath, item);\n          if (!fs.statSync(itemPath).isDirectory()) continue;\n\n          const stats = fs.statSync(itemPath);\n\n          // Check for common model files to determine type\n          const files = 
fs.readdirSync(itemPath);\n          let modelType = \"unknown\";\n\n          if (files.some((f) => f.endsWith(\".gguf\"))) {\n            modelType = \"llama.cpp\";\n          } else if (\n            files.some((f) => f === \"config.json\" || f === \"pytorch_model.bin\")\n          ) {\n            modelType = \"Transformers\";\n          }\n          if (modelType !== \"unknown\") {\n            if (item === \"granite-embedding\") continue;\n            models.push({\n              name: `${publisher}/${item}`,\n              type: modelType,\n              model_location: itemPath,\n              modified_at: stats.mtime.toISOString(),\n              size: stats.size,\n              digest: \"\",\n            });\n          }\n        }\n      }\n    } catch (err) {\n      console.error(\"Error reading regular model directory:\", err);\n    }\n  }\n\n  // Then check for Ollama models in manifests directory\n  const manifestsDir = path.join(dirPath, \"manifests\");\n\n  if (fs.existsSync(manifestsDir)) {\n    try {\n      // Check registry.ollama.ai/library for official models\n      const registryPath = path.join(\n        manifestsDir,\n        \"registry.ollama.ai\",\n        \"library\"\n      );\n      if (fs.existsSync(registryPath)) {\n        const entries = fs.readdirSync(registryPath, { withFileTypes: true });\n\n        for (const entry of entries) {\n          if (\n            entry.isDirectory() &&\n            !entry.name.startsWith(\".\") &&\n            !entry.name.includes(\"embedding\")\n          ) {\n            const modelPath = path.join(registryPath, entry.name);\n            const stats = fs.statSync(modelPath);\n\n            models.push({\n              name: entry.name,\n              type: \"ollama\",\n              model_location: modelPath,\n              modified_at: stats.mtime.toISOString(),\n              size: stats.size,\n              digest: \"\",\n            });\n          }\n        }\n      }\n\n      // Check hf.co 
for HuggingFace models\n      const hfPath = path.join(manifestsDir, \"hf.co\");\n      if (fs.existsSync(hfPath)) {\n        const processHFDir = (dir: string) => {\n          const entries = fs.readdirSync(dir, { withFileTypes: true });\n          for (const entry of entries) {\n            if (entry.isDirectory() && !entry.name.startsWith(\".\")) {\n              const subEntries = fs.readdirSync(path.join(dir, entry.name), {\n                withFileTypes: true,\n              });\n              for (const subEntry of subEntries) {\n                if (subEntry.isDirectory() && !subEntry.name.startsWith(\".\")) {\n                  const modelPath = path.join(dir, entry.name, subEntry.name);\n                  const stats = fs.statSync(modelPath);\n\n                  const ollamaModelName = `${entry.name}/${subEntry.name}`;\n\n                  models.push({\n                    name: ollamaModelName,\n                    type: \"ollama\",\n                    model_location: path.join(\n                      manifestsDir,\n                      \"hf.co\",\n                      entry.name,\n                      subEntry.name\n                    ),\n                    modified_at: stats.mtime.toISOString(),\n                    size: stats.size,\n                    digest: \"\",\n                  });\n                }\n              }\n            }\n          }\n        };\n        processHFDir(hfPath);\n      }\n    } catch (err) {\n      console.error(\"Error reading Ollama models directory:\", err);\n    }\n  } else {\n    console.log(\"No manifests directory found\");\n  }\n\n  return models;\n}\n"
  },
  {
    "path": "Frontend/src/electron/localLLMs/loadModel.ts",
    "content": "import { getToken } from \"../authentication/token.js\";\n\nexport async function loadModel(payload: {\n  model_location: string;\n  model_name: string;\n  model_type?: string;\n  user_id: number;\n}) {\n  try {\n    const token = await getToken({ userId: payload.user_id.toString() });\n    const response = await fetch(`http://localhost:47372/load-model`, {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Authorization: `Bearer ${token}`,\n      },\n\n      body: JSON.stringify({\n        model_path: payload.model_location,\n        model_name: payload.model_name,\n        model_type: payload.model_type,\n      }),\n    });\n\n    const data = await response.json();\n    return data;\n  } catch (error) {\n    console.error(\"Error loading model:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/localLLMs/modelInfo.ts",
    "content": "import { getToken } from \"../authentication/token.js\";\n\nexport async function modelInfo(payload: {\n  model_location: string;\n  model_name: string;\n  model_type?: string;\n  user_id: number;\n}) {\n  try {\n    const token = await getToken({ userId: payload.user_id.toString() });\n    const response = await fetch(`http://localhost:47372/model-info`, {\n      method: \"GET\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Authorization: `Bearer ${token}`,\n      },\n    });\n\n    const data = await response.json();\n    return data;\n  } catch (error) {\n    console.error(\"Error getting model info:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/localLLMs/unloadModel.ts",
    "content": "import { getToken } from \"../authentication/token.js\";\n\nexport async function unloadModel(payload: {\n  model_location: string;\n  model_name: string;\n  model_type?: string;\n  user_id: number;\n}) {\n  try {\n    const token = await getToken({ userId: payload.user_id.toString() });\n    const response = await fetch(`http://localhost:47372/unload-model`, {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Authorization: `Bearer ${token}`,\n      },\n\n      body: JSON.stringify({\n        model_location: payload.model_location,\n        model_name: payload.model_name,\n        model_type: payload.model_type,\n      }),\n    });\n\n    const data = await response.json();\n    return data;\n  } catch (error) {\n    console.error(\"Error unloading model:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/main.ts",
    "content": "import { app } from \"electron\";\nimport path from \"path\";\nimport { isDev } from \"./util.js\";\nimport { pollResource } from \"./resourceManager.js\";\nimport { createTray } from \"./tray.js\";\nimport { createMenu } from \"./menu.js\";\nimport db from \"./db.js\";\nimport { createLoadingWindow, updateLoadingStatus } from \"./loadingWindow.js\";\nimport fs from \"fs\";\nimport log from \"electron-log\";\nimport { setupCollectionHandlers } from \"./handlers/collectionHandlers.js\";\nimport { setupIpcHandlers } from \"./handlers/ipcHandlers.js\";\nimport { setupDbHandlers } from \"./handlers/dbHandlers.js\";\nimport { createMainWindow } from \"./mainWindow.js\";\nimport { handleCloseEvents } from \"./handlers/closeEventHandler.js\";\nimport { setupChatHandlers } from \"./handlers/chatHandlers.js\";\nimport {\n  startPythonServer,\n  stopPythonServer,\n} from \"./python/startAndStopPython.js\";\nimport { setupMenuHandlers } from \"./handlers/menuHandlers.js\";\nimport { setupOllamaHandlers } from \"./handlers/ollamaHandlers.js\";\nimport { nativeImage } from \"electron\";\nimport { setupVttHandlers } from \"./handlers/voiceHandlers.js\";\nimport { setupFileHandlers } from \"./handlers/fileHandlers.js\";\nimport { getDevSecretPath } from \"./authentication/devApi.js\";\nimport { setupOpenRouterHandlers } from \"./handlers/openRouterHandlers.js\";\nimport { setupLocalModelHandlers } from \"./handlers/localModelHandlers.js\";\nimport crypto from \"crypto\";\nimport { setupAzureOpenAI } from \"./handlers/azureHandlers.js\";\nimport { setupCustomApiHandlers } from \"./handlers/customApiHandlers.js\";\n\n// Configure logging first\nlog.transports.file.level = \"debug\";\nlog.transports.file.resolvePathFn = () => {\n  const logPath = app.getPath(\"userData\");\n  // Ensure the log directory exists\n  if (!fs.existsSync(logPath)) {\n    fs.mkdirSync(logPath, { recursive: true });\n  }\n  return path.join(logPath, 
\"main.log\");\n};\n\nlog.errorHandler.startCatching();\n\n// Ensure dev secret exists\nconst devSecretPath = getDevSecretPath();\nif (!fs.existsSync(devSecretPath)) {\n  const secret = crypto.randomBytes(32).toString(\"base64\");\n  fs.writeFileSync(devSecretPath, secret);\n  log.info(\"Created dev secret file at:\", devSecretPath);\n}\n\n// Add early startup logging\nprocess.on(\"uncaughtException\", (error) => {\n  console.error(\"Uncaught Exception:\", error);\n  log.error(\"Uncaught Exception:\", error);\n  if (error.stack) {\n    log.error(\"Stack trace:\", error.stack);\n  }\n});\n\nprocess.on(\"unhandledRejection\", (error) => {\n  console.error(\"Unhandled Rejection:\", error);\n  log.error(\"Unhandled Rejection:\", error);\n  if (error instanceof Error && error.stack) {\n    log.error(\"Stack trace:\", error.stack);\n  }\n});\n\n// Log startup\nlog.info(\"Application starting...\");\nlog.info(\"Process arguments:\", process.argv);\nlog.info(\"Working directory:\", process.cwd());\nlog.info(\"Is Dev:\", isDev());\nlog.info(\"Executable path:\", process.execPath);\nlog.info(\"Resource path:\", process.resourcesPath);\n\n// Set app metadata before anything else\napp.setName(\"Notate\");\nif (isDev()) {\n  app.setPath(\"userData\", path.join(app.getPath(\"userData\"), \"development\"));\n}\n\n// Log paths\nlog.info(\"User Data Path:\", app.getPath(\"userData\"));\nlog.info(\"App Path:\", app.getAppPath());\n\n// Set app metadata\nconst iconPath = isDev()\n  ? 
path.resolve(process.cwd(), \"linux.png\")\n  : path.join(process.resourcesPath, \"build/icons/256x256.png\");\n\nlog.info(\"Icon Path:\", iconPath);\n\n// Create native image if the icon exists\nlet icon: Electron.NativeImage | undefined;\ntry {\n  if (fs.existsSync(iconPath)) {\n    icon = nativeImage.createFromPath(iconPath);\n    log.info(\"Icon loaded successfully\");\n  } else {\n    log.warn(\"Icon file not found at:\", iconPath);\n    // Try alternate paths\n    const altPaths = [\n      path.join(process.resourcesPath, \"linux.png\"),\n      path.join(app.getAppPath(), \"build/icons/256x256.png\"),\n      \"/usr/share/icons/hicolor/256x256/apps/notate.png\",\n    ];\n\n    for (const altPath of altPaths) {\n      if (fs.existsSync(altPath)) {\n        icon = nativeImage.createFromPath(altPath);\n        log.info(\"Icon loaded from alternate path:\", altPath);\n        break;\n      }\n    }\n  }\n} catch (error) {\n  log.error(\"Error loading icon:\", error);\n}\n\n// Set platform-specific icon\nif (process.platform === \"darwin\" && icon) {\n  app.dock.setIcon(icon);\n}\n\nconst getResourceDirectory = () => {\n  const resourceDir =\n    process.env.NODE_ENV === \"development\"\n      ? 
path.join(process.cwd())\n      : path.join(process.resourcesPath, \"app.asar.unpacked\");\n  return resourceDir;\n};\n\napp.on(\"ready\", async () => {\n  try {\n    log.info(\"App ready event triggered\");\n    const loadingWin = createLoadingWindow(icon);\n    log.info(\"Loading window created\");\n\n    // Make sure the window is ready before proceeding\n    await new Promise<void>((resolve) => {\n      if (!loadingWin) {\n        log.warn(\"Loading window not created\");\n        resolve();\n        return;\n      }\n\n      loadingWin.webContents.on(\"did-finish-load\", () => {\n        log.info(\"Loading window loaded\");\n        loadingWin.show();\n        resolve();\n      });\n\n      loadingWin.webContents.on(\n        \"did-fail-load\",\n        (event, errorCode, errorDescription) => {\n          log.error(\n            \"Loading window failed to load:\",\n            errorCode,\n            errorDescription\n          );\n          resolve();\n        }\n      );\n    });\n\n    // Add a small delay to ensure the window is visible\n    await new Promise((resolve) => setTimeout(resolve, 500));\n\n    try {\n      updateLoadingStatus(\"Starting Python server...\", 10);\n      log.info(\"Attempting to start Python server\");\n      await startPythonServer();\n      log.info(\"Python server started successfully\");\n    } catch (error) {\n      log.error(\"Failed to start Python server:\", error);\n      if (loadingWin && !loadingWin.isDestroyed()) {\n        updateLoadingStatus(`Failed to start Python server: ${error}`, 100);\n        await new Promise((resolve) => setTimeout(resolve, 3000));\n      }\n      throw error;\n    }\n\n    const mainWindow = createMainWindow(icon);\n    log.info(\"Main window created\");\n    db.init();\n    log.info(\"Database initialized\");\n\n    pollResource(mainWindow);\n    createTray(mainWindow);\n    createMenu(mainWindow);\n    setupIpcHandlers(mainWindow);\n    setupVttHandlers();\n    setupDbHandlers();\n    
setupChatHandlers(mainWindow);\n    setupCollectionHandlers();\n    setupMenuHandlers(mainWindow);\n    setupOllamaHandlers();\n    setupAzureOpenAI();\n    setupFileHandlers();\n    setupCustomApiHandlers();\n    setupOpenRouterHandlers();\n    handleCloseEvents(mainWindow);\n    setupLocalModelHandlers();\n    await new Promise((resolve) => setTimeout(resolve, 1000));\n    mainWindow.show();\n\n    // Only close loading window if it still exists and isn't destroyed\n    if (loadingWin && !loadingWin.isDestroyed()) {\n      loadingWin.close();\n    }\n\n    app.setAboutPanelOptions({\n      applicationName: app.name,\n      applicationVersion: app.getVersion(),\n      iconPath: path.resolve(getResourceDirectory(), \"linux.png\"),\n    });\n  } catch (error) {\n    log.error(`Failed to start application: ${error}`);\n    console.error(\"Failed to start application:\", error);\n    if (error instanceof Error) {\n      log.error(\"Error stack:\", error.stack);\n    }\n    updateLoadingStatus(`Failed to start: ${error}`, 100, true);\n    await new Promise((resolve) => setTimeout(resolve, 10000)); // Give user time to see error\n    app.quit();\n  }\n});\n\napp.on(\"will-quit\", () => {\n  if (!isDev()) {\n    const tempPath = path.join(app.getPath(\"temp\"), \"notate-backend\");\n    fs.rmSync(tempPath, { recursive: true, force: true });\n  }\n});\n\napp.on(\"window-all-closed\", () => {\n  stopPythonServer();\n  if (process.platform !== \"darwin\") {\n    app.quit();\n  }\n});\n\napp.on(\"before-quit\", () => {\n  stopPythonServer();\n});\n"
  },
  {
    "path": "Frontend/src/electron/mainWindow.test.ts",
    "content": "import { test, expect, vi, Mock } from \"vitest\";\nimport { createMainWindow } from \"./mainWindow.js\";\nimport { BrowserWindow } from \"electron\";\nimport { closeLoadingWindow } from \"./loadingWindow.js\";\n\n// Mock process.env for development mode test\nvi.stubEnv(\"NODE_ENV\", \"development\");\n\n// Mock closeLoadingWindow\nvi.mock(\"./loadingWindow.js\", () => ({\n  closeLoadingWindow: vi.fn(),\n}));\n\n// Mock electron\nvi.mock(\"electron\", () => ({\n  BrowserWindow: vi.fn().mockImplementation(() => ({\n    loadURL: vi.fn(),\n    loadFile: vi.fn(),\n    on: vi.fn(),\n    once: vi.fn(),\n    center: vi.fn(),\n    webContents: {\n      on: vi.fn(),\n      session: {\n        setSpellCheckerLanguages: vi.fn(),\n      },\n    },\n    show: vi.fn(),\n    maximize: vi.fn(),\n  })),\n  app: {\n    getPath: vi.fn().mockReturnValue(\"/mock/path\"),\n    getAppPath: vi.fn().mockReturnValue(\"/mock/app/path\"),\n  },\n}));\n\ntest(\"createMainWindow creates window with correct configuration in dev mode\", () => {\n  const window = createMainWindow();\n  \n  // Verify BrowserWindow was called with correct config\n  expect(BrowserWindow).toHaveBeenCalledWith(\n    expect.objectContaining({\n      width: 800,\n      height: 600,\n      resizable: true,\n      frame: false,\n      show: false,\n      center: true,\n      webPreferences: expect.objectContaining({\n        spellcheck: true,\n        preload: expect.stringContaining(\"/dist-electron/preload.cjs\"),\n      }),\n    })\n  );\n  \n  // Verify window methods were called\n  expect(window.loadURL).toHaveBeenCalledWith(\"http://localhost:5131\");\n  \n  // Verify ready-to-show handler\n  const readyToShowHandler = (window.once as Mock).mock.calls.find(\n    call => call[0] === \"ready-to-show\"\n  )?.[1];\n  expect(readyToShowHandler).toBeDefined();\n  \n  // Execute ready-to-show handler\n  if (readyToShowHandler) {\n    readyToShowHandler();\n    
expect(closeLoadingWindow).toHaveBeenCalled();\n    expect(window.show).toHaveBeenCalled();\n    expect(window.center).toHaveBeenCalled();\n  }\n});\n\ntest(\"createMainWindow creates window with correct configuration in production mode\", () => {\n  // Set NODE_ENV to production\n  vi.stubEnv(\"NODE_ENV\", \"production\");\n  \n  const window = createMainWindow();\n  \n  // Verify window methods for production mode\n  expect(window.loadFile).toHaveBeenCalled();\n  expect(window.loadURL).not.toHaveBeenCalled();\n  \n  // Reset NODE_ENV\n  vi.stubEnv(\"NODE_ENV\", \"development\");\n}); "
  },
  {
    "path": "Frontend/src/electron/mainWindow.ts",
    "content": "import { BrowserWindow, app } from \"electron\";\nimport { getPreloadPath, getUIPath } from \"./pathResolver.js\";\nimport { isDev } from \"./util.js\";\nimport path from \"path\";\nimport { closeLoadingWindow } from \"./loadingWindow.js\";\n\nexport function createMainWindow(icon?: Electron.NativeImage) {\n  // For Linux, ensure proper window class and icon handling\n  const options: Electron.BrowserWindowConstructorOptions = {\n    width: 800,\n    height: 800,\n    minWidth: 400,\n    minHeight: 300,\n    resizable: true,\n    frame: false,\n    show: false,\n    center: true,\n    roundedCorners: true,\n    webPreferences: {\n      spellcheck: true,\n      preload: getPreloadPath(),\n    },\n    // These properties are important for Linux integration\n    title: \"Notate\",\n    icon: icon || path.join(__dirname, \"../../src/assets/icon.png\"),\n  };\n\n  if (process.platform === \"linux\") {\n    // This helps with proper taskbar grouping and icon handling\n    app.name = \"Notate\";\n    // Ensure proper desktop integration\n    options.autoHideMenuBar = true;\n  }\n\n  const mainWindow = new BrowserWindow(options);\n\n  if (isDev()) {\n    mainWindow.loadURL(\"http://localhost:5131\");\n  } else {\n    mainWindow.loadFile(getUIPath());\n  }\n\n  mainWindow.once(\"ready-to-show\", () => {\n    closeLoadingWindow();\n    mainWindow.show();\n    mainWindow.center();\n  });\n\n  return mainWindow;\n}\n"
  },
  {
    "path": "Frontend/src/electron/menu.ts",
    "content": "import { BrowserWindow, Menu, MenuItem } from \"electron\";\nimport { app } from \"electron\";\nimport { ipcWebContentsSend, isDev } from \"./util.js\";\n\nexport function createMenu(mainWindow: BrowserWindow) {\n  const template: (Electron.MenuItemConstructorOptions | MenuItem)[] = [\n    {\n      label: \"File\",\n      submenu: [\n        {\n          label: \"Change User\",\n          click: () => {\n            mainWindow.webContents.send(\"resetUserState\");\n            setTimeout(() => {\n              mainWindow.webContents.send(\"changeView\", \"SelectAccount\");\n            }, 100);\n          },\n        },\n        {\n          label: \"Restart\",\n          click: () => {\n            app.relaunch();\n            app.exit(0);\n          },\n        },\n        {\n          label: \"DevTools\",\n          click: () => mainWindow.webContents.openDevTools(),\n        },\n        {\n          label: \"Quit\",\n          accelerator: \"CmdOrCtrl+Q\",\n          click: () => app.quit(),\n        },\n      ],\n    },\n    {\n      label: \"Edit\",\n      submenu: [\n        { label: \"Undo\", accelerator: \"CmdOrCtrl+Z\", role: \"undo\" },\n        { label: \"Redo\", accelerator: \"Shift+CmdOrCtrl+Z\", role: \"redo\" },\n        { type: \"separator\" },\n        { label: \"Cut\", accelerator: \"CmdOrCtrl+X\", role: \"cut\" },\n        { label: \"Copy\", accelerator: \"CmdOrCtrl+C\", role: \"copy\" },\n        { label: \"Paste\", accelerator: \"CmdOrCtrl+V\", role: \"paste\" },\n        { label: \"Delete\", role: \"delete\" },\n        { type: \"separator\" },\n        {\n          label: \"Select All\",\n          accelerator: \"CmdOrCtrl+A\",\n          role: \"selectAll\",\n        },\n      ],\n    },\n    {\n      label: \"View\",\n      submenu: [\n        {\n          label: \"Chat\",\n          click: () =>\n            ipcWebContentsSend(\"changeView\", mainWindow.webContents, \"Chat\"),\n        },\n        {\n          label: 
\"History\",\n          click: () =>\n            ipcWebContentsSend(\"changeView\", mainWindow.webContents, \"History\"),\n        },\n        {\n          label: \"File Explorer\",\n          click: () =>\n            ipcWebContentsSend(\n              \"changeView\",\n              mainWindow.webContents,\n              \"FileExplorer\"\n            ),\n        },\n        { type: \"separator\" },\n      ],\n    },\n  ];\n\n  if (isDev()) {\n    (template[2].submenu as Electron.MenuItemConstructorOptions[]).push(\n      { type: \"separator\" },\n      {\n        label: \"Temp DevTools\",\n        click: () => mainWindow.webContents.openDevTools(),\n      }\n    );\n  }\n\n  const menu = Menu.buildFromTemplate(template);\n  Menu.setApplicationMenu(menu);\n\n  // For Windows, we also set the menu on the window itself\n  if (process.platform === \"win32\") {\n    mainWindow.setMenu(menu);\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/ollama/checkOllama.ts",
    "content": "import { ExecException } from \"child_process\";\nimport { exec } from \"child_process\";\nimport { platform } from \"os\";\nimport log from \"electron-log\";\n\nexport async function checkOllama(): Promise<boolean> {\n  log.info(\"Checking if Ollama is installed...\");\n  return new Promise((resolve) => {\n    try {\n      // Try common Ollama installation paths based on platform\n      const ollamaPath =\n        platform() === \"darwin\" ? \"/usr/local/bin/ollama\" : \"ollama\"; // fallback to PATH lookup\n\n      exec(`${ollamaPath} ps`, (error: ExecException | null) => {\n        if (error) {\n          log.info(`Ollama installation check failed: ${error?.message}`);\n          // If the direct path fails on macOS, try PATH lookup as fallback\n          if (platform() === \"darwin\") {\n            log.info(\"Trying PATH lookup for Ollama...\");\n            exec(\"ollama ps\", (fallbackError: ExecException | null) => {\n              resolve(!fallbackError);\n            });\n            return;\n          }\n          resolve(false);\n          return;\n        }\n        resolve(true);\n      });\n    } catch {\n      // Catch any unexpected errors and resolve false\n      resolve(false);\n    }\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/ollama/fetchLocalModels.ts",
    "content": "import { ExecException, exec } from \"child_process\";\nimport { platform } from \"os\";\nimport log from \"electron-log\";\n\nconst getOllamaPath = () => platform() === \"darwin\" ? \"/usr/local/bin/ollama\" : \"ollama\";\n\nexport async function fetchOllamaModels(): Promise<string[]> {\n  log.info(\"Fetching Ollama models...\");\n  try {\n    return new Promise((resolve) => {\n      exec(`${getOllamaPath()} list`, (error: ExecException | null, stdout: string) => {\n        if (error) {\n          log.info(`Error executing ollama list: ${error?.message}`);\n          // If the direct path fails on macOS, try PATH lookup as fallback\n          if (platform() === \"darwin\") {\n            exec(\"ollama list\", (fallbackError: ExecException | null, fallbackStdout: string) => {\n              if (fallbackError) {\n                log.info(\"Error executing ollama list:\", fallbackError);\n                resolve([]);\n                return;\n              }\n              const models = fallbackStdout\n                .split(\"\\n\")\n                .slice(1)\n                .filter((line) => line.trim())\n                .map((line) => line.split(/\\s+/)[0]);\n              log.info(`Fetched models: ${models}`);\n              resolve(models);\n            });\n            return;\n          }\n          log.info(\"Error executing ollama list:\", error);\n          resolve([]);\n          return;\n        }\n\n        const models = stdout\n          .split(\"\\n\")\n          .slice(1)\n          .filter((line) => line.trim())\n          .map((line) => line.split(/\\s+/)[0]);\n\n        resolve(models);\n      });\n    });\n  } catch (error) {\n    console.error(\"Failed to fetch Ollama models:\", error);\n    return [];\n  }\n}"
  },
  {
    "path": "Frontend/src/electron/ollama/getRunningModels.ts",
    "content": "import { spawn } from \"child_process\";\nimport { getOllamaPath } from \"./ollamaPath.js\";\n\nexport async function getRunningModels(): Promise<string[]> {\n  try {\n    return new Promise((resolve) => {\n      const ps = spawn(getOllamaPath(), [\"ps\"], {\n        stdio: [\"ignore\", \"pipe\", \"pipe\"],\n      });\n\n      let output = \"\";\n      ps.stdout.on(\"data\", (data) => {\n        output += data.toString();\n      });\n\n      ps.on(\"close\", () => {\n        // Parse the output to get model names\n        const lines = output.split(\"\\n\").slice(1); // Skip header line\n        const models = lines\n          .map((line) => line.trim())\n          .filter((line) => line) // Remove empty lines\n          .map((line) => line.split(/\\s+/)[0]); // Get first column (NAME)\n        resolve(models);\n      });\n    });\n  } catch (error) {\n    console.error(\"Error getting running models:\", error);\n    return [];\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/ollama/isOllamaRunning.ts",
    "content": "import log from \"electron-log\";\nimport { spawn } from \"child_process\";\n\nexport async function isOllamaServerRunning(): Promise<boolean> {\n  try {\n    return new Promise((resolve) => {\n        const check = spawn(\"curl\", [\"http://localhost:11434/api/version\"], {\n          stdio: [\"ignore\", \"ignore\", \"ignore\"],\n        });\n  \n        check.on(\"close\", (code) => {\n          resolve(code === 0);\n        });\n      });\n    } catch (error) {\n      log.error(\"Error checking if Ollama server is running:\", error);\n      return false;\n    }\n  }"
  },
  {
    "path": "Frontend/src/electron/ollama/ollamaPath.ts",
    "content": "import { platform } from \"os\";\n\nexport const getOllamaPath = () =>\n  platform() === \"darwin\" ? \"/usr/local/bin/ollama\" : \"ollama\";\n"
  },
  {
    "path": "Frontend/src/electron/ollama/pullModel.ts",
    "content": "import log from \"electron-log\";\nimport { spawn } from \"child_process\";\n\nexport async function pullModel(model: string): Promise<void> {\n  log.info(`Pulling model ${model}...`);\n  try {\n    return new Promise((resolve, reject) => {\n      const pull = spawn(\n        \"curl\",\n        [\n          \"-X\",\n          \"POST\",\n          \"http://localhost:11434/api/pull\",\n          \"-d\",\n          `{\"name\": \"${model}\"}`,\n        ],\n        {\n          stdio: [\"ignore\", \"pipe\", \"pipe\"],\n        }\n      );\n\n      pull.stdout.on(\"data\", (data) => {\n        const output = data.toString();\n        log.info(`Pull output: ${output}`);\n        // Emit progress event\n        if (global.mainWindow) {\n          global.mainWindow.webContents.send(\"ollama-progress\", {\n            type: \"pull\",\n            output: output,\n          });\n        }\n      });\n\n      pull.stderr.on(\"data\", (data) => {\n        const error = data.toString();\n        log.info(`Pull progress: ${error}`);\n        // Emit progress event for stderr as well\n        if (global.mainWindow) {\n          global.mainWindow.webContents.send(\"ollama-progress\", {\n            type: \"pull\",\n            output: error,\n          });\n        }\n      });\n\n      pull.on(\"error\", (error) => {\n        log.error(`Pull error: ${error.message}`);\n        reject(error);\n      });\n\n      pull.on(\"close\", (code) => {\n        if (code === 0) {\n          log.info(\"Model pull completed successfully\");\n          resolve();\n        } else {\n          log.error(`Pull failed with code ${code}`);\n          reject(\n            new Error(`Failed to pull model ${model} (exit code ${code})`)\n          );\n        }\n      });\n    });\n  } catch (error) {\n    log.error(\"Error pulling model:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/ollama/runOllama.ts",
    "content": "import { spawn } from \"child_process\";\nimport { ChildProcess } from \"child_process\";\nimport { BrowserWindow } from \"electron\";\nimport log from \"electron-log\";\nimport { pullModel } from \"./pullModel.js\";\nimport { isOllamaServerRunning } from \"./isOllamaRunning.js\";\nimport { unloadAllModels } from \"./unloadAllModels.js\";\nimport { getOllamaPath } from \"./ollamaPath.js\";\n\ndeclare global {\n  // eslint-disable-next-line no-var\n  var mainWindow: BrowserWindow | null;\n}\n\nasync function startOllamaServer(): Promise<void> {\n  try {\n    log.info(\"Starting Ollama server...\");\n    const server = spawn(getOllamaPath(), [\"serve\"], {\n      detached: true,\n      stdio: [\"ignore\", \"pipe\", \"pipe\"],\n    });\n\n    return new Promise((resolve) => {\n      // Wait for server to start\n      let output = \"\";\n      server.stdout?.on(\"data\", (data) => {\n        output += data.toString();\n        if (output.includes(\"Starting Ollama\")) {\n          log.info(\"Ollama server started successfully\");\n          resolve();\n        }\n      });\n\n      server.stderr?.on(\"data\", (data) => {\n        const error = data.toString();\n        log.error(\"Server error:\", error);\n        if (error.includes(\"address already in use\")) {\n          resolve(); // Server is already running\n        }\n      });\n\n      // Give it some time to start\n      setTimeout(() => {\n        resolve();\n      }, 2000);\n    });\n  } catch (error) {\n    log.error(\"Error starting Ollama server:\", error);\n    throw error;\n  }\n}\n\nasync function createOllamaProcess(\n  model: string\n): Promise<{ process: ChildProcess; verified: boolean }> {\n  try {\n    // First, verify the model is responsive via API\n    const verify = spawn(\n      \"curl\",\n      [\n        \"-X\",\n        \"POST\",\n        \"-s\", // Silent mode\n        \"http://localhost:11434/api/embeddings\",\n        \"-d\",\n        `{\"model\": \"${model}\", 
\"prompt\": \"test\"}`,\n      ],\n      {\n        stdio: [\"ignore\", \"pipe\", \"pipe\"],\n      }\n    );\n\n    let verifyOutput = \"\";\n    await new Promise((resolve) => {\n      verify.stdout.on(\"data\", (data) => {\n        verifyOutput += data.toString();\n        if (global.mainWindow) {\n          global.mainWindow.webContents.send(\"ollama-progress\", {\n            type: \"verify\",\n            output: data.toString(),\n          });\n        }\n      });\n\n      verify.stderr.on(\"data\", (data) => {\n        // Emit progress event for stderr\n        if (global.mainWindow) {\n          global.mainWindow.webContents.send(\"ollama-progress\", {\n            type: \"verify\",\n            output: data.toString(),\n          });\n        }\n      });\n\n      verify.on(\"close\", (code) => {\n        if (code === 0 && verifyOutput) {\n          console.log(\"Model verified via API\");\n          resolve(null);\n        } else {\n          console.log(\"Model verification failed, will try direct process\");\n          resolve(null);\n        }\n      });\n    });\n\n    // Now start the actual process\n    console.log(\"Starting Ollama process...\");\n    const ollamaProcess = spawn(getOllamaPath(), [\"run\", model], {\n      stdio: [\"pipe\", \"pipe\", \"pipe\"],\n      detached: false,\n    });\n\n    (ollamaProcess.stdin as NodeJS.WriteStream).setEncoding(\"utf-8\");\n    return {\n      process: ollamaProcess,\n      verified: verifyOutput.includes(\"embedding\"),\n    };\n  } catch (error) {\n    console.error(\"Error creating Ollama process:\", error);\n    throw error;\n  }\n}\n\nexport async function runOllama({\n  model,\n}: {\n  model: string;\n}): Promise<ChildProcess> {\n  try {\n    console.log(`Using model: ${model}`);\n\n    // Ensure Ollama server is running\n    const serverRunning = await isOllamaServerRunning();\n    if (!serverRunning) {\n      console.log(\"Ollama server not running, starting it...\");\n      await 
startOllamaServer();\n      // Wait a bit for the server to be ready\n      await new Promise((resolve) => setTimeout(resolve, 2000));\n    }\n\n    // Unload all running models first\n    try {\n      await unloadAllModels();\n    } catch (error) {\n      console.log(\"Error unloading models (this is not critical):\", error);\n    }\n\n    try {\n      // Always try to pull the model first\n      await pullModel(model);\n    } catch (error) {\n      console.error(\"Error pulling model:\", error);\n      throw error;\n    }\n\n    let ollamaProcess: ChildProcess;\n    let isVerified = false;\n    try {\n      console.log(\"Creating Ollama process...\");\n      const result = await createOllamaProcess(model);\n      ollamaProcess = result.process;\n      isVerified = result.verified;\n    } catch (error) {\n      console.error(\"Error creating Ollama process:\", error);\n      throw error;\n    }\n\n    return new Promise((resolve, reject) => {\n      if (\n        !ollamaProcess.stdout ||\n        !ollamaProcess.stderr ||\n        !ollamaProcess.stdin\n      ) {\n        reject(new Error(\"Failed to create process streams\"));\n        return;\n      }\n\n      // For embedding models, we consider them loaded once the process starts\n      if (model.includes(\"embedding\")) {\n        console.log(\"Embedding model detected, considering it loaded\");\n        resolve(ollamaProcess);\n        return;\n      }\n\n      // If we got this far and the model was verified via API, resolve immediately\n      if (isVerified) {\n        console.log(\"Model already verified via API, resolving immediately\");\n        resolve(ollamaProcess);\n        return;\n      }\n\n      let isModelLoaded = false;\n      let startupOutput = \"\";\n\n      ollamaProcess.stdout.on(\"data\", (data) => {\n        const output = data.toString();\n        startupOutput += output;\n        console.log(`Ollama output: ${output}`);\n\n        if (!isModelLoaded) {\n          isModelLoaded = true;\n 
         console.log(\"Model loaded successfully\");\n          resolve(ollamaProcess);\n        }\n      });\n\n      ollamaProcess.stderr.on(\"data\", (data) => {\n        const error = data.toString();\n        startupOutput += error;\n        console.error(`Ollama error: ${error}`);\n\n        // Also consider loading animation as success\n        if (\n          !isModelLoaded &&\n          (error.includes(\"⠋\") ||\n            error.includes(\"⠙\") ||\n            error.includes(\"⠹\") ||\n            error.includes(\"⠸\"))\n        ) {\n          isModelLoaded = true;\n          console.log(\n            \"Model loaded successfully (detected from loading animation)\"\n          );\n          resolve(ollamaProcess);\n        }\n      });\n\n      ollamaProcess.on(\"error\", (error) => {\n        console.error(`Failed to start Ollama: ${error.message}`);\n        reject(error);\n      });\n\n      ollamaProcess.on(\"exit\", (code) => {\n        if (!isModelLoaded) {\n          console.error(`Process exit with startup output: ${startupOutput}`);\n          reject(\n            new Error(\n              `Ollama process exited with code ${code}. Full output: ${startupOutput}`\n            )\n          );\n        }\n      });\n\n      // Add a timeout to prevent hanging\n      setTimeout(() => {\n        if (!isModelLoaded) {\n          try {\n            ollamaProcess.kill();\n          } catch (error) {\n            console.error(\"Error killing process:\", error);\n          }\n          reject(\n            new Error(\n              `Timeout waiting for Ollama model ${model} to load. Output: ${startupOutput}`\n            )\n          );\n        }\n      }, 30000); // 30 second timeout\n    });\n  } catch (error) {\n    console.error(\"Error in runOllama:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/ollama/unloadAllModels.ts",
    "content": "import { getRunningModels } from \"./getRunningModels.js\"\nimport { unloadModel } from \"./unloadModel.js\";\n\nexport async function unloadAllModels(): Promise<void> {\n  try {\n    const runningModels = await getRunningModels();\n      console.log(\"Currently running models:\", runningModels);\n  \n      for (const model of runningModels) {\n        try {\n          await unloadModel(model);\n        } catch (error) {\n          console.log(`Error unloading model ${model}:`, error);\n        }\n      }\n    } catch (error) {\n      console.error(\"Error unloading all models:\", error);\n    }\n  }"
  },
  {
    "path": "Frontend/src/electron/ollama/unloadModel.ts",
    "content": "import { spawn } from \"child_process\";\n\nexport async function unloadModel(model: string): Promise<void> {\n  try {\n    console.log(`Unloading model ${model}...`);\n    return new Promise((resolve, reject) => {\n      const unload = spawn(\n        \"curl\",\n        [\n          \"-X\",\n          \"POST\",\n          \"http://localhost:11434/api/generate\",\n          \"-d\",\n          `{\"model\": \"${model}\", \"keep_alive\": 0}`,\n        ],\n        {\n          stdio: [\"ignore\", \"pipe\", \"pipe\"],\n        }\n      );\n\n      unload.stdout.on(\"data\", (data) => {\n        const output = data.toString();\n        console.log(`Unload output: ${output}`);\n      });\n\n      unload.stderr.on(\"data\", (data) => {\n        const error = data.toString();\n        console.log(`Unload error: ${error}`);\n      });\n\n      unload.on(\"error\", (error) => {\n        console.error(`Unload error: ${error.message}`);\n        reject(error);\n      });\n\n      unload.on(\"close\", (code) => {\n        if (code === 0) {\n          console.log(\"Model unloaded successfully\");\n          resolve();\n        } else {\n          console.error(`Unload failed with code ${code}`);\n          reject(\n            new Error(`Failed to unload model ${model} (exit code ${code})`)\n          );\n        }\n      });\n    });\n  } catch (error) {\n    console.error(\"Error unloading model:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/pathResolver.ts",
    "content": "import { isDev } from \"./util.js\";\nimport path from \"path\";\nimport { app } from \"electron\";\n\nexport function getPreloadPath() {\n  if (isDev()) {\n    return path.join(app.getAppPath(), \"./dist-electron/preload.cjs\");\n  } else {\n    return path.join(app.getAppPath(), \"../dist-electron/preload.cjs\");\n  }\n}\n\nexport function getUIPath() {\n  return path.join(app.getAppPath(), \"/dist-react/index.html\");\n}\n\nexport function getAssetsPath() {\n  return path.join(app.getAppPath(), isDev() ? \"./src/assets\" : \"../assets\");\n}\n"
  },
  {
    "path": "Frontend/src/electron/preload.cts",
    "content": "const electron = require(\"electron\");\n\n// Set higher max listeners limit for IPC events\nelectron.ipcRenderer.setMaxListeners(20);\n\ntype IpcCallback<T> = (event: Electron.IpcRendererEvent, payload: T) => void;\n\nelectron.contextBridge.exposeInMainWorld(\"electron\", {\n  subscribeStatistics: (callback) =>\n    ipcOn(\"statistics\", (_, stats) => {\n      callback(stats);\n    }),\n  getStaticData: () => ipcInvoke(\"getStaticData\"),\n  subscribeChangeView: (callback) =>\n    ipcOn(\"changeView\", (_, view) => {\n      callback(view);\n    }),\n  subscribeResetUserState: (callback) =>\n    ipcOn(\"resetUserState\", () => {\n      callback();\n    }),\n  pullModel: async (model: string) => {\n    await ipcInvoke(\"pullModel\", { model });\n  },\n  openDirectory: () => ipcInvoke(\"openDirectory\"),\n  deleteCollection: (\n    collectionId: number,\n    collectionName: string,\n    userId: number\n  ) =>\n    ipcInvoke(\"deleteCollection\", {\n      userId,\n      id: collectionId,\n      collectionName,\n    }).then((response) => ({\n      collectionId: response.id,\n      collectionName: response.collectionName,\n      userId: response.userId,\n    })),\n  openDevTools: () => ipcSend(\"openDevTools\"),\n  sendFrameAction: (payload) => ipcSend(\"frameWindowAction\", payload),\n  resizeWindow: (width, height) => ipcSend(\"resizeWindow\", { width, height }),\n  setApiKey: (apiKey) => ipcInvoke(\"setApiKey\", { success: true, apiKey }),\n  runOllama: (model: string, user: User) =>\n    ipcInvoke(\"runOllama\", {\n      model,\n      user,\n    }) as unknown as Promise<{\n      success: boolean;\n      error?: string;\n    }>,\n  chatRequest: (\n    messages,\n    activeUser,\n    conversationId,\n    collectionId,\n    title,\n    requestId\n  ) =>\n    ipcInvoke(\"chatRequest\", {\n      messages,\n      activeUser,\n      conversationId,\n      collectionId,\n      title,\n      requestId: requestId || Date.now(),\n    }) as unknown as 
Promise<{\n      messages: Message[];\n      id: bigint | number;\n      title: string;\n    }>,\n  cancelWebcrawl: (userId: number) =>\n    ipcInvoke(\"cancelWebcrawl\", { userId }) as unknown as Promise<{\n      userId: number;\n      result: boolean;\n    }>,\n  abortChatRequest: (requestId: number) =>\n    ipcSend(\"abortChatRequest\", requestId),\n  onMessageChunk: (callback) =>\n    ipcOn(\"messageChunk\", (_, chunk) => callback(chunk)),\n  offMessageChunk: (callback) =>\n    ipcOff(\"messageChunk\", (_, chunk) => callback(chunk)),\n  onStreamEnd: (callback) => ipcOn(\"streamEnd\", () => callback()),\n  offStreamEnd: (callback) => ipcOff(\"streamEnd\", () => callback()),\n  getUsers: () => ipcInvoke(\"getUsers\"),\n  addUser: (name: string) =>\n    ipcInvoke(\"addUser\", { name }) as Promise<{\n      name: string;\n      error?: string;\n    }>,\n  updateUserSettings: (userSettings: UserSettings) =>\n    ipcInvoke(\n      \"updateUserSettings\",\n      userSettings\n    ) as unknown as Promise<UserSettings>,\n  getUserSettings: (userId: number) =>\n    ipcInvoke(\"getUserSettings\", {\n      userId,\n    }) as unknown as Promise<UserSettings>,\n  getUserPrompts: (userId: number) =>\n    ipcInvoke(\"getUserPrompts\", { userId }) as unknown as Promise<{\n      prompts: UserPrompts[];\n    }>,\n  getConversationMessages: (userId: number, conversationId: number) =>\n    ipcInvoke(\"getConversationMessages\", {\n      userId,\n      conversationId,\n    }) as unknown as Promise<{ messages: Message[] }>,\n  getUserConversations: (userId: number) =>\n    ipcInvoke(\"getUserConversations\", { userId }) as unknown as Promise<{\n      conversations: Conversation[];\n    }>,\n  addUserPrompt: (userId: number, name: string, prompt: string) =>\n    ipcInvoke(\"addUserPrompt\", {\n      userId,\n      name,\n      prompt,\n    }) as Promise<UserPrompts>,\n  updateUserPrompt: (\n    userId: number,\n    id: number,\n    name: string,\n    prompt: string\n  ) => 
ipcInvoke(\"updateUserPrompt\", { userId, id, name, prompt }),\n  addAPIKey: (userId: number, key: string, provider: string) =>\n    ipcInvoke(\"addAPIKey\", { userId, key, provider }),\n  youtubeIngest: (\n    url: string,\n    userId: number,\n    userName: string,\n    collectionId: number,\n    collectionName: string\n  ) =>\n    ipcInvoke(\"youtubeIngest\", {\n      url,\n      userId,\n      userName,\n      collectionId,\n      collectionName,\n    }),\n  createCollection: (\n    userId: number,\n    name: string,\n    description: string,\n    type: string,\n    isLocal: boolean,\n    localEmbeddingModel: string\n  ) =>\n    ipcInvoke(\"createCollection\", {\n      userId,\n      name,\n      description,\n      type,\n      isLocal,\n      localEmbeddingModel,\n    }) as unknown as Promise<{\n      id: number;\n      name: string;\n      description: string;\n      type: string;\n    }>,\n  getDirModels: (dirPath: string) =>\n    ipcInvoke(\"getDirModels\", { dirPath }) as unknown as Promise<{\n      dirPath: string;\n      models: Model[];\n    }>,\n  getOpenRouterModel: (userId: number) =>\n    ipcInvoke(\"getOpenRouterModel\", { userId }) as unknown as Promise<{\n      model: string;\n    }>,\n  downloadModel: (payload: {\n    modelId: string;\n    dirPath: string;\n    hfToken?: string;\n  }) => ipcInvoke(\"downloadModel\", payload) as unknown as Promise<void>,\n  cancelDownload: () =>\n    ipcInvoke(\"cancelDownload\") as unknown as Promise<{ success: boolean }>,\n  addOpenRouterModel: (userId: number, model: string) =>\n    ipcInvoke(\"addOpenRouterModel\", {\n      userId,\n      model,\n    }) as unknown as Promise<void>,\n  deleteOpenRouterModel: (userId: number, id: number) =>\n    ipcInvoke(\"deleteOpenRouterModel\", {\n      userId,\n      id,\n    }) as unknown as Promise<void>,\n  getOpenRouterModels: (userId: number) =>\n    ipcInvoke(\"getOpenRouterModels\", { userId }) as unknown as Promise<{\n      models: string[];\n    }>,\n  webcrawl: 
async (payload: {\n    base_url: string;\n    user_id: number;\n    user_name: string;\n    collection_id: number;\n    collection_name: string;\n    max_workers: number;\n  }) => {\n    try {\n      const result = await ipcInvoke(\"webcrawl\", payload);\n      return {\n        ...result,\n        status: \"success\",\n      } as {\n        base_url: string;\n        user_id: number;\n        user_name: string;\n        collection_id: number;\n        collection_name: string;\n        max_workers: number;\n        status: string;\n      };\n    } catch (error) {\n      console.error(\"Error in webcrawl:\", error);\n      throw error;\n    }\n  },\n  loadModel: (payload: {\n    model_location: string;\n    model_name: string;\n    model_type?: string;\n    user_id: number;\n  }) => ipcInvoke(\"loadModel\", payload) as unknown as Promise<void>,\n  fetchOllamaModels: () =>\n    ipcInvoke(\"fetchOllamaModels\") as unknown as Promise<{\n      models: OllamaModel[];\n    }>,\n  changeUser: () => ipcInvoke(\"changeUser\"),\n  quit: () => ipcInvoke(\"quit\"),\n  undo: () => ipcInvoke(\"undo\"),\n  redo: () => ipcInvoke(\"redo\"),\n  cut: () => ipcInvoke(\"cut\"),\n  copy: () => ipcInvoke(\"copy\"),\n  paste: () => ipcInvoke(\"paste\"),\n  delete: () => ipcInvoke(\"delete\"),\n  selectAll: () => ipcInvoke(\"selectAll\"),\n  print: () => ipcInvoke(\"print\"),\n  chat: () => ipcInvoke(\"chat\"),\n  history: () => ipcInvoke(\"history\"),\n  getUserCollections: (userId: number) =>\n    ipcInvoke(\"getUserCollections\", { userId }) as unknown as Promise<{\n      collections: Collection[];\n    }>,\n  vectorstoreQuery: (\n    userId: number,\n    userName: string,\n    collectionId: number,\n    collectionName: string,\n    query: string,\n    conversationId: number\n  ) =>\n    ipcInvoke(\"vectorstoreQuery\", {\n      userId,\n      userName,\n      collectionId,\n      collectionName,\n      query,\n      conversationId,\n    }) as unknown as Promise<{\n      results: {\n      
  content: string;\n        source: string;\n      }[];\n      status: string;\n      conversationId: number;\n    }>,\n  checkIfFFMPEGInstalled: () =>\n    ipcInvoke(\"checkIfFFMPEGInstalled\") as unknown as Promise<{\n      success: boolean;\n      message: boolean;\n    }>,\n  deleteConversation: (userId: number, conversationId: number) =>\n    ipcInvoke(\"deleteConversation\", { userId, conversationId }) as Promise<{\n      userId: number;\n      conversationId: number;\n    }>,\n  addFileToCollection: (\n    userId: number,\n    userName: string,\n    collectionId: number,\n    collectionName: string,\n    fileName: string,\n    fileContent: string\n  ) =>\n    ipcInvoke(\"addFileToCollection\", {\n      userId,\n      userName,\n      collectionId,\n      collectionName,\n      fileName,\n      fileContent,\n    }) as unknown as Promise<{\n      result: {\n        success: boolean;\n      };\n    }>,\n  getFilesInCollection: (userId: number, collectionId: number) =>\n    ipcInvoke(\"getFilesInCollection\", {\n      userId,\n      collectionId,\n    }) as unknown as Promise<{\n      files: string[];\n    }>,\n  getUserApiKeys: (userId: number) =>\n    ipcInvoke(\"getUserApiKeys\", { userId }) as unknown as Promise<{\n      apiKeys: ApiKey[];\n    }>,\n  addUserConversation: (userId: number, input: string) =>\n    ipcInvoke(\"addUserConversation\", { userId, input }) as Promise<{\n      userId: number;\n      input: string;\n      id: number;\n      title: string;\n    }>,\n  openCollectionFolder: (filepath: string) =>\n    ipcInvoke(\"openCollectionFolder\", { filepath }),\n  getConversationMessagesWithData: (userId: number, conversationId: number) =>\n    ipcInvoke(\"getConversationMessagesWithData\", {\n      userId,\n      conversationId,\n    }) as unknown as Promise<{ messages: Message[] }>,\n  getPlatform: () =>\n    ipcInvoke(\"getPlatform\") as unknown as Promise<{\n      platform: \"win32\" | \"darwin\" | \"linux\";\n    }>,\n  keyValidation: ({\n    
apiKey,\n    inputProvider,\n  }: {\n    apiKey: string;\n    inputProvider: string;\n  }) =>\n    ipcInvoke(\"keyValidation\", { apiKey, inputProvider }) as Promise<{\n      error?: string;\n      success?: boolean;\n    }>,\n  on: (\n    channel: \"ingest-progress\" | \"ollama-progress\" | \"download-model-progress\",\n    func: (event: Electron.IpcRendererEvent, message: any) => void\n  ) => electron.ipcRenderer.on(channel, func),\n  removeListener: (\n    channel: \"ingest-progress\" | \"ollama-progress\" | \"download-model-progress\",\n    func: (event: Electron.IpcRendererEvent, message: any) => void\n  ) => electron.ipcRenderer.removeListener(channel, func),\n  cancelEmbed: (payload: { userId: number }) =>\n    ipcInvoke(\"cancelEmbed\", payload) as unknown as Promise<void>,\n  systemSpecs: () =>\n    ipcInvoke(\"systemSpecs\") as unknown as Promise<{\n      cpu: string;\n      vram: string;\n      GPU_Manufacturer?: string;\n    }>,\n  checkOllama: () =>\n    ipcInvoke(\"checkOllama\") as unknown as Promise<{\n      isOllamaRunning: boolean;\n    }>,\n  getEmbeddingsModels: () =>\n    ipcInvoke(\"getEmbeddingsModels\") as unknown as Promise<{\n      models: Model[];\n    }>,\n  getCustomAPI: (userId: number) =>\n    ipcInvoke(\"getCustomAPI\", { userId }) as unknown as Promise<{\n      api: {\n        id: number;\n        user_id: number;\n        name: string;\n        endpoint: string;\n        api_key: string;\n        model: string;\n      }[];\n    }>,\n  addCustomAPI: (\n    userId: number,\n    name: string,\n    endpoint: string,\n    api_key: string,\n    model: string\n  ) =>\n    ipcInvoke(\"addCustomAPI\", {\n      userId,\n      name,\n      endpoint,\n      api_key,\n      model,\n    }) as unknown as Promise<{ id: number }>,\n  deleteCustomAPI: (userId: number, id: number) =>\n    ipcInvoke(\"deleteCustomAPI\", { userId, id }) as unknown as Promise<void>,\n  websiteFetch: (\n    url: string,\n    userId: number,\n    userName: string,\n    
collectionId: number,\n    collectionName: string\n  ) =>\n    ipcInvoke(\"websiteFetch\", {\n      url,\n      userId,\n      userName,\n      collectionId,\n      collectionName,\n    }) as unknown as Promise<{\n      success: boolean;\n      content?: string;\n      textContent?: string;\n      metadata?: {\n        title: string;\n        description: string;\n        author: string;\n        keywords: string;\n        ogImage: string;\n      };\n    }>,\n  transcribeAudio: (audioData: ArrayBuffer, userId: number) =>\n    ipcInvoke(\"transcribeAudio\", {\n      audioData: Buffer.from(audioData),\n      userId: userId,\n    }) as unknown as Promise<{\n      success: boolean;\n      filepath?: string;\n      error?: string;\n    }>,\n  onIngestProgress: (\n    callback: (event: Electron.IpcRendererEvent, message: any) => void\n  ) => electron.ipcRenderer.on(\"ingest-progress\", callback),\n  addDevAPIKey: (userId: number, name: string, expiration: string | null) =>\n    ipcInvoke(\"addDevAPIKey\", {\n      userId,\n      name,\n      expiration,\n    }) as unknown as Promise<Keys>,\n  getDevAPIKeys: (userId: number) =>\n    ipcInvoke(\"getDevAPIKeys\", { userId }) as unknown as Promise<{\n      keys: Keys[];\n    }>,\n  deleteDevAPIKey: (userId: number, id: number) =>\n    ipcInvoke(\"deleteDevAPIKey\", { userId, id }) as unknown as Promise<{\n      userId: number;\n      id: number;\n      result: boolean;\n    }>,\n  getUserCollectionFiles: (userId: number, userName: string) =>\n    ipcInvoke(\"getUserCollectionFiles\", {\n      userId,\n      userName,\n    }) as unknown as Promise<{\n      files: string[];\n    }>,\n  removeFileorFolder: (userId: number, userName: string, file: string) =>\n    ipcInvoke(\"removeFileorFolder\", { userId, userName, file }).then(\n      (result) => ({\n        ...result,\n        success: true,\n      })\n    ) as unknown as Promise<{\n      userId: number;\n      userName: string;\n      file: string;\n      success: boolean;\n 
   }>,\n  renameFile: (\n    userId: number,\n    userName: string,\n    file: string,\n    newName: string\n  ) =>\n    ipcInvoke(\"renameFile\", {\n      userId,\n      userName,\n      file,\n      newName,\n      success: true,\n    }) as Promise<{\n      userId: number;\n      userName: string;\n      file: string;\n      newName: string;\n      success: boolean;\n    }>,\n  openCollectionFolderFromFileExplorer: (filepath: string) =>\n    ipcInvoke(\"openCollectionFolderFromFileExplorer\", { filepath }) as Promise<{\n      filepath: string;\n    }>,\n  getModelInfo: (payload: {\n    model_location: string;\n    model_name: string;\n    model_type?: string;\n    user_id: number;\n  }) =>\n    ipcInvoke(\"getModelInfo\", payload) as unknown as Promise<{\n      model_info: Model;\n    }>,\n  unloadModel: (payload: {\n    model_location: string;\n    model_name: string;\n    model_type?: string;\n    user_id: number;\n  }) => ipcInvoke(\"unloadModel\", payload) as unknown as Promise<void>,\n  deleteAzureOpenAIModel: (userId: number, id: number) =>\n    ipcInvoke(\"deleteAzureOpenAIModel\", { userId, id }) as unknown as Promise<{\n      userId: number;\n      id: number;\n      success: boolean;\n    }>,\n  getAzureOpenAIModels: (userId: number) =>\n    ipcInvoke(\"getAzureOpenAIModels\", { userId }) as unknown as Promise<{\n      models: {\n        id: number;\n        name: string;\n        model: string;\n        endpoint: string;\n        api_key: string;\n      }[];\n    }>,\n  getCustomAPIs: (userId: number) =>\n    ipcInvoke(\"getCustomAPIs\", { userId }) as unknown as Promise<{\n      api: {\n        id: number;\n        user_id: number;\n        name: string;\n        endpoint: string;\n        api_key: string;\n      }[];\n    }>,\n  getAzureOpenAIModel: (userId: number, id: number) =>\n    ipcInvoke(\"getAzureOpenAIModel\", { userId, id }) as unknown as Promise<{\n      id: number;\n      name: string;\n      model: string;\n      endpoint: string;\n     
 api_key: string;\n    }>,\n  addAzureOpenAIModel: (\n    userId: number,\n    name: string,\n    model: string,\n    endpoint: string,\n    api_key: string\n  ) =>\n    ipcInvoke(\"addAzureOpenAIModel\", {\n      userId,\n      name,\n      model,\n      endpoint,\n      api_key,\n    }) as unknown as Promise<{\n      id: number;\n    }>,\n  getCustomModels: (userId: number) =>\n    ipcInvoke(\"getCustomModels\", { userId }) as unknown as Promise<{\n      models: {\n        id: number;\n        user_id: number;\n        name: string;\n        endpoint: string;\n        api_key: string;\n        model: string;\n      }[];\n    }>,\n  getModelsPath: () => ipcInvoke(\"getModelsPath\") as unknown as Promise<string>,\n  getUserTools: (userId: number) =>\n    ipcInvoke(\"getUserTools\", { userId }) as unknown as Promise<{\n      tools: {\n        id: number;\n        name: string;\n        description: string;\n        enabled: number;\n        docked: number;\n      }[];\n    }>,\n  addUserTool: (\n    userId: number,\n    toolId: number,\n    enabled: number,\n    docked: number\n  ) =>\n    ipcInvoke(\"addUserTool\", {\n      userId,\n      toolId,\n      enabled,\n      docked,\n    }) as unknown as Promise<{\n      result: number;\n    }>,\n  removeUserTool: (userId: number, toolId: number) =>\n    ipcInvoke(\"removeUserTool\", { userId, toolId }) as unknown as Promise<{\n      result: boolean;\n    }>,\n  updateUserTool: (\n    userId: number,\n    toolId: number,\n    enabled: number,\n    docked: number\n  ) =>\n    ipcInvoke(\"updateUserTool\", {\n      userId,\n      toolId,\n      enabled,\n      docked,\n    }) as unknown as Promise<{\n      result: boolean;\n    }>,\n  getTools: () =>\n    ipcInvoke(\"getTools\") as unknown as Promise<{\n      tools: {\n        id: number;\n        name: string;\n        description: string;\n      }[];\n    }>,\n  addExternalOllama: (\n    userId: number,\n    name: string,\n    endpoint: string,\n    api_key: string,\n    
model: string\n  ) =>\n    ipcInvoke(\"addExternalOllama\", {\n      userId,\n      name,\n      endpoint,\n      api_key,\n      model,\n    }) as unknown as Promise<{\n      id: number;\n    }>,\n  getExternalOllama: (userId: number) =>\n    ipcInvoke(\"getExternalOllama\", { userId }) as unknown as Promise<{\n      ollama: ExternalOllama[];\n    }>,\n} satisfies Window[\"electron\"]);\n\nfunction ipcInvoke<Key extends keyof EventPayloadMapping>(\n  key: Key,\n  payload?: EventPayloadMapping[Key]\n): Promise<EventPayloadMapping[Key]> {\n  return electron.ipcRenderer.invoke(key, payload);\n}\n\nfunction ipcOn<Key extends keyof EventPayloadMapping>(\n  key: Key,\n  callback: IpcCallback<EventPayloadMapping[Key]>\n) {\n  electron.ipcRenderer.on(key, callback);\n  return () => electron.ipcRenderer.off(key, callback);\n}\n\nfunction ipcOff<Key extends keyof EventPayloadMapping>(\n  key: Key,\n  callback: IpcCallback<EventPayloadMapping[Key]>\n) {\n  electron.ipcRenderer.off(key, callback);\n}\n\nfunction ipcSend<Key extends keyof EventPayloadMapping>(\n  key: Key,\n  payload?: EventPayloadMapping[Key]\n) {\n  electron.ipcRenderer.send(key, payload);\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/ensurePythonAndVenv.ts",
    "content": "import { dialog, shell } from \"electron\";\nimport { execSync } from \"child_process\";\nimport path from \"path\";\nimport log from \"electron-log\";\nimport { runWithPrivileges } from \"./runWithPrivileges.js\";\nimport fs from \"fs\";\nimport { getLinuxPackageManager } from \"./getLinuxPackageManager.js\";\nimport { updateLoadingStatus } from \"../loadingWindow.js\";\nimport { installDependencies } from \"./installDependencies.js\";\n\nexport async function ensurePythonAndVenv(backendPath: string) {\n  updateLoadingStatus(\"Installing Python and Virtual Environment...\", 0.5);\n  const venvPath = path.join(backendPath, \"venv\");\n  const pythonCommands =\n    process.platform === \"win32\"\n      ? [\"python3.11\", \"py -3.11\", \"python\"]\n      : process.platform === \"darwin\"\n      ? [\"/opt/homebrew/bin/python3.12\", \"python3.12\", \"python3\"]\n      : [\"python3.12\", \"python3\"];\n\n  let pythonCommand: string | null = null;\n  let pythonVersion: string | null = null;\n\n  // First ensure Python is installed\n  for (const cmd of pythonCommands) {\n    try {\n      log.info(`Trying Python command: ${cmd}`);\n      updateLoadingStatus(`Trying Python command: ${cmd}`, 1.5);\n      const version = execSync(`${cmd} --version`).toString().trim();\n      log.info(`Version output: ${version}`);\n      updateLoadingStatus(`Version output: ${version}`, 2.0);\n      if ((process.platform === \"win32\" && version.includes(\"3.11\")) || \n          (process.platform !== \"win32\" && version.includes(\"3.12\"))) {\n        pythonCommand = cmd;\n        pythonVersion = version;\n        log.info(`Found valid Python command: ${cmd} with version ${version}`);\n        updateLoadingStatus(\n          `Found valid Python command: ${cmd} with version ${version}`,\n          4.5\n        );\n        break;\n      }\n    } catch (error: unknown) {\n      if (error instanceof Error) {\n        log.info(`Failed to execute ${cmd}: ${error.message}`);\n      
  updateLoadingStatus(`Failed to execute ${cmd}: ${error.message}`, 3.5);\n      }\n      continue;\n    }\n  }\n\n  if (!pythonCommand) {\n    log.error(process.platform === \"win32\" ? \"Python 3.11 is not installed or not in PATH\" : \"Python 3.12 is not installed or not in PATH\");\n    updateLoadingStatus(process.platform === \"win32\" ? \"Python 3.11 is not installed or not in PATH\" : \"Python 3.12 is not installed or not in PATH\", 3.5);\n    const response = await dialog.showMessageBox({\n      type: \"question\",\n      buttons: [\"Install Python\", \"Cancel\"],\n      defaultId: 0,\n      title: process.platform === \"win32\" ? \"Python 3.11 Required\" : \"Python 3.12 Required\",\n      message: process.platform === \"win32\" ? \"Python 3.11 is required but not found on your system.\" : \"Python 3.12 is required but not found on your system.\",\n      detail:\n        process.platform === \"win32\" \n          ? \"Would you like to open the Python download page to install Python 3.11?\"\n          : \"Would you like to open the Python download page to install Python 3.12?\",\n    });\n\n    if (response.response === 0) {\n      updateLoadingStatus(\"Opening Python download page...\", 4.5);\n      await shell.openExternal(\n        process.platform === \"win32\"\n          ? \"https://www.python.org/downloads/release/python-3118/\"\n          : \"https://www.python.org/downloads/release/python-3128/\"\n      );\n      updateLoadingStatus(\n        process.platform === \"win32\"\n          ? \"Please restart the application after installing Python 3.11\"\n          : \"Please restart the application after installing Python 3.12\",\n        8.5\n      );\n      throw new Error(\n        process.platform === \"win32\"\n          ? 
\"Please restart the application after installing Python 3.11\"\n          : \"Please restart the application after installing Python 3.12\"\n      );\n    } else {\n      updateLoadingStatus(\"Installation cancelled\", 4.5);\n      throw new Error(\n        process.platform === \"win32\"\n          ? \"Python 3.11 is required to run this application. Installation was cancelled.\"\n          : \"Python 3.12 is required to run this application. Installation was cancelled.\"\n      );\n    }\n  }\n\n  log.info(`Using ${pythonVersion}`);\n  updateLoadingStatus(`Using ${pythonVersion}`, 5.5);\n  const venvPython =\n    process.platform === \"win32\"\n      ? path.join(venvPath, \"Scripts\", \"python.exe\")\n      : path.join(venvPath, \"bin\", \"python\");\n\n  // Create virtual environment if it doesn't exist\n  if (!fs.existsSync(venvPath)) {\n    log.info(process.platform === \"win32\" \n      ? \"Creating virtual environment with Python 3.11...\"\n      : \"Creating virtual environment with Python 3.12...\");\n    updateLoadingStatus(\n      process.platform === \"win32\"\n        ? 
\"Creating virtual environment with Python 3.11...\"\n        : \"Creating virtual environment with Python 3.12...\",\n      10.5\n    );\n    if (process.platform === \"linux\") {\n      try {\n        const packageManager = getLinuxPackageManager();\n        log.info(`Using package manager: ${packageManager.command}`);\n        updateLoadingStatus(\n          `Using package manager: ${packageManager.command}`,\n          11.5\n        );\n        const pythonFullPath = execSync(`which ${pythonCommand}`)\n          .toString()\n          .trim();\n        log.info(`Full Python path: ${pythonFullPath}`);\n        updateLoadingStatus(`Full Python path: ${pythonFullPath}`, 6.5);\n        await runWithPrivileges([\n          packageManager.installCommand,\n          `${pythonFullPath} -m venv \"${venvPath}\"`,\n          `chown -R ${process.env.USER}:${process.env.USER} \"${venvPath}\"`,\n        ]);\n        updateLoadingStatus(\"Virtual environment created successfully\", 6.5);\n        log.info(\"Virtual environment created successfully\");\n      } catch (error: unknown) {\n        if (error instanceof Error) {\n          log.error(\"Failed to create virtual environment\", error);\n          updateLoadingStatus(\"Failed to create virtual environment\", 7.5);\n          throw error;\n        }\n        updateLoadingStatus(\n          \"Unknown error while creating virtual environment\",\n          15.5\n        );\n        throw new Error(\"Unknown error while creating virtual environment\");\n      }\n    } else {\n      try {\n        execSync(`${pythonCommand} -m venv \"${venvPath}\"`);\n        log.info(\"Virtual environment created successfully\");\n        updateLoadingStatus(\"Virtual environment created successfully\", 7.5);\n      } catch (error: unknown) {\n        if (error instanceof Error) {\n          log.error(\"Failed to create virtual environment\", error);\n          updateLoadingStatus(\"Failed to create virtual environment\", 7.5);\n          
throw new Error(\"Failed to create virtual environment\");\n        } else {\n          log.error(\"Unknown error in ensurePythonAndVenv\", error);\n          updateLoadingStatus(\"Unknown error in ensurePythonAndVenv\", 7.5);\n          throw new Error(\"Unknown error in ensurePythonAndVenv\");\n        }\n      }\n    }\n  }\n\n  // Check for NVIDIA GPU and CUDA first\n  let hasNvidiaGpu = false;\n  let cudaAvailable = false;\n\n  // Force CPU-only mode for laptops and non-NVIDIA systems\n  if (process.platform === \"darwin\") {\n    hasNvidiaGpu = false;\n    cudaAvailable = false;\n    log.info(\"MacOS detected, using CPU-only mode\");\n    updateLoadingStatus(\"Using CPU-only mode for MacOS\", 7.5);\n  } else {\n    try {\n      if (process.platform === \"linux\" || process.platform === \"win32\") {\n        updateLoadingStatus(\"Checking for NVIDIA GPU...\", 8.5);\n        const gpuInfo = execSync(\"nvidia-smi\").toString();\n        // Only enable CUDA if this is a dedicated GPU (not a laptop integrated GPU)\n        if (\n          !gpuInfo.toLowerCase().includes(\"notebook\") &&\n          !gpuInfo.toLowerCase().includes(\"laptop\")\n        ) {\n          hasNvidiaGpu = true;\n          updateLoadingStatus(\"Dedicated NVIDIA GPU detected\", 9.5);\n        } else {\n          log.info(\"Laptop GPU detected, using CPU-only mode\");\n          updateLoadingStatus(\"Using CPU-only mode for laptop GPU\", 9.5);\n          hasNvidiaGpu = false;\n        }\n      }\n    } catch {\n      log.info(\n        \"No NVIDIA GPU detected or nvidia-smi not available, using CPU-only mode\"\n      );\n      updateLoadingStatus(\"Using CPU-only mode\", 9.5);\n      hasNvidiaGpu = false;\n    }\n  }\n\n  // Skip CUDA checks if we're in CPU-only mode\n  if (hasNvidiaGpu) {\n    try {\n      updateLoadingStatus(\"Checking for CUDA installation...\", 10.5);\n      const cudaCheckCommands = [\n        \"nvcc --version\",\n        process.platform === \"win32\"\n          ? 
\"where cuda-install-samples-*.exe\"\n          : \"which nvcc\",\n        process.platform === \"win32\"\n          ? 'dir /b \"%CUDA_PATH%\\\\bin\\\\nvcc.exe\"'\n          : \"ls -l /usr/local/cuda/bin/nvcc\",\n      ];\n\n      for (const cmd of cudaCheckCommands) {\n        try {\n          const output = execSync(cmd).toString();\n          if (output) {\n            cudaAvailable = true;\n            break;\n          }\n        } catch (e) {\n          log.debug(\n            `CUDA check command failed: ${\n              e instanceof Error ? e.message : String(e)\n            }`\n          );\n          continue;\n        }\n      }\n\n      // If CUDA is not available on Linux, try to install it\n      if (!cudaAvailable && process.platform === \"linux\") {\n        log.info(\n          \"CUDA not found on Linux, attempting to install CUDA toolkit...\"\n        );\n        updateLoadingStatus(\n          \"CUDA not found on Linux, attempting to install CUDA toolkit...\",\n          10.5\n        );\n\n        const packageManager = getLinuxPackageManager();\n\n        // Check if we're on Fedora - if so, handle CUDA installation in ifFedora.ts\n        if (fs.existsSync(\"/etc/fedora-release\")) {\n          // Re-check CUDA availability after Fedora-specific installation\n          updateLoadingStatus(\n            \"Re-checking CUDA availability after Fedora-specific installation\",\n            11.5\n          );\n          try {\n            const nvccVersion = execSync(\"nvcc --version\").toString();\n            if (nvccVersion) {\n              updateLoadingStatus(\n                \"CUDA toolkit installed successfully via Fedora-specific process\",\n                12.5\n              );\n              log.info(\n                \"CUDA toolkit installed successfully via Fedora-specific process\"\n              );\n              cudaAvailable = true;\n            }\n          } catch (error) {\n            log.error(\"Failed to verify CUDA 
installation on Fedora:\", error);\n          }\n        } else {\n          // Non-Fedora Linux systems\n          try {\n            updateLoadingStatus(\n              \"Installing CUDA toolkit and development tools...\",\n              11.5\n            );\n            await runWithPrivileges([\n              // Update package list\n              `${packageManager.command} update`,\n              // Install CUDA toolkit and development tools\n              `${packageManager.installCommand} nvidia-cuda-toolkit build-essential`,\n            ]);\n            updateLoadingStatus(\"CUDA toolkit installed successfully\", 12.5);\n            // Verify installation\n            const nvccVersion = execSync(\"nvcc --version\").toString();\n            if (nvccVersion) {\n              log.info(\"CUDA toolkit installed successfully\");\n              updateLoadingStatus(\"CUDA toolkit installed successfully\", 13.5);\n              cudaAvailable = true;\n            }\n          } catch (error) {\n            log.error(\"Failed to install CUDA toolkit:\", error);\n            updateLoadingStatus(\"Failed to install CUDA toolkit\", 13.5);\n            // Continue without CUDA support\n          }\n        }\n      }\n    } catch (error) {\n      log.info(\"Failed to detect CUDA installation details\", error);\n      updateLoadingStatus(\"Failed to detect CUDA installation details\", 13.5);\n    }\n  } else {\n    cudaAvailable = false;\n  }\n\n  // When you reach the dependency installation part, call the new async function:\n  return await installDependencies(venvPython, hasNvidiaGpu, cudaAvailable);\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/extractFromAsar.ts",
    "content": "import fs from \"fs\";\nimport path from \"path\";\nimport log from \"electron-log\";\nimport { app } from \"electron\";\n\nexport function extractFromAsar(sourcePath: string, destPath: string) {\n  const basePath = app.isPackaged ? process.resourcesPath : app.getAppPath();\n  const resolvedSourcePath = path.isAbsolute(sourcePath)\n    ? sourcePath\n    : path.join(basePath, sourcePath);\n  const resolvedDestPath = path.isAbsolute(destPath)\n    ? destPath\n    : path.join(basePath, destPath);\n\n  log.info(`Base path: ${basePath}`);\n  log.info(`Extracting from ${resolvedSourcePath} to ${resolvedDestPath}`);\n  try {\n    if (!fs.existsSync(resolvedSourcePath)) {\n      throw new Error(`Source path does not exist: ${resolvedSourcePath}`);\n    }\n    if (!fs.existsSync(resolvedDestPath)) {\n      log.info(`Creating directory: ${resolvedDestPath}`);\n      fs.mkdirSync(resolvedDestPath, { recursive: true });\n    }\n\n    const files = fs.readdirSync(resolvedSourcePath);\n    log.info(`Files in source: ${files.join(\", \")}`);\n    files.forEach((file) => {\n      const fullSourcePath = path.join(resolvedSourcePath, file);\n      const fullDestPath = path.join(resolvedDestPath, file);\n\n      if (fs.statSync(fullSourcePath).isDirectory()) {\n        log.info(`Extracting directory: ${file}`);\n        extractFromAsar(fullSourcePath, fullDestPath);\n      } else {\n        log.info(`Copying file: ${file}`);\n        fs.copyFileSync(fullSourcePath, fullDestPath);\n      }\n    });\n    log.info(`Extraction completed for ${resolvedSourcePath}`);\n  } catch (error: unknown) {\n    if (error instanceof Error) {\n      log.error(`Error in extractFromAsar: ${error.message}`);\n      log.error(`Stack trace: ${error.stack}`);\n    } else {\n      log.error(`Unknown error in extractFromAsar: ${error}`);\n    }\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/getLinuxPackageManager.ts",
    "content": "import { execSync } from \"child_process\";\nimport fs from \"fs\";\nimport log from \"electron-log\";\n\nexport function getLinuxPackageManager(): {\n  command: string;\n  installCommand: string;\n} {\n  // Check for Fedora-based system first\n  if (fs.existsSync(\"/etc/fedora-release\")) {\n    try {\n      execSync(\"which dnf\");\n      return {\n        command: \"dnf\",\n        installCommand: \"dnf -y update && dnf install -y python3-devel gcc gcc-c++\",\n      };\n    } catch {\n      log.info(\"Fedora-based system detected but dnf not found\");\n    }\n  }\n\n  try {\n    // Check for apt-get (Debian/Ubuntu/Mint)\n    execSync(\"which apt-get\");\n    return {\n      command: \"apt-get\",\n      installCommand:\n        \"apt-get update && apt-get install -y python3-venv python3-dev build-essential\",\n    };\n  } catch {\n    try {\n      // Check for DNF (other RHEL-based systems)\n      execSync(\"which dnf\");\n      return {\n        command: \"dnf\",\n        installCommand: \"dnf install -y python3-devel gcc gcc-c++\",\n      };\n    } catch {\n      try {\n        // Check for zypper (openSUSE)\n        execSync(\"which zypper\");\n        return {\n          command: \"zypper\",\n          installCommand:\n            \"zypper install -y python3-venv python3-devel gcc gcc-c++\",\n        };\n      } catch {\n        // Check for pacman (Arch Linux)\n        try {\n          execSync(\"which pacman\");\n          return {\n            command: \"pacman\",\n            installCommand:\n              \"pacman -S --noconfirm python-virtualenv python gcc\",\n          };\n        } catch {\n          throw new Error(\"No supported package manager found\");\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/ifFedora.ts",
    "content": "import { execSync } from \"child_process\";\nimport fs from \"fs\";\nimport log from \"electron-log\";\n\nexport async function ifFedora() {\n  if (fs.existsSync(\"/etc/fedora-release\")) {\n    try {\n      log.info(\"Fedora system detected, checking CUDA toolkit\");\n      try {\n        execSync(\"which nvcc\");\n        log.info(\"CUDA toolkit already installed\");\n        return;\n      } catch {\n        log.info(\"Installing CUDA toolkit for Fedora\");\n\n        // Check if CUDA is already installed at target location\n        if (fs.existsSync(\"/usr/local/cuda-12.6\")) {\n          log.info(\"CUDA 12.6 found at /usr/local/cuda-12.6, ensuring proper setup\");\n          \n          // Create symlink if it doesn't exist\n          if (!fs.existsSync(\"/usr/local/cuda\")) {\n            execSync(\"sudo ln -sf /usr/local/cuda-12.6 /usr/local/cuda\");\n          }\n\n          // Add CUDA paths to environment\n          const cudaPathsFile = \"/etc/profile.d/cuda.sh\";\n          if (!fs.existsSync(cudaPathsFile)) {\n            execSync(`sudo bash -c 'echo \"export PATH=/usr/local/cuda/bin:\\\\$PATH\" > ${cudaPathsFile}'`);\n            execSync(`sudo bash -c 'echo \"export LD_LIBRARY_PATH=/usr/local/cuda/lib64:\\\\$LD_LIBRARY_PATH\" >> ${cudaPathsFile}'`);\n            execSync(\"sudo chmod +x \" + cudaPathsFile);\n          }\n\n          // Update current session environment\n          process.env.PATH = `/usr/local/cuda/bin:${process.env.PATH}`;\n          process.env.LD_LIBRARY_PATH = `/usr/local/cuda/lib64:${process.env.LD_LIBRARY_PATH || \"\"}`;\n\n          // Clean up duplicate entries in ld.so.conf.d\n          if (fs.existsSync(\"/etc/ld.so.conf.d/cuda.conf\")) {\n            execSync(\"sudo rm -f /etc/ld.so.conf.d/cuda.conf\");\n          }\n          execSync('sudo bash -c \\'echo \"/usr/local/cuda/lib64\" > /etc/ld.so.conf.d/cuda.conf\\'');\n          execSync(\"sudo ldconfig\");\n\n          // Verify CUDA installation\n        
  try {\n            execSync(\"nvcc --version\");\n            log.info(\"CUDA toolkit properly configured\");\n            return;\n          } catch {\n            log.info(\"CUDA installation incomplete, proceeding with full installation\");\n          }\n        }\n\n        // Install RPM Fusion repositories\n        const match = fs\n          .readFileSync(\"/etc/fedora-release\", \"utf8\")\n          .match(/\\d+/);\n        if (!match) throw new Error(\"Could not determine Fedora version\");\n        const fedoraVersion = match[0];\n        execSync(\n          `sudo dnf install -y https://mirrors.rpmfusion.org/free/fedora/rpmfusion-free-release-${fedoraVersion}.noarch.rpm https://mirrors.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-${fedoraVersion}.noarch.rpm`\n        );\n\n        // Install NVIDIA drivers and CUDA support\n        execSync(\"sudo dnf install -y akmod-nvidia xorg-x11-drv-nvidia-cuda\");\n\n        // Install GCC 13 for CUDA compatibility\n        execSync(\"sudo dnf install -y gcc13-c++\");\n\n        // Download and install CUDA toolkit\n        const cudaInstaller = \"cuda_12.6.2_560.35.03_linux.run\";\n        if (!fs.existsSync(cudaInstaller)) {\n          execSync(\n            `wget https://developer.download.nvidia.com/compute/cuda/12.6.2/local_installers/${cudaInstaller}`\n          );\n        }\n\n        // Run CUDA installer with toolkit-only options\n        execSync(\n          `sudo sh ${cudaInstaller} --toolkit --toolkitpath=/usr/local/cuda-12.6 --silent --override`\n        );\n\n        // Create symlink and set up environment\n        execSync(\"sudo ln -sf /usr/local/cuda-12.6 /usr/local/cuda\");\n        \n        // Add CUDA paths to environment\n        const cudaPathsFile = \"/etc/profile.d/cuda.sh\";\n        execSync(`sudo bash -c 'echo \"export PATH=/usr/local/cuda/bin:\\\\$PATH\" > ${cudaPathsFile}'`);\n        execSync(`sudo bash -c 'echo \"export 
LD_LIBRARY_PATH=/usr/local/cuda/lib64:\\\\$LD_LIBRARY_PATH\" >> ${cudaPathsFile}'`);\n        execSync(\"sudo chmod +x \" + cudaPathsFile);\n\n        // Update current session environment\n        process.env.PATH = `/usr/local/cuda/bin:${process.env.PATH}`;\n        process.env.LD_LIBRARY_PATH = `/usr/local/cuda/lib64:${process.env.LD_LIBRARY_PATH || \"\"}`;\n        process.env.CUDA_HOME = \"/usr/local/cuda\";\n        process.env.CUDACXX = \"/usr/local/cuda/bin/nvcc\";\n\n        // Set NVCC to use GCC 13\n        process.env.NVCC_PREPEND_FLAGS = \"-ccbin /usr/bin/g++-13\";\n\n        // Configure library paths\n        execSync('sudo bash -c \\'echo \"/usr/local/cuda/lib64\" > /etc/ld.so.conf.d/cuda.conf\\'');\n        execSync(\"sudo ldconfig\");\n\n        // Clean up installer\n        fs.unlinkSync(cudaInstaller);\n      }\n    } catch (error) {\n      log.error(\"Error during CUDA installation:\", error);\n      throw error;\n    }\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/installDependencies.ts",
    "content": "import { spawnAsync } from \"../helpers/spawnAsync.js\";\nimport log from \"electron-log\";\nimport { updateLoadingStatus } from \"../loadingWindow.js\";\nimport { installLlamaCpp } from \"./installLlamaCpp.js\";\n\nexport async function installDependencies(\n  venvPython: string,\n  hasNvidiaGpu: boolean,\n  cudaAvailable: boolean\n) {\n  try {\n    // Upgrade pip first\n    await spawnAsync(venvPython, [\"-m\", \"pip\", \"install\", \"--upgrade\", \"pip\"]);\n    log.info(\"Pip upgraded successfully\");\n    updateLoadingStatus(\"Pip upgraded successfully\", 14.5);\n\n    // Install wheel and setuptools first with specific versions and no dependencies\n    await spawnAsync(venvPython, [\n      \"-m\",\n      \"pip\",\n      \"install\",\n      \"--no-deps\",\n      \"wheel>=0.42.0\",\n      \"setuptools>=69.0.3\",\n    ]);\n    log.info(\"Wheel and setuptools installed successfully\");\n    updateLoadingStatus(\"Basic build dependencies installed successfully\", 15);\n\n    // Install pkg_resources separately (needed for some builds)\n    await spawnAsync(venvPython, [\n      \"-m\",\n      \"pip\",\n      \"install\",\n      \"--no-deps\",\n      \"setuptools>=69.0.3\",\n      \"packaging>=23.2\",\n    ]);\n    log.info(\"Additional build dependencies installed successfully\");\n\n    // Install NumPy with Python 3.12 compatible version\n    await spawnAsync(venvPython, [\n      \"-m\",\n      \"pip\",\n      \"install\",\n      \"numpy>=1.26.0\",  // This version supports Python 3.12\n      \"--no-cache-dir\",\n    ]);\n    log.info(\"NumPy installed successfully\");\n    updateLoadingStatus(\"NumPy installed successfully\", 15.5);\n\n    // Install llvmlite and numba before whisper\n    await spawnAsync(venvPython, [\n      \"-m\",\n      \"pip\",\n      \"install\",\n      \"--no-cache-dir\",\n      \"llvmlite>=0.42.0\",  // Python 3.12 compatible version\n      \"numba>=0.59.0\",     // Python 3.12 compatible version\n    ]);\n    
log.info(\"Numba and llvmlite installed successfully\");\n\n    // Install FastAPI and dependencies with build isolation disabled\n    const fastApiDeps =\n      process.platform === \"darwin\"\n        ? [\n            \"fastapi==0.115.6\",\n            \"pydantic>=2.9.0,<3.0.0\",\n            \"uvicorn[standard]==0.27.0\",\n            \"PyJWT==2.10.1\",\n          ]\n        : [\n            \"fastapi>=0.115.6\",\n            \"pydantic>=2.5.0\",\n            \"uvicorn[standard]>=0.27.0\",\n            \"PyJWT==2.10.1\",\n          ];\n\n    await spawnAsync(venvPython, [\n      \"-m\",\n      \"pip\",\n      \"install\",\n      \"--no-cache-dir\",\n      ...fastApiDeps,\n    ]);\n    log.info(\"FastAPI and dependencies installed successfully\");\n    updateLoadingStatus(\"FastAPI and dependencies installed successfully\", 16.5);\n\n    // Install PyTorch\n    if (hasNvidiaGpu && cudaAvailable) {\n      log.info(\"Installing PyTorch with CUDA support\");\n      await spawnAsync(venvPython, [\n        \"-m\",\n        \"pip\",\n        \"install\",\n        \"--no-cache-dir\",\n        \"torch\",\n        \"torchvision\",\n        \"torchaudio\",\n        \"--index-url\",\n        \"https://download.pytorch.org/whl/cu121\",\n      ]);\n    } else {\n      log.info(\"Installing CPU-only PyTorch\");\n      await spawnAsync(venvPython, [\n        \"-m\",\n        \"pip\",\n        \"install\",\n        \"--no-cache-dir\",\n        \"torch\",\n        \"torchvision\",\n        \"torchaudio\",\n        \"--index-url\",\n        \"https://download.pytorch.org/whl/cpu\",\n      ]);\n    }\n    log.info(\"PyTorch installed successfully\");\n    updateLoadingStatus(\"PyTorch installed successfully\", 20.5);\n\n    // Install transformers and related packages\n    await spawnAsync(venvPython, [\n      \"-m\",\n      \"pip\",\n      \"install\",\n      \"--no-cache-dir\",\n      \"transformers==4.48.0\",\n      \"sentence-transformers==3.3.1\",\n    ]);\n    
log.info(\"Transformers installed successfully\");\n    updateLoadingStatus(\"Transformers installed successfully\", 21.5);\n\n    await installLlamaCpp(venvPython, hasNvidiaGpu, cudaAvailable);\n\n    updateLoadingStatus(\"Dependencies installed successfully\", 30.5);\n    return { venvPython, hasNvidiaGpu };\n  } catch (error) {\n    log.error(\"Failed to install dependencies\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/installLlamaCpp.ts",
    "content": "import { spawnAsync } from \"../helpers/spawnAsync.js\";\nimport log from \"electron-log\";\nimport { ifFedora } from \"./ifFedora.js\";\nimport { dialog, shell } from \"electron\";\nimport { updateLoadingStatus } from \"../loadingWindow.js\";\n\nconst cudaLoadingMessages = [\n  \"Herding CUDA llamas into the pen...\",\n  \"Teaching llamas quantum physics...\",\n  \"Boy, these CUDA llamas take forever to train...\",\n  \"Convincing llamas that parallel processing is fun...\",\n  \"Feeding llamas their favorite CUDA treats...\",\n  \"Still working... llamas are notoriously stubborn...\",\n  \"Optimizing llama performance (they're a bit lazy)...\",\n  \"Running llama benchmarks (they're on a coffee break)...\",\n  \"Almost there! Just waking up some sleepy llamas...\",\n  \"Turns out llamas need a lot of CUDA cores...\",\n  \"Negotiating with llamas for better compute rates...\",\n  \"Explaining parallel processing to skeptical llamas...\",\n  \"Llamas are attending their mandatory CUDA training...\",\n  \"Debugging llama logic (they're not very logical)...\",\n  \"Waiting for llamas to finish their GPU meditation...\",\n  \"Converting llama thoughts to tensor operations...\",\n  \"Llamas are studying the CUDA documentation...\",\n  \"Teaching llamas to count in parallel...\",\n  \"Llamas insist on taking another GPU coffee break...\",\n  \"Optimizing llama memory allocation patterns...\",\n  \"Convincing llamas that GPUs aren't scary...\",\n  \"Llamas are doing their morning CUDA yoga...\",\n  \"Synchronizing llama thread schedules...\",\n  \"Llamas are debating quantum superposition...\",\n  \"Installing llama-friendly CUDA drivers...\",\n  \"Waiting for llamas to finish their GPU snack...\",\n  \"Llamas are practicing their parallel humming...\",\n  \"Teaching llamas about memory bandwidth...\",\n  \"Llamas are computing their optimal nap times...\",\n  \"Running anti-spitting protocols on CUDA llamas...\",\n  \"Llamas are calibrating their tensor 
wool...\",\n  \"Scheduling llama GPU time-sharing meetings...\",\n  \"Defragmenting llama memory banks...\",\n  \"Llamas are reviewing their CUDA certification...\",\n  \"Installing advanced llama parallel-spitting modules...\",\n  \"Llamas are optimizing their cache coherency...\",\n  \"Running llama-approved stress tests on GPU...\",\n  \"Llamas insist on following proper CUDA protocols...\",\n  \"Upgrading llama neural pathways to CUDA spec...\",\n  \"Llamas are performing their GPU diagnostics dance...\",\n];\n\nlet messageInterval: NodeJS.Timeout | null = null;\nconst usedMessageIndices: Set<number> = new Set();\n\nfunction getNextMessage(): string {\n  // If we've used all messages, reset the tracking\n  if (usedMessageIndices.size === cudaLoadingMessages.length) {\n    usedMessageIndices.clear();\n  }\n\n  // Get available indices that haven't been used\n  const availableIndices = Array.from(\n    { length: cudaLoadingMessages.length },\n    (_, i) => i\n  ).filter((i) => !usedMessageIndices.has(i));\n\n  // Select random index from available ones\n  const randomIndex = Math.floor(Math.random() * availableIndices.length);\n  const selectedIndex = availableIndices[randomIndex];\n\n  // Mark this index as used\n  usedMessageIndices.add(selectedIndex);\n\n  return cudaLoadingMessages[selectedIndex];\n}\n\nfunction startRotatingMessages(baseProgress: number) {\n  if (messageInterval) clearInterval(messageInterval);\n\n  messageInterval = setInterval(() => {\n    updateLoadingStatus(\n      \"Installing CUDA llama-cpp-python (this may take a while) \" +\n        getNextMessage(),\n      baseProgress\n    );\n  }, 15000); // Rotate message every 15 seconds\n}\n\nfunction stopRotatingMessages() {\n  if (messageInterval) {\n    clearInterval(messageInterval);\n    messageInterval = null;\n    usedMessageIndices.clear(); // Reset tracking when stopped\n  }\n}\n\nexport async function installLlamaCpp(\n  venvPython: string,\n  hasNvidiaGpu: boolean,\n  cudaAvailable: 
boolean\n) {\n  try {\n    if (hasNvidiaGpu && cudaAvailable) {\n      // Install build dependencies for CUDA\n      updateLoadingStatus(\"Installing build dependencies for CUDA\", 22.5);\n      await spawnAsync(venvPython, [\n        \"-m\",\n        \"pip\",\n        \"install\",\n        \"setuptools\",\n        \"wheel\",\n        \"scikit-build-core\",\n        \"cmake\",\n        \"ninja\",\n      ]);\n      updateLoadingStatus(\n        \"Installing typing-extensions, numpy, diskcache, msgpack\",\n        23.5\n      );\n      await spawnAsync(venvPython, [\n        \"-m\",\n        \"pip\",\n        \"install\",\n        \"typing-extensions\",\n        \"numpy\",\n        \"diskcache\",\n        \"msgpack\",\n      ]);\n\n      // Check for Fedora and install CUDA toolkit if needed\n      await ifFedora();\n      updateLoadingStatus(\"Installing CUDA toolkit for Fedora\", 24.5);\n      process.env.CMAKE_ARGS = \"-DGGML_CUDA=ON\";\n      process.env.FORCE_CMAKE = \"1\";\n      process.env.LLAMA_CUDA = \"1\";\n      process.env.GGML_CUDA_FORCE_MMQ = \"1\";\n      process.env.GGML_CUDA_F16 = \"1\";\n      process.env.GGML_CUDA_ENABLE_UNIFIED_MEMORY = \"1\";\n\n      log.info(\"Installing llama-cpp-python with CUDA support\");\n      updateLoadingStatus(\n        \"Installing llama-cpp-python with CUDA support (this may take a while)\",\n        25.5\n      );\n      try {\n        startRotatingMessages(25.5);\n\n        // Check if we're on Linux and need to handle GCC version\n        const envVars: Record<string, string> = {\n          ...process.env,\n          FORCE_CMAKE: \"1\",\n          CMAKE_ARGS: \"-DGGML_CUDA=ON\",\n          LLAMA_CUDA: \"1\",\n          VERBOSE: \"1\",\n          CMAKE_BUILD_PARALLEL_LEVEL: \"8\",\n        };\n\n        if (process.platform === \"linux\") {\n          // Add CUDA compiler flags to allow newer GCC versions\n          envVars.NVCC_PREPEND_FLAGS = \"-allow-unsupported-compiler\";\n        }\n\n        if 
(process.platform === \"win32\") {\n          // NOTE(review): -ccbin /usr/bin/g++-13 is a Unix path and can never resolve on Windows; use the portable nvcc flag so the local MSVC toolchain is accepted instead.\n          envVars.NVCC_PREPEND_FLAGS = \"-allow-unsupported-compiler\";\n        }\n\n        await spawnAsync(\n          venvPython,\n          [\n            \"-m\",\n            \"pip\",\n            \"install\",\n            \"--no-cache-dir\",\n            \"--verbose\",\n            \"llama-cpp-python\",\n          ],\n          { env: envVars }\n        );\n\n        stopRotatingMessages();\n        updateLoadingStatus(\"llama-cpp-python installed successfully\", 30.5);\n      } catch (error) {\n        // Stop the rotating status interval on every failure path (previously only the non-Windows branch stopped it).\n        stopRotatingMessages();\n        if (process.platform === \"win32\") {\n          log.error(\n            \"Failed to install llama-cpp-python with CUDA support\",\n            error\n          );\n          updateLoadingStatus(\n            \"Failed to install llama-cpp-python with CUDA support. Asking user to install CPU version.\",\n            30.5\n          );\n          const { response } = await dialog.showMessageBox({\n            type: \"error\",\n            title: \"CUDA Installation Error\",\n            message:\n              \"Failed to install llama-cpp-python with CUDA support. Would you like to proceed with CPU-only version instead?\",\n            detail:\n              \"This could be due to missing Visual Studio 2022 with C++ Desktop Development Tools. 
Would you like to proceed with CPU-only version instead?\\n\\nNote: You can install Visual Studio and try CUDA installation again later.\",\n            buttons: [\n              \"Install CPU Version\",\n              \"Open Visual Studio Download Page\",\n            ],\n            defaultId: 0,\n            cancelId: 1,\n            noLink: true,\n          });\n          if (response === 0) {\n            log.info(\n              \"Falling back to CPU-only installation using pre-built wheel\"\n            );\n            updateLoadingStatus(\n              \"Falling back to CPU-only installation using pre-built wheel\",\n              31.5\n            );\n            await spawnAsync(venvPython, [\n              \"-m\",\n              \"pip\",\n              \"install\",\n              \"--only-binary\",\n              \":all:\",\n              \"llama-cpp-python\",\n              \"--extra-index-url\",\n              \"https://abetlen.github.io/llama-cpp-python/whl/cpu\",\n              \"--no-cache-dir\",\n              \"--verbose\",\n            ]);\n            updateLoadingStatus(\"CPU-only installation completed\", 32.5);\n          } else {\n            // Open Visual Studio download page\n            await shell.openExternal(\n              \"https://visualstudio.microsoft.com/vs/community/\"\n            );\n            throw error;\n          }\n        } else {\n          // For Linux and other platforms, try falling back to CPU version\n          log.error(\n            \"Failed to install CUDA version, falling back to CPU version\",\n            error\n          );\n          stopRotatingMessages();\n\n          updateLoadingStatus(\n            \"CUDA installation failed, falling back to CPU version\",\n            30.5\n          );\n\n          await spawnAsync(venvPython, [\n            \"-m\",\n            \"pip\",\n            \"install\",\n            \"--only-binary\",\n            \":all:\",\n            \"llama-cpp-python\",\n          
  \"--extra-index-url\",\n            \"https://abetlen.github.io/llama-cpp-python/whl/cpu\",\n            \"--no-cache-dir\",\n            \"--verbose\",\n          ]);\n\n          updateLoadingStatus(\"CPU-only installation completed\", 32.5);\n        }\n      }\n    } else {\n      // CPU-only installation\n      updateLoadingStatus(\"Installing CPU-only llama-cpp-python\", 26.5);\n      log.info(\"Installing CPU-only llama-cpp-python\");\n      try {\n        if (process.platform === \"darwin\") {\n          // On macOS, build from source\n          await spawnAsync(venvPython, [\n            \"-m\",\n            \"pip\",\n            \"install\",\n            \"setuptools\",\n            \"wheel\",\n            \"scikit-build-core\",\n            \"cmake\",\n            \"ninja\",\n          ]);\n          updateLoadingStatus(\n            \"Installing CPU-only llama-cpp-python (this may take a while)\",\n            27.5\n          );\n          await spawnAsync(venvPython, [\n            \"-m\",\n            \"pip\",\n            \"install\",\n            \"--verbose\",\n            \"--no-cache-dir\",\n            \"llama-cpp-python\",\n          ]);\n          updateLoadingStatus(\"CPU-only installation completed\", 28.5);\n        } else {\n          // For other platforms, try pre-built wheel first\n          await spawnAsync(venvPython, [\n            \"-m\",\n            \"pip\",\n            \"install\",\n            \"--only-binary\",\n            \":all:\",\n            \"llama-cpp-python\",\n            \"--extra-index-url\",\n            \"https://abetlen.github.io/llama-cpp-python/whl/cpu\",\n            \"--no-cache-dir\",\n            \"--verbose\",\n          ]);\n          updateLoadingStatus(\"CPU-only installation completed\", 28.5);\n        }\n      } catch (error) {\n        if (process.platform === \"win32\") {\n          log.error(\"Failed to install llama-cpp-python\", error);\n          await dialog.showMessageBox({\n            
type: \"error\",\n            title: \"Installation Error\",\n            message: \"Failed to install llama-cpp-python\",\n            detail:\n              \"An error occurred while installing the CPU version of llama-cpp-python. Please try again or check your internet connection.\",\n            buttons: [\"OK\"],\n          });\n        }\n        throw error;\n      }\n    }\n  } catch (error) {\n    log.error(\"Error during llama-cpp-python installation:\", error);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/killProcessOnPort.ts",
    "content": "import { exec } from \"child_process\";\nimport log from \"electron-log\";\n\nexport async function killProcessOnPort(port: number): Promise<void> {\n  return new Promise((resolve) => {\n    const command =\n      process.platform === \"win32\"\n        ? `netstat -ano | findstr :${port} | findstr LISTENING`\n        : `lsof -i :${port} | grep LISTEN`;\n\n    exec(command, async (error, stdout) => {\n      if (error) {\n        log.error(`Failed to find process on port ${port}: ${error}`);\n        resolve(); // Resolve anyway since there might not be a process\n        return;\n      }\n\n      if (!stdout) {\n        resolve();\n        return;\n      }\n\n      // Extract PID based on platform\n      let pid: string | null = null;\n      if (process.platform === \"win32\") {\n        // On Windows, try to get all PIDs that might be using the port\n        const lines = stdout.split(\"\\n\");\n        for (const line of lines) {\n          const match = line.match(/\\s+(\\d+)\\s*$/);\n          if (match) {\n            pid = match[1];\n            // Kill each process we find\n            const killCommand = `taskkill /F /PID ${pid}`;\n            try {\n              await new Promise((resolve, reject) => {\n                exec(killCommand, (killError) => {\n                  if (killError) {\n                    log.error(`Failed to kill process ${pid}: ${killError}`);\n                    reject(killError);\n                  } else {\n                    log.info(\n                      `Successfully killed process ${pid} on port ${port}`\n                    );\n                    resolve(true);\n                  }\n                });\n              });\n            } catch (e) {\n              log.error(`Error killing process: ${e}`);\n            }\n          }\n        }\n      } else {\n        const match = stdout.match(/\\S+\\s+(\\d+)/);\n        if (match) {\n          pid = match[1];\n          // Kill the process\n          
const killCommand = `kill -9 ${pid}`;\n          exec(killCommand, (killError) => {\n            if (killError) {\n              log.error(`Failed to kill process ${pid}: ${killError}`);\n            } else {\n              log.info(`Successfully killed process ${pid} on port ${port}`);\n            }\n          });\n        }\n      }\n\n      // Add a small delay before resolving to ensure process cleanup\n      setTimeout(() => {\n        resolve();\n      }, 1000);\n    });\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/python.test.ts",
    "content": "import { test, expect, vi, beforeEach } from \"vitest\";\nimport { dialog, shell } from \"electron\";\nimport { spawn, execSync } from \"child_process\";\nimport fs from \"fs\";\nimport { startPythonServer } from \"./startAndStopPython.js\";\nimport type { Mock } from \"vitest\";\n\n// Mock all external dependencies\nvi.mock(\"electron\", () => ({\n  app: {\n    getAppPath: vi.fn().mockReturnValue(\"/mock/app/path\"),\n    getPath: vi.fn().mockReturnValue(\"/mock/temp\"),\n  },\n  dialog: {\n    showMessageBox: vi.fn(),\n  },\n  shell: {\n    openExternal: vi.fn(),\n  },\n}));\n\nvi.mock(\"child_process\", () => ({\n  spawn: vi.fn(),\n  execSync: vi.fn(),\n}));\n\nvi.mock(\"fs\", () => ({\n  default: {\n    existsSync: vi.fn(),\n    mkdirSync: vi.fn(),\n    readdirSync: vi.fn(),\n    statSync: vi.fn(),\n    copyFileSync: vi.fn(),\n  },\n}));\n\nvi.mock(\"../util.js\", () => ({\n  isDev: vi.fn(),\n}));\n\nvi.mock(\"../loadingWindow.js\", () => ({\n  updateLoadingStatus: vi.fn(),\n}));\n\n// Mock EventEmitter for spawn process\nconst mockEventEmitter = {\n  on: vi.fn((event: string, callback: (arg: number) => void) => {\n    if (event === \"close\") callback(0);\n  }),\n  stdout: {\n    on: vi.fn((event: string, callback: (data: Buffer) => void) => {\n      if (event === \"data\") {\n        callback(Buffer.from(\"Application startup complete.\"));\n      }\n    }),\n  },\n  stderr: {\n    on: vi.fn(),\n  },\n  pid: 12345,\n};\n\nbeforeEach(() => {\n  vi.clearAllMocks();\n});\n\ntest(\"successfully starts python server in dev mode\", async () => {\n  const isDev = await import(\"../util.js\");\n  (isDev.isDev as Mock).mockReturnValue(true);\n  (spawn as unknown as Mock).mockReturnValue(mockEventEmitter);\n  (fs.existsSync as Mock).mockReturnValue(true);\n  (execSync as Mock).mockReturnValue(Buffer.from(\"Python 3.12.0\"));\n\n  await startPythonServer();\n\n  expect(spawn).toHaveBeenCalledTimes(2); // Once for deps, once for server\n  
expect(execSync).toHaveBeenCalled(); // Python version check\n});\n\ntest(\"handles missing Python 3.12 installation\", async () => {\n  (execSync as Mock).mockImplementation(() => {\n    throw new Error(\"Python not found\");\n  });\n  (dialog.showMessageBox as Mock).mockResolvedValue({ response: 0 });\n\n  await expect(startPythonServer()).rejects.toThrow(\n    \"Please restart the application after installing Python 3.12\"\n  );\n  expect(shell.openExternal).toHaveBeenCalledWith(\n    \"https://www.python.org/downloads/release/python-3128/\"\n  );\n});\n\ntest(\"handles dependency installation failure\", async () => {\n  const isDev = await import(\"../util.js\");\n  (isDev.isDev as Mock).mockReturnValue(true);\n  (execSync as Mock).mockReturnValue(Buffer.from(\"Python 3.12.0\")); // Mock successful Python check\n  const failingEventEmitter = {\n    ...mockEventEmitter,\n    on: vi.fn((event: string, callback: (code: number) => void) => {\n      if (event === \"close\") callback(1);\n    }),\n  };\n  (spawn as unknown as Mock).mockReturnValue(failingEventEmitter);\n\n  await expect(startPythonServer()).rejects.toThrow();\n});\n\ntest(\"extracts backend in production mode when needed\", async () => {\n  const isDev = await import(\"../util.js\");\n  (isDev.isDev as Mock).mockReturnValue(false);\n  (execSync as Mock).mockReturnValue(Buffer.from(\"Python 3.12.0\")); // Mock successful Python check\n\n  // Mock file system checks\n  (fs.existsSync as Mock)\n    .mockReturnValueOnce(false) // unpacked backend doesn't exist\n    .mockReturnValueOnce(true) // source path exists\n    .mockReturnValueOnce(false) // destination directory doesn't exist\n    .mockReturnValue(true); // subsequent checks return true\n\n  (fs.readdirSync as Mock).mockReturnValue([\"main.py\", \"requirements.txt\"]);\n  (fs.statSync as Mock).mockReturnValue({ isDirectory: () => false });\n  (spawn as unknown as Mock).mockReturnValue(mockEventEmitter);\n\n  await startPythonServer();\n\n  // 
Verify file system operations\n  expect(fs.mkdirSync).toHaveBeenCalledWith(\"/mock/temp/notate-backend\", {\n    recursive: true,\n  });\n  expect(fs.copyFileSync).toHaveBeenCalledWith(\n    \"/mock/app/path/Backend/main.py\",\n    \"/mock/temp/notate-backend/main.py\"\n  );\n  expect(fs.copyFileSync).toHaveBeenCalledWith(\n    \"/mock/app/path/Backend/requirements.txt\",\n    \"/mock/temp/notate-backend/requirements.txt\"\n  );\n});\n"
  },
  {
    "path": "Frontend/src/electron/python/runWithPrivileges.ts",
    "content": "import { execSync } from \"child_process\";\nimport { dialog } from \"electron\";\nimport log from \"electron-log\";\n\nexport async function runWithPrivileges(\n  commands: string | string[]\n): Promise<void> {\n  if (process.platform !== \"linux\") return;\n\n  const commandArray = Array.isArray(commands) ? commands : [commands];\n\n  try {\n    // Try without privileges first\n    for (const cmd of commandArray) {\n      execSync(cmd);\n    }\n  } catch {\n    log.info(\"Failed to run commands, requesting privileges...\");\n\n    const response = await dialog.showMessageBox({\n      type: \"question\",\n      buttons: [\"Grant Privileges\", \"Cancel\"],\n      defaultId: 0,\n      title: \"Administrator Privileges Required\",\n      message:\n        \"Creating the Python environment requires administrator privileges.\",\n      detail:\n        \"This is needed to install required system dependencies and create the virtual environment. This will only be needed once.\",\n    });\n\n    if (response.response === 0) {\n      try {\n        // Use sudo -n to prevent password prompt if sudo is configured with NOPASSWD\n        try {\n          execSync(\"sudo -n true\");\n          // If sudo -n succeeds, use sudo\n          const combinedCommand = commandArray.join(\" && \");\n          execSync(`sudo sh -c '${combinedCommand}'`);\n        } catch {\n          // If sudo -n fails, fall back to pkexec\n          const combinedCommand = commandArray.join(\" && \");\n          execSync(`pkexec sh -c 'DEBIAN_FRONTEND=noninteractive ${combinedCommand}'`);\n        }\n      } catch (error) {\n        log.error(\"Failed to run commands with privileges\", error);\n        throw new Error(\"Failed to run commands with elevated privileges\");\n      }\n    } else {\n      throw new Error(\n        \"User declined to grant administrator privileges. Cannot continue.\"\n      );\n    }\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/python/startAndStopPython.ts",
    "content": "import { app } from \"electron\";\nimport { spawn, ChildProcess, SpawnOptions } from \"child_process\";\nimport path from \"path\";\nimport { isDev } from \"../util.js\";\nimport { updateLoadingStatus } from \"../loadingWindow.js\";\nimport fs from \"fs\";\nimport log from \"electron-log\";\nimport ffmpegStatic from \"ffmpeg-static\";\nimport { generateSecret } from \"../authentication/secret.js\";\nimport { getSecret } from \"../authentication/devApi.js\";\nimport { ensurePythonAndVenv } from \"./ensurePythonAndVenv.js\";\nimport { extractFromAsar } from \"./extractFromAsar.js\";\nimport { killProcessOnPort } from \"./killProcessOnPort.js\";\n\nlog.transports.file.level = \"info\";\nlog.transports.file.resolvePathFn = () =>\n  path.join(app.getPath(\"userData\"), \"logs/main.log\");\n\nlet pythonProcess: ChildProcess | null = null;\n\nexport async function startPythonServer() {\n  log.info(\"Application starting...\");\n  log.info(\"Creating window...\");\n  const appPath = app.getAppPath();\n  log.info(`App path: ${appPath}`);\n\n  // Generate JWT secret before starting the server\n  const jwtSecret = generateSecret();\n\n  let backendPath;\n  if (isDev()) {\n    // In dev mode, Backend is one level up from the Frontend directory\n    backendPath = path.join(appPath, \"..\", \"Backend\");\n    log.info(`Dev mode: Backend path set to ${backendPath}`);\n  } else {\n    // In production, try both \"Backend\" and \"backend\" paths\n    const backendPaths = [\n      path.join(process.resourcesPath, \"Backend\"),\n      path.join(process.resourcesPath, \"backend\"),\n    ];\n\n    for (const testPath of backendPaths) {\n      if (fs.existsSync(testPath)) {\n        backendPath = testPath;\n        log.info(`Prod mode: Found backend at ${backendPath}`);\n        break;\n      }\n    }\n\n    if (!backendPath) {\n      const tempPath = path.join(app.getPath(\"temp\"), \"notate-backend\");\n      log.info(`Prod mode: Temp path set to ${tempPath}`);\n\n     
 // Try both capitalization variants in ASAR\n      const asarBackendPaths = [\n        path.join(appPath, \"Backend\"),\n        path.join(appPath, \"backend\"),\n      ];\n\n      let asarBackendPath;\n      for (const testPath of asarBackendPaths) {\n        if (fs.existsSync(testPath)) {\n          asarBackendPath = testPath;\n          log.info(`Found ASAR backend at ${asarBackendPath}`);\n          break;\n        }\n      }\n\n      if (!asarBackendPath) {\n        const error = new Error(\"Backend not found in any expected location\");\n        log.error(error);\n        throw error;\n      }\n\n      try {\n        extractFromAsar(asarBackendPath, tempPath);\n        log.info(`Successfully extracted from ASAR to ${tempPath}`);\n        backendPath = tempPath;\n      } catch (error) {\n        log.error(`Failed to extract from ASAR: ${error}`);\n        throw error;\n      }\n    }\n  }\n\n  // Use path.join for constructing paths to scripts\n  const dependencyScript = path.join(backendPath, \"ensure_dependencies.py\");\n  const mainScript = path.join(backendPath, \"main.py\");\n\n  return new Promise((resolve, reject) => {\n    let totalPackages = 0;\n    let installedPackages = 0;\n    let retryCount = 0;\n    const MAX_RETRIES = 3;\n\n    ensurePythonAndVenv(backendPath)\n      .then(({ venvPython, hasNvidiaGpu }) => {\n        log.info(`Venv Python: ${venvPython}`);\n        log.info(`CUDA enabled: ${hasNvidiaGpu}`);\n\n        // Define spawn options with proper typing\n        const spawnOptions: SpawnOptions = {\n          stdio: \"pipe\",\n          env: {\n            ...process.env,\n            USE_CUDA: hasNvidiaGpu ? \"1\" : \"0\",\n            FFMPEG_PATH: app.isPackaged\n              ? path.join(\n                  process.resourcesPath,\n                  \"ffmpeg\" + (process.platform === \"win32\" ? \".exe\" : \"\")\n                )\n              : typeof ffmpegStatic === \"string\"\n              ? 
ffmpegStatic\n              : \"\",\n            JWT_SECRET: jwtSecret,\n            IS_DEV: isDev() ? \"1\" : \"0\",\n            SECRET_KEY: getSecret(),\n          },\n        };\n\n        // Pass the GPU status and FFmpeg path to the dependency script\n        const depProcess = spawn(venvPython, [dependencyScript], spawnOptions);\n\n        if (!depProcess.stdout || !depProcess.stderr) {\n          throw new Error(\"Failed to create process with stdio pipes\");\n        }\n\n        log.info(`Dependency process started: ${depProcess.pid}`);\n\n        depProcess.stdout.on(\"data\", (data: Buffer) => {\n          const message = data.toString().trim();\n          log.info(`Dependency process output: ${message}`);\n\n          if (message.startsWith(\"Total packages:\")) {\n            totalPackages = parseInt(\n              message.split(\"|\")[0].split(\":\")[1].trim()\n            );\n          } else {\n            const [text, progress] = message.split(\"|\");\n            if (progress) {\n              updateLoadingStatus(text, parseFloat(progress));\n            } else {\n              updateLoadingStatus(\n                text,\n                (installedPackages / totalPackages) * 35\n              );\n            }\n\n            if (text.includes(\"Installing\")) {\n              installedPackages++;\n            }\n          }\n        });\n\n        depProcess.stderr.on(\"data\", (data: Buffer) => {\n          const errorMessage = data.toString().trim();\n          // Don't treat these as errors since they're actually info messages from uvicorn\n          if (errorMessage.includes(\"INFO:\")) {\n            log.info(`Python info: ${errorMessage}`);\n          } else if (errorMessage.includes(\"Download complete.\")) {\n            log.error(`Dependency check error: ${errorMessage}`);\n            updateLoadingStatus(`${errorMessage}`, -1);\n          } else if (errorMessage.includes(\"Downloading\")) {\n            log.error(`Dependency check 
error: ${errorMessage}`);\n            updateLoadingStatus(`${errorMessage}`, -1);\n          } else {\n            log.error(`Dependency check error: ${errorMessage}`);\n            updateLoadingStatus(`Error: ${errorMessage}`, -1);\n          }\n        });\n\n        depProcess.on(\"close\", async (code: number | null) => {\n          log.info(`Dependency process closed with code ${code}`);\n          if (code === 0) {\n            updateLoadingStatus(\"Starting application server...\", 99);\n\n            const startServer = async () => {\n              // Create Python process with same options\n              pythonProcess = spawn(venvPython, [mainScript], spawnOptions);\n\n              if (\n                !pythonProcess ||\n                !pythonProcess.stdout ||\n                !pythonProcess.stderr\n              ) {\n                reject(\n                  new Error(\"Failed to create Python process with stdio pipes\")\n                );\n                return;\n              }\n\n              log.info(`Python process spawned with PID: ${pythonProcess.pid}`);\n              let serverStarting = true;\n\n              pythonProcess.stdout.on(\"data\", (data: Buffer) => {\n                const message = data.toString().trim();\n                log.info(`Python stdout: ${message}`);\n                if (\n                  message.includes(\"Application startup complete.\") ||\n                  message.includes(\"Uvicorn running on http://127.0.0.1:47372\")\n                ) {\n                  serverStarting = false;\n                  updateLoadingStatus(\"Application server ready!\", 100);\n                  resolve(true);\n                }\n              });\n\n              pythonProcess.stderr.on(\"data\", async (data: Buffer) => {\n                const errorMessage = data.toString().trim();\n                if (\n                  errorMessage.includes(\"address already in use\") ||\n                  errorMessage.includes(\"[Errno 
10048]\")\n                ) {\n                  log.info(\n                    \"Port 47372 is in use, attempting to kill existing process\"\n                  );\n                  await killProcessOnPort(47372);\n                  // Wait for the port to be fully released\n                  await new Promise((resolve) => setTimeout(resolve, 5000));\n                  // Retry starting the server after a delay\n                  if (retryCount < MAX_RETRIES) {\n                    retryCount++;\n                    log.info(`Retry attempt ${retryCount} of ${MAX_RETRIES}`);\n                    setTimeout(() => startServer(), 2000);\n                  } else {\n                    reject(\n                      new Error(\n                        `Failed to start server after ${MAX_RETRIES} retries`\n                      )\n                    );\n                  }\n                  return;\n                }\n\n                // Don't treat uvicorn startup messages as errors\n                if (errorMessage.includes(\"INFO\")) {\n                  log.info(`Python info: ${errorMessage}`);\n                  if (\n                    errorMessage.includes(\"Application startup complete.\") ||\n                    errorMessage.includes(\n                      \"Uvicorn running on http://127.0.0.1:47372\"\n                    )\n                  ) {\n                    serverStarting = false;\n                    updateLoadingStatus(\"Application server ready!\", 100);\n                    resolve(true);\n                  }\n                } else {\n                  log.error(`Python stderr: ${errorMessage}`);\n                }\n              });\n\n              pythonProcess.on(\"error\", (error: Error) => {\n                const errorMessage = `Failed to start Python server: ${error.message}`;\n                log.error(errorMessage);\n                updateLoadingStatus(errorMessage, -1);\n                if (!serverStarting) {\n                  
reject(error);\n                }\n              });\n\n              pythonProcess.on(\"close\", (code: number | null) => {\n                if (code !== 0 && !serverStarting) {\n                  const errorMessage = `Python server exited with code ${code}`;\n                  log.error(errorMessage);\n                  updateLoadingStatus(errorMessage, -1);\n                  reject(new Error(errorMessage));\n                }\n              });\n            };\n\n            await startServer();\n          } else {\n            const errorMessage = `Dependency installation failed with code ${code}`;\n            log.error(errorMessage);\n            updateLoadingStatus(errorMessage, -1);\n            reject(new Error(errorMessage));\n          }\n        });\n      })\n      .catch((error) => {\n        log.error(\"Failed to start Python server\", error);\n        reject(error);\n      });\n  });\n}\n\nexport function stopPythonServer() {\n  if (pythonProcess) {\n    pythonProcess.kill();\n    pythonProcess = null;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/resourceManager.ts",
    "content": "import osUtils from \"os-utils\";\nimport fs from \"fs\";\nimport os from \"os\";\nimport { BrowserWindow } from \"electron\";\nimport { ipcWebContentsSend } from \"./util.js\";\n\nconst POLLING_INTERVAL = 500;\n\nexport function pollResource(mainWindow: BrowserWindow) {\n  setInterval(async () => {\n    const cpuUsage = await getCpuUsage();\n    const memoryUsage = getMemoryUsage();\n    const diskUsage = getDiskUsage();\n    ipcWebContentsSend(\"statistics\", mainWindow.webContents, {\n      cpuUsage,\n      memoryUsage,\n      storageUsage: diskUsage.usedGB,\n    });\n  }, POLLING_INTERVAL);\n}\n\nfunction getCpuUsage(): Promise<number> {\n  return new Promise<number>((resolve) => {\n    osUtils.cpuUsage((cpuUsage) => {\n      resolve(cpuUsage);\n    });\n  });\n}\n\nfunction getMemoryUsage() {\n  return osUtils.freememPercentage();\n}\n\nfunction getDiskUsage() {\n  const stats = fs.statfsSync(process.platform === \"win32\" ? \"C:\\\\\" : \"/\");\n  const total = stats.bsize * stats.blocks;\n  const free = stats.bfree * stats.bsize;\n  return {\n    totalGB: Math.floor(total / 1_000_000_000),\n    usedGB: 1 - free / total,\n  };\n}\n\nexport async function getStaticData(): Promise<StaticData> {\n  const totalStorage = getDiskUsage().totalGB;\n  const cpuModel = os.cpus()[0].model;\n  const totalMemoryGB = Math.floor(osUtils.totalmem() / 1024);\n  return { totalStorage, totalMemoryGB, cpuModel };\n}\n"
  },
  {
    "path": "Frontend/src/electron/specs/systemSpecs.ts",
    "content": "import { platform } from \"os\";\nimport { ExecException, exec } from \"child_process\";\n\nexport async function systemSpecs(): Promise<{\n  cpu: string;\n  vram: string;\n  GPU_Manufacturer?: string;\n}> {\n  const os = platform();\n\n  return new Promise((resolve) => {\n    if (os === \"darwin\") {\n      // macOS\n      exec(\n        \"system_profiler SPHardwareDataType SPDisplaysDataType\",\n        (error: ExecException | null, stdout: string) => {\n          if (error) {\n            console.error(\"Error getting system specs:\", error);\n            resolve({\n              cpu: \"Unknown\",\n              vram: \"Unknown\",\n              GPU_Manufacturer: \"Unknown\",\n            });\n            return;\n          }\n\n          const cpu =\n            stdout.match(/Chip: (.+)/)?.[1] ||\n            stdout.match(/Processor Name: (.+)/)?.[1] ||\n            \"Unknown\";\n\n          const memory = stdout.match(/Memory: (.+)/)?.[1] || \"Unknown\";\n          const gpuCores = stdout.match(/Total Number of Cores: (\\d+)/)?.[1];\n\n          // Check for GPU manufacturer\n          const GPU_Manufacturer = cpu.includes(\"Apple\")\n            ? \"Apple Silicon\"\n            : stdout.includes(\"NVIDIA\")\n            ? \"NVIDIA\"\n            : stdout.includes(\"AMD\")\n            ? \"AMD\"\n            : \"Unknown\";\n\n          const vram = cpu.includes(\"Apple\")\n            ? 
`${memory} Unified Memory, ${gpuCores || \"Unknown\"} GPU Cores`\n            : stdout.match(/VRAM \\(Total\\): (.+)/)?.[1] || \"Unknown\";\n\n          resolve({ cpu, vram, GPU_Manufacturer });\n        }\n      );\n    } else if (os === \"win32\") {\n      // Windows - Use separate commands for GPU info and VRAM\n      const gpuCommand = \"wmic path win32_VideoController get name\";\n      const vramCommand =\n        'powershell -command \"$qwMemorySize = (Get-ItemProperty -Path \\\\\"HKLM:\\\\SYSTEM\\\\ControlSet001\\\\Control\\\\Class\\\\{4d36e968-e325-11ce-bfc1-08002be10318}\\\\0*\\\\\" -Name HardwareInformation.qwMemorySize -ErrorAction SilentlyContinue).\\\\\"HardwareInformation.qwMemorySize\\\\\"; [math]::round($qwMemorySize/1GB)\"';\n      const cpuCommand = \"wmic cpu get name\";\n\n      // Execute all commands in sequence\n      exec(gpuCommand, (gpuError: ExecException | null, gpuStdout: string) => {\n        if (gpuError) {\n          console.error(\"Error getting GPU info:\", gpuError);\n          resolve({\n            cpu: \"Unknown\",\n            vram: \"Unknown\",\n            GPU_Manufacturer: \"Unknown\",\n          });\n          return;\n        }\n\n        // Parse GPU info\n        const gpuLines = gpuStdout.trim().split(\"\\n\");\n        const gpuName = gpuLines[1]?.trim() || \"Unknown\";\n        let GPU_Manufacturer = \"Unknown\";\n\n        // Determine manufacturer from GPU name\n        if (gpuName.toLowerCase().includes(\"nvidia\")) {\n          GPU_Manufacturer = \"NVIDIA\";\n        } else if (\n          gpuName.toLowerCase().includes(\"amd\") ||\n          gpuName.toLowerCase().includes(\"radeon\")\n        ) {\n          GPU_Manufacturer = \"AMD\";\n        } else if (gpuName.toLowerCase().includes(\"intel\")) {\n          GPU_Manufacturer = \"Intel\";\n        }\n\n        // Get CPU info\n        exec(\n          cpuCommand,\n          (cpuError: ExecException | null, cpuStdout: string) => {\n            const cpu = 
cpuError\n              ? \"Unknown\"\n              : cpuStdout.trim().split(\"\\n\")[1]?.trim() || \"Unknown\";\n\n            // Get VRAM info\n            exec(\n              vramCommand,\n              (vramError: ExecException | null, vramStdout: string) => {\n                let vram = \"Unknown\";\n                if (!vramError) {\n                  const vramGB = parseInt(vramStdout.trim());\n                  if (!isNaN(vramGB)) {\n                    vram = `${vramGB} GB`;\n                  }\n                }\n\n                GPU_Manufacturer = gpuName;\n                resolve({ cpu, vram, GPU_Manufacturer });\n              }\n            );\n          }\n        );\n      });\n    } else {\n      // Linux\n      const getVRAM = async (GPU_Manufacturer: string): Promise<string> => {\n        return new Promise((resolve) => {\n          if (GPU_Manufacturer === \"NVIDIA\") {\n            exec(\"nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits\", (error, stdout) => {\n              if (!error && stdout) {\n                const vramMB = parseInt(stdout.trim());\n                resolve(vramMB >= 1024 ? `${(vramMB / 1024).toFixed(1)} GB` : `${vramMB} MB`);\n              } else {\n                resolve(\"Unknown\");\n              }\n            });\n          } else if (GPU_Manufacturer === \"AMD\") {\n            exec(\"rocm-smi --showmeminfo vram\", (error, stdout) => {\n              if (!error && stdout) {\n                const match = stdout.match(/(\\d+)\\s*MB/);\n                if (match) {\n                  const vramMB = parseInt(match[1]);\n                  resolve(vramMB >= 1024 ? 
`${(vramMB / 1024).toFixed(1)} GB` : `${vramMB} MB`);\n                } else {\n                  resolve(\"Unknown\");\n                }\n              } else {\n                resolve(\"Unknown\");\n              }\n            });\n          } else if (GPU_Manufacturer === \"Intel\") {\n            // For Intel, try multiple methods to get memory information\n            exec(\"free -m && glxinfo | grep -i 'dedicated video memory\\\\|total available memory'\", (error, stdout) => {\n              if (!error && stdout) {\n                // Try to find dedicated or total available memory from glxinfo\n                const dedicatedMatch = stdout.match(/Dedicated video memory:\\s*(\\d+)\\s*MB/i);\n                const totalMatch = stdout.match(/Total available memory:\\s*(\\d+)\\s*MB/i);\n                // Get system memory from free command\n                const memMatch = stdout.match(/Mem:\\s+(\\d+)/);\n                \n                if (dedicatedMatch) {\n                  const vramMB = parseInt(dedicatedMatch[1]);\n                  resolve(vramMB >= 1024 ? 
`${(vramMB / 1024).toFixed(1)} GB` : `${vramMB} MB`);\n                } else if (totalMatch) {\n                  const vramMB = parseInt(totalMatch[1]);\n                  resolve(`${(vramMB / 1024).toFixed(1)} GB (Shared)`);\n                } else if (memMatch) {\n                  // If no specific GPU memory info, show system memory as shared\n                  const totalMemMB = parseInt(memMatch[1]);\n                  const sharedGB = (totalMemMB / 1024).toFixed(1);\n                  resolve(`Up to ${sharedGB} GB Shared Memory`);\n                } else {\n                  resolve(\"Shared Memory\");\n                }\n              } else {\n                // Fallback to just getting system memory\n                exec(\"free -m\", (error2, stdout2) => {\n                  if (!error2 && stdout2) {\n                    const memMatch = stdout2.match(/Mem:\\s+(\\d+)/);\n                    if (memMatch) {\n                      const totalMemMB = parseInt(memMatch[1]);\n                      const sharedGB = (totalMemMB / 1024).toFixed(1);\n                      resolve(`Up to ${sharedGB} GB Shared Memory`);\n                    } else {\n                      resolve(\"Shared Memory\");\n                    }\n                  } else {\n                    resolve(\"Shared Memory\");\n                  }\n                });\n              }\n            });\n          } else {\n            resolve(\"Unknown\");\n          }\n        });\n      };\n\n      exec(\n        \"lscpu | grep 'Model name' && lspci | grep -i vga\",\n        async (error: ExecException | null, stdout: string) => {\n          if (error) {\n            console.error(\"Error getting system specs:\", error);\n            resolve({\n              cpu: \"Unknown\",\n              vram: \"Unknown\",\n              GPU_Manufacturer: \"Unknown\",\n            });\n            return;\n          }\n\n          const cpu = stdout.match(/Model name:\\s*(.+)/)?.[1]?.trim() || 
\"Unknown\";\n          const gpuLine = stdout.match(/VGA.*: (.+)/)?.[1] || \"\";\n\n          // Determine GPU manufacturer from the GPU line\n          const GPU_Manufacturer = gpuLine.includes(\"NVIDIA\")\n            ? \"NVIDIA\"\n            : gpuLine.includes(\"AMD\")\n            ? \"AMD\"\n            : gpuLine.includes(\"Intel\")\n            ? \"Intel\"\n            : \"Unknown\";\n\n          const vram = await getVRAM(GPU_Manufacturer);\n          resolve({ cpu, vram, GPU_Manufacturer });\n        }\n      );\n    }\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/storage/deleteCollection.ts",
    "content": "import db from \"../db.js\";\n\nimport { getToken } from \"../authentication/token.js\";\n\nexport async function deleteCollection(\n  collectionId: number,\n  collectionName: string,\n  userId: number\n) {\n  const token = await getToken({ userId: userId.toString() });\n  let apiKey = null;\n  let isLocal = false;\n  try {\n    apiKey = await db.getApiKey(userId, \"openai\");\n  } catch {\n    apiKey = null;\n  }\n  if (!apiKey) {\n    isLocal = true;\n  }\n\n  const response = await fetch(\"http://127.0.0.1:47372/delete-collection\", {\n    method: \"POST\",\n    headers: {\n      Authorization: `Bearer ${token}`,\n      \"Content-Type\": \"application/json\",\n    },\n    body: JSON.stringify({\n      collection_id: Number(collectionId),\n      collection_name: collectionName,\n      is_local: isLocal,\n      api_key: apiKey,\n    }),\n  });\n  if (response.ok) {\n    return true;\n  }\n  return false;\n}\n"
  },
  {
    "path": "Frontend/src/electron/storage/getFiles.ts",
    "content": "import db from \"../db.js\";\nexport function getFilesInCollection(userId: number, collectionId: number) {\n  try {\n    const files = db.getFilesInCollection(userId, collectionId);\n    return files;\n  } catch (error) {\n    console.error(\"Error reading files in collection:\", error);\n    return [];\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/storage/getUserFiles.ts",
    "content": "import fs from \"fs\";\nimport path from \"path\";\nimport { app } from \"electron\";\n\nexport function getUserCollectionFiles(payload: {\n  userId: number;\n  userName: string;\n}): Promise<{\n  userId: number;\n  userName: string;\n  files: string[];\n}> {\n  const userPath = path.join(\n    process.platform === \"linux\" ? app.getPath(\"userData\") : app.getAppPath(),\n    \"..\",\n    \"FileCollections\",\n    payload.userId.toString() + \"_\" + payload.userName\n  );\n\n  if (!fs.existsSync(userPath)) {\n    return Promise.resolve({ ...payload, files: [] });\n  }\n\n  const getAllFiles = (dirPath: string): string[] => {\n    const entries = fs.readdirSync(dirPath, { withFileTypes: true });\n    let files: string[] = [];\n\n    for (const entry of entries) {\n      const fullPath = path.join(dirPath, entry.name);\n      if (entry.isDirectory()) {\n        files = files.concat(getAllFiles(fullPath));\n      } else {\n        files.push(fullPath);\n      }\n    }\n\n    return files;\n  };\n\n  const files = getAllFiles(userPath);\n  const relativeFiles = files.map((file) => path.relative(userPath, file));\n\n  return Promise.resolve({ ...payload, files: relativeFiles });\n}\n"
  },
  {
    "path": "Frontend/src/electron/storage/newFile.ts",
    "content": "import fs from \"fs\";\nimport path from \"path\";\nimport { app, BrowserWindow } from \"electron\";\nimport db from \"../db.js\";\nimport { getToken } from \"../authentication/token.js\";\n\ninterface PythonProgressData {\n  type: string;\n  message: string;\n  chunk?: number;\n  totalChunks?: number;\n  percent_complete?: string;\n}\n\ninterface ProgressData {\n  status: string;\n  data: {\n    message: string;\n    chunk?: number;\n    total_chunks?: number;\n    percent_complete?: string;\n  };\n}\n\nexport async function addFileToCollection(\n  userId: number,\n  userName: string,\n  collectionId: number,\n  collectionName: string,\n  fileName: string,\n  fileContent: string,\n  signal?: AbortSignal\n) {\n  try {\n    const windows = BrowserWindow.getAllWindows();\n    const mainWindow = windows[0];\n\n    const sendProgress = (data: string) => {\n      try {\n        if (typeof data === \"string\") {\n          const lines = data.split(\"\\n\");\n          for (const line of lines) {\n            if (line.trim()) {\n              const jsonStr = line.replace(/^data:\\s*/, \"\").trim();\n              if (jsonStr) {\n                try {\n                  // Convert Python-style single quotes to double quotes for JSON parsing\n                  const formattedJson = jsonStr\n                    .replace(/'/g, '\"')\n                    // Handle nested quotes in message strings\n                    .replace(/\"([^\"]*)'([^']*)'([^\"]*)\"/, '\"$1\\\\\"$2\\\\\"$3\"');\n                  const parsedData = JSON.parse(\n                    formattedJson\n                  ) as PythonProgressData;\n\n                  const progressData: ProgressData = {\n                    status: parsedData.type || \"progress\",\n                    data: {\n                      message: parsedData.message,\n                      chunk: parsedData.chunk,\n                      total_chunks: parsedData.totalChunks,\n                      percent_complete: 
parsedData.percent_complete,\n                    },\n                  };\n\n                  mainWindow?.webContents.send(\"ingest-progress\", progressData);\n                } catch (parseError) {\n                  console.error(\"[NEW_FILE] JSON parse error:\", parseError);\n                  console.error(\"[NEW_FILE] Failed to parse data:\", jsonStr);\n                }\n              }\n            }\n          }\n        } else {\n          mainWindow?.webContents.send(\"ingest-progress\", data);\n        }\n      } catch (error) {\n        console.error(\"[NEW_FILE] Error in sendProgress:\", error);\n        console.error(\"[NEW_FILE] Problematic data:\", data);\n        mainWindow?.webContents.send(\"ingest-progress\", {\n          status: \"error\",\n          data: {\n            message: \"Error processing progress update\",\n          },\n        });\n      }\n    };\n\n    const collectionPath = path.join(\n      process.platform === \"linux\" ? app.getPath(\"userData\") : app.getAppPath(),\n      \"..\",\n      \"FileCollections\",\n      userId.toString() + \"_\" + userName,\n      collectionId.toString() + \"_\" + collectionName\n    );\n\n    if (!fs.existsSync(collectionPath)) {\n      fs.mkdirSync(collectionPath, { recursive: true });\n    }\n\n    const filePath = path.join(collectionPath, fileName);\n    // Write the file content as binary data from base64\n    fs.writeFileSync(filePath, Buffer.from(fileContent, 'base64'));\n    let apiKey = null;\n    try {\n      apiKey = db.getApiKey(userId, \"openai\");\n    } catch {\n      apiKey = null;\n    }\n    let isLocal = false;\n    let localEmbeddingModel = \"\";\n    if (!apiKey) {\n      isLocal = true;\n      localEmbeddingModel =\n        \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\";\n    }\n\n    if (collectionId) {\n      if (db.isCollectionLocal(collectionId)) {\n        isLocal = true;\n        localEmbeddingModel = db.getCollectionLocalEmbeddingModel(collectionId);\n   
   }\n    }\n    db.addFileToCollection(userId, collectionId, fileName);\n\n    sendProgress(\n      JSON.stringify({\n        type: \"progress\",\n        message: \"Starting file processing...\",\n        chunk: 1,\n        totalChunks: 2,\n        percent_complete: \"50%\",\n      })\n    );\n\n    const controller = new AbortController();\n\n    if (signal) {\n      signal.addEventListener(\"abort\", () => {\n        controller.abort();\n      });\n    }\n\n    const token = await getToken({ userId: userId.toString() });\n    const response = await fetch(\"http://localhost:47372/embed\", {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Authorization: `Bearer ${token}`,\n        Accept: \"text/event-stream\",\n      },\n      body: JSON.stringify({\n        file_path: filePath,\n        api_key: apiKey,\n        user: userId,\n        collection: collectionId,\n        collection_name: collectionName,\n        is_local: isLocal,\n        local_embedding_model: localEmbeddingModel,\n      }),\n      signal: controller.signal,\n    });\n\n    const reader = response.body?.getReader();\n    if (!reader) throw new Error(\"Failed to get response reader\");\n\n    const decoder = new TextDecoder();\n    let buffer = \"\";\n\n    while (true) {\n      const { done, value } = await reader.read();\n      if (done) break;\n\n      if (signal?.aborted || controller.signal.aborted) {\n        reader.cancel();\n        sendProgress(\n          JSON.stringify({\n            type: \"error\",\n            message: \"Operation cancelled\",\n          })\n        );\n        return { success: false, error: \"Operation cancelled\" };\n      }\n\n      buffer += decoder.decode(value, { stream: true });\n      const messages = buffer.split(\"\\n\\n\");\n      buffer = messages.pop() || \"\";\n\n      for (const message of messages) {\n        if (message.trim()) {\n          sendProgress(message);\n        }\n      }\n    }\n\n   
 if (buffer.trim()) {\n      sendProgress(buffer);\n    }\n\n    return { success: true, filePath };\n  } catch (error) {\n    console.error(\"[NEW_FILE] Error adding file to collection:\", error);\n    const windows = BrowserWindow.getAllWindows();\n    const mainWindow = windows[0];\n\n    if (error instanceof Error && error.name === \"AbortError\") {\n      mainWindow?.webContents.send(\"ingest-progress\", {\n        status: \"error\",\n        data: {\n          message: \"Operation cancelled\",\n        },\n      });\n      return { success: false, error: \"Operation cancelled\" };\n    }\n\n    mainWindow?.webContents.send(\"ingest-progress\", {\n      status: \"error\",\n      data: {\n        message: error instanceof Error ? error.message : \"Unknown error\",\n      },\n    });\n    return { success: false, error: \"Failed to add file to collection\" };\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/storage/openCollectionFolder.ts",
    "content": "import { shell, app } from \"electron\";\nimport path from \"path\";\n\nexport const openCollectionFolder = (filepath: string) => {\n  const collectionPath = path.dirname(filepath);\n  shell.openPath(collectionPath);\n};\n\nexport const openCollectionFolderFromFileExplorer = (filepath: string) => {\n  console.log(\"Opening collection folder:\", filepath);\n  const basePath =\n    process.platform === \"linux\" ? app.getPath(\"userData\") : app.getAppPath();\n  const fullPath = path.join(basePath, \"..\", \"FileCollections\", filepath);\n  const collectionPath = path.dirname(fullPath);\n  console.log(\"Collection path:\", collectionPath);\n  shell.openPath(collectionPath);\n  return { filepath };\n};\n"
  },
  {
    "path": "Frontend/src/electron/storage/removeFileorFolder.ts",
    "content": "import fs from \"fs\";\nimport path from \"path\";\nimport { app } from \"electron\";\n\nexport function removeFileorFolder(payload: {\n  userId: number;\n  userName: string;\n  file: string;\n}): Promise<{\n  userId: number;\n  userName: string;\n  file: string;\n  success: boolean;\n}> {\n  try {\n    const userPath = path.join(\n      process.platform === \"linux\" ? app.getPath(\"userData\") : app.getAppPath(),\n      \"..\",\n      \"FileCollections\",\n      payload.userId.toString() + \"_\" + payload.userName\n    );\n\n    // Remove the user identifier prefix from the file path if it exists\n    const filePath = payload.file.replace(\n      new RegExp(`^${payload.userId}_${payload.userName}/`),\n      \"\"\n    );\n\n    const fullPath = path.join(userPath, filePath);\n    const normalizedFullPath = path.normalize(fullPath);\n    const normalizedUserPath = path.normalize(userPath);\n\n    // Security check: ensure the target path is within the user's directory\n    if (!normalizedFullPath.startsWith(normalizedUserPath)) {\n      throw new Error(\"Invalid file path\");\n    }\n\n    if (!fs.existsSync(fullPath)) {\n      return Promise.resolve({ ...payload, success: false });\n    }\n\n    fs.rmSync(fullPath, { recursive: true, force: true });\n\n    // Verify the file/folder was actually removed\n    if (fs.existsSync(fullPath)) {\n      throw new Error(\"Failed to remove file or folder\");\n    }\n\n    return Promise.resolve({ ...payload, success: true });\n  } catch (error) {\n    console.error(\"Error removing file or folder:\", error);\n    return Promise.resolve({ ...payload, success: false });\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/storage/renameFile.ts",
    "content": "import fs from \"fs\";\nimport path from \"path\";\nimport { app } from \"electron\";\n\nexport function renameFile(payload: {\n  userId: number;\n  userName: string;\n  file: string;\n  newName: string;\n}): Promise<{\n  userId: number;\n  userName: string;\n  file: string;\n  newName: string;\n  success: boolean;\n}> {\n  try {\n    console.log(\"Rename file payload:\", payload);\n\n    const userPath = path.join(\n      process.platform === \"linux\" ? app.getPath(\"userData\") : app.getAppPath(),\n      \"..\",\n      \"FileCollections\",\n      payload.userId.toString() + \"_\" + payload.userName\n    );\n    console.log(\"User path:\", userPath);\n\n    // Remove the user identifier prefix from the file path if it exists\n    const filePath = payload.file.replace(\n      new RegExp(`^${payload.userId}_${payload.userName}/`),\n      \"\"\n    );\n\n    // The file path should already include the collection directory\n    const oldPath = path.join(userPath, filePath);\n    const newPath = path.join(path.dirname(oldPath), payload.newName);\n\n    console.log(\"Old path:\", oldPath);\n    console.log(\"New path:\", newPath);\n\n    // Security check: ensure both paths are within the user's directory\n    const normalizedOldPath = path.normalize(oldPath);\n    const normalizedNewPath = path.normalize(newPath);\n    const normalizedUserPath = path.normalize(userPath);\n\n    if (\n      !normalizedOldPath.startsWith(normalizedUserPath) ||\n      !normalizedNewPath.startsWith(normalizedUserPath)\n    ) {\n      console.error(\"Invalid file path - security check failed\");\n      return Promise.resolve({ ...payload, success: false });\n    }\n\n    // Check if source exists and destination doesn't\n    if (!fs.existsSync(oldPath)) {\n      console.error(\"Source file does not exist:\", oldPath);\n      return Promise.resolve({ ...payload, success: false });\n    }\n\n    if (fs.existsSync(newPath)) {\n      console.error(\"Destination already 
exists:\", newPath);\n      return Promise.resolve({ ...payload, success: false });\n    }\n\n    // Create the directory if it doesn't exist\n    const newDir = path.dirname(newPath);\n    if (!fs.existsSync(newDir)) {\n      fs.mkdirSync(newDir, { recursive: true });\n    }\n\n    // Perform the rename\n    fs.renameSync(oldPath, newPath);\n\n    // Verify the rename was successful\n    if (!fs.existsSync(newPath)) {\n      console.error(\"Rename operation failed - new file does not exist\");\n      return Promise.resolve({ ...payload, success: false });\n    }\n\n    return Promise.resolve({ ...payload, success: true });\n  } catch (error) {\n    console.error(\"Error renaming file:\", error);\n    return Promise.resolve({ ...payload, success: false });\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/storage/websiteFetch.ts",
    "content": "import { chromium } from \"playwright\";\n\nimport fs from \"fs\";\nimport path from \"path\";\nimport { app, BrowserWindow } from \"electron\";\nimport db from \"../db.js\";\nimport { getToken } from \"../authentication/token.js\";\n\ninterface PythonProgressData {\n  type: string;\n  message: string;\n  chunk?: number;\n  totalChunks?: number;\n  percent_complete?: string;\n}\n\ninterface ProgressData {\n  status: string;\n  data: {\n    message: string;\n    chunk?: number;\n    total_chunks?: number;\n    percent_complete?: string;\n  };\n}\n\nexport async function websiteFetch(payload: {\n  url: string;\n  userId: number;\n  userName: string;\n  collectionId: number;\n  collectionName: string;\n  signal?: AbortSignal;\n}): Promise<{\n  success: boolean;\n  content?: string;\n  textContent?: string;\n  metadata?: {\n    title: string;\n    description: string;\n    author: string;\n    keywords: string;\n    ogImage: string;\n  };\n  error?: string;\n  url: string;\n  filePath?: string;\n}> {\n  try {\n    const windows = BrowserWindow.getAllWindows();\n    const mainWindow = windows[0];\n\n    const sendProgress = (data: string) => {\n      try {\n        if (typeof data === \"string\") {\n          const lines = data.split(\"\\n\");\n          for (const line of lines) {\n            if (line.trim()) {\n              const jsonStr = line.replace(/^data:\\s*/, \"\").trim();\n              if (jsonStr) {\n                try {\n                  // Convert Python-style single quotes to double quotes for JSON parsing\n                  const formattedJson = jsonStr\n                    .replace(/'/g, '\"')\n                    // Handle nested quotes in message strings\n                    .replace(/\"([^\"]*)'([^']*)'([^\"]*)\"/, '\"$1\\\\\"$2\\\\\"$3\"');\n                  const parsedData = JSON.parse(\n                    formattedJson\n                  ) as PythonProgressData;\n\n                  const progressData: ProgressData = {\n     
               status: parsedData.type || \"progress\",\n                    data: {\n                      message: parsedData.message,\n                      chunk: parsedData.chunk,\n                      total_chunks: parsedData.totalChunks,\n                      percent_complete: parsedData.percent_complete,\n                    },\n                  };\n\n                  mainWindow?.webContents.send(\"ingest-progress\", progressData);\n                } catch (parseError) {\n                  console.error(\n                    \"[WEBSITE_FETCH] JSON parse error:\",\n                    parseError\n                  );\n                  console.error(\n                    \"[WEBSITE_FETCH] Failed to parse data:\",\n                    jsonStr\n                  );\n                }\n              }\n            }\n          }\n        } else {\n          mainWindow?.webContents.send(\"ingest-progress\", data);\n        }\n      } catch (error) {\n        console.error(\"[WEBSITE_FETCH] Error in sendProgress:\", error);\n        console.error(\"[WEBSITE_FETCH] Problematic data:\", data);\n        mainWindow?.webContents.send(\"ingest-progress\", {\n          status: \"error\",\n          data: {\n            message: \"Error processing progress update\",\n          },\n        });\n      }\n    };\n\n    let apiKey = null;\n    try {\n      apiKey = db.getApiKey(payload.userId, \"openai\");\n    } catch {\n      apiKey = null;\n    }\n    let isLocal = false;\n    let localEmbeddingModel = \"\";\n    if (!apiKey) {\n      isLocal = true;\n      localEmbeddingModel =\n        \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\";\n    }\n    if (payload.collectionId) {\n      if (db.isCollectionLocal(payload.collectionId)) {\n        isLocal = true;\n        localEmbeddingModel = db.getCollectionLocalEmbeddingModel(\n          payload.collectionId\n        );\n      }\n    }\n\n    const browser = await chromium.launch({\n      headless: true,\n      
executablePath:\n        process.platform === \"win32\"\n          ? \"C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"\n          : process.platform === \"linux\"\n          ? \"/usr/bin/google-chrome\"\n          : \"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome\",\n    });\n\n    const context = await browser.newContext({\n      userAgent:\n        \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36\",\n      viewport: { width: 1920, height: 1080 },\n      deviceScaleFactor: 1,\n      isMobile: false,\n      hasTouch: false,\n      javaScriptEnabled: true,\n      bypassCSP: true,\n      ignoreHTTPSErrors: true,\n    });\n    sendProgress(\n      JSON.stringify({\n        type: \"progress\",\n        message: \"Launching browser...\",\n        chunk: 1,\n        totalChunks: 4,\n        percent_complete: \"25%\",\n      })\n    );\n\n    const page = await context.newPage();\n\n    sendProgress(\n      JSON.stringify({\n        type: \"progress\",\n        message: \"Navigating to website...\",\n        chunk: 2,\n        totalChunks: 4,\n        percent_complete: \"50%\",\n      })\n    );\n\n    await page.goto(payload.url, {\n      waitUntil: \"networkidle\",\n      timeout: 30000,\n    });\n    await page.waitForSelector(\"body\");\n\n    const metadata = await page.evaluate((url) => {\n      const getMetaContent = (name: string): string => {\n        const element = document.querySelector(\n          `meta[name=\"${name}\"], meta[property=\"${name}\"]`\n        );\n        return element ? 
(element as HTMLMetaElement).content : \"\";\n      };\n\n      return {\n        title: document.title,\n        source: url,\n        description:\n          getMetaContent(\"description\") || getMetaContent(\"og:description\"),\n        author: getMetaContent(\"author\"),\n        keywords: getMetaContent(\"keywords\"),\n        ogImage: getMetaContent(\"og:image\"),\n      };\n    }, payload.url);\n\n    sendProgress(\n      JSON.stringify({\n        type: \"progress\",\n        message: \"Extracting content...\",\n        chunk: 3,\n        totalChunks: 4,\n        percent_complete: \"75%\",\n      })\n    );\n\n    const textContent = await page.evaluate(() => {\n      const scripts = document.getElementsByTagName(\"script\");\n      const styles = document.getElementsByTagName(\"style\");\n      Array.from(scripts).forEach((script) => script.remove());\n      Array.from(styles).forEach((style) => style.remove());\n      return document.body.innerText;\n    });\n\n    await browser.close();\n\n    const collectionPath = path.join(\n      process.platform === \"linux\" ? 
app.getPath(\"userData\") : app.getAppPath(),\n      \"..\",\n      \"FileCollections\",\n      payload.userId.toString() + \"_\" + payload.userName,\n      payload.collectionId.toString() + \"_\" + payload.collectionName\n    );\n\n    if (!fs.existsSync(collectionPath)) {\n      fs.mkdirSync(collectionPath, { recursive: true });\n    }\n\n    const fileName = `${new URL(payload.url).hostname}_${Date.now()}.txt`;\n    const filePath = path.join(collectionPath, fileName);\n    fs.writeFileSync(filePath, textContent);\n\n    db.addFileToCollection(payload.userId, payload.collectionId, fileName);\n\n    sendProgress(\n      JSON.stringify({\n        type: \"progress\",\n        message: \"Starting file processing...\",\n        chunk: 4,\n        totalChunks: 4,\n        percent_complete: \"90%\",\n      })\n    );\n\n    const controller = new AbortController();\n\n    if (payload.signal) {\n      payload.signal.addEventListener(\"abort\", () => {\n        controller.abort();\n      });\n    }\n\n    const token = await getToken({ userId: payload.userId.toString() });\n    const response = await fetch(\"http://localhost:47372/embed\", {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Accept: \"text/event-stream\",\n        Authorization: `Bearer ${token}`,\n      },\n      body: JSON.stringify({\n        file_path: filePath,\n        metadata: metadata,\n        api_key: apiKey,\n        user: payload.userId,\n        collection: payload.collectionId,\n        collection_name: payload.collectionName,\n        is_local: isLocal,\n        local_embedding_model: localEmbeddingModel,\n      }),\n      signal: controller.signal,\n    });\n\n    const reader = response.body?.getReader();\n    if (!reader) throw new Error(\"Failed to get response reader\");\n\n    const decoder = new TextDecoder();\n    let buffer = \"\";\n\n    while (true) {\n      const { done, value } = await reader.read();\n      if (done) 
break;\n\n      if (payload.signal?.aborted || controller.signal.aborted) {\n        reader.cancel();\n        sendProgress(\n          JSON.stringify({\n            type: \"error\",\n            message: \"Operation cancelled\",\n          })\n        );\n        return {\n          success: false,\n          error: \"Operation cancelled\",\n          url: payload.url,\n        };\n      }\n\n      buffer += decoder.decode(value, { stream: true });\n      const messages = buffer.split(\"\\n\\n\");\n      buffer = messages.pop() || \"\";\n\n      for (const message of messages) {\n        if (message.trim()) {\n          sendProgress(message);\n        }\n      }\n    }\n\n    if (buffer.trim()) {\n      sendProgress(buffer);\n    }\n\n    return {\n      success: true,\n      textContent,\n      metadata,\n      url: payload.url,\n      filePath,\n    };\n  } catch (error) {\n    console.error(\"[WEBSITE_FETCH] Error in website fetch:\", error);\n    const windows = BrowserWindow.getAllWindows();\n    const mainWindow = windows[0];\n\n    mainWindow?.webContents.send(\"ingest-progress\", {\n      status: \"error\",\n      data: {\n        message:\n          error instanceof Error ? error.message : \"Unknown error occurred\",\n      },\n    });\n    return {\n      success: false,\n      error: error instanceof Error ? error.message : \"Unknown error occurred\",\n      url: payload.url,\n    };\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/tray.test.ts",
    "content": "import { test, expect, vi, Mock } from \"vitest\";\nimport { createTray } from \"./tray.js\";\nimport { app, BrowserWindow, Menu, MenuItemConstructorOptions, MenuItem } from \"electron\";\n\nvi.mock(\"electron\", () => ({\n  Tray: vi.fn().mockReturnValue({\n    setContextMenu: vi.fn(),\n  }),\n  app: {\n    getAppPath: vi.fn().mockReturnValue(\"/\"),\n    dock: {\n      show: vi.fn(),\n    },\n    quit: vi.fn(),\n  },\n  Menu: {\n    buildFromTemplate: vi.fn(),\n  },\n  MenuItem: vi.fn().mockImplementation((options) => ({\n    ...options,\n    commandId: 1,\n    menu: null,\n    userAccelerator: null,\n  })),\n}));\n\n// Create a mock BrowserWindow with just the methods we need\nconst mainWindow = {\n  show: vi.fn(),\n  webContents: {\n    openDevTools: vi.fn(),\n  },\n  // Add required event emitter methods\n  on: vi.fn(),\n  once: vi.fn(),\n  addListener: vi.fn(),\n  removeListener: vi.fn(),\n  removeAllListeners: vi.fn(),\n  off: vi.fn(),\n  emit: vi.fn(),\n  listenerCount: vi.fn(),\n  listeners: vi.fn(),\n  rawListeners: vi.fn(),\n  prependListener: vi.fn(),\n  prependOnceListener: vi.fn(),\n  eventNames: vi.fn(),\n} as unknown as BrowserWindow;\n\ntest(\"createTray creates tray with correct menu items\", () => {\n  createTray(mainWindow);\n  const calls = (Menu.buildFromTemplate as Mock).mock.calls;\n  const args = calls[0] as [MenuItemConstructorOptions[]];\n  const template = args[0];\n  \n  expect(template).toHaveLength(2);\n  expect(template[0].label).toEqual(\"Show\");\n  expect(template[1].label).toEqual(\"Quit\");\n\n  // Create mock objects for click handlers\n  const mockMenuItem = new MenuItem({}) as MenuItem;\n  const mockBrowserWindow = {} as BrowserWindow;\n  const mockEvent = {} as KeyboardEvent;\n\n  // Test Show menu item click\n  template[0].click?.(mockMenuItem, mockBrowserWindow, mockEvent);\n  expect(mainWindow.show).toHaveBeenCalled();\n  expect(app.dock.show).toHaveBeenCalled();\n\n  // Test Quit menu item click\n  
template[1].click?.(mockMenuItem, mockBrowserWindow, mockEvent);\n  expect(app.quit).toHaveBeenCalled();\n});\n"
  },
  {
    "path": "Frontend/src/electron/tray.ts",
    "content": "import { app, Menu, Tray, BrowserWindow } from \"electron\";\nimport { getAssetsPath } from \"./pathResolver.js\";\n\nexport function createTray(mainWindow: BrowserWindow) {\n  const tray = new Tray(getAssetsPath() + \"/trayIcon.png\");\n  tray.setContextMenu(\n    Menu.buildFromTemplate([\n      {\n        label: \"Show\",\n        click: () => {\n          mainWindow.show();\n          if (app.dock) {\n            app.dock.show();\n          }\n        },\n      },\n      { label: \"Quit\", click: () => app.quit() },\n    ])\n  );\n}\n"
  },
  {
    "path": "Frontend/src/electron/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"ESNext\",\n    \"module\": \"NodeNext\",\n    \"strict\": true,\n    \"outDir\": \"../../dist-electron\",\n    \"skipLibCheck\": true,\n    \"types\": [\"../../types\", \"node\", \"../../types.d.ts\"],\n    \"esModuleInterop\": true,\n    \"allowSyntheticDefaultImports\": true,\n    \"resolveJsonModule\": true\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/util.ts",
    "content": "import {\n  ipcMain,\n  IpcMainInvokeEvent,\n  WebContents,\n  WebFrameMain,\n} from \"electron\";\nimport { getUIPath } from \"./pathResolver.js\";\nimport { pathToFileURL } from \"url\";\n\nexport function isDev(): boolean {\n  return process.env.NODE_ENV === \"development\";\n}\n\nexport function ipcMainHandle<\n  Channel extends string,\n  Input extends Record<string, unknown> = Record<string, unknown>,\n  Output = unknown\n>(\n  channel: Channel,\n  handler: (event: IpcMainInvokeEvent, payload: Input) => Promise<Output>\n) {\n  ipcMain.handle(channel, handler);\n}\n\nexport function ipcMainDatabaseHandle<Key extends keyof EventPayloadMapping>(\n  key: Key,\n  handler: (\n    payload: EventPayloadMapping[Key]\n  ) => Promise<EventPayloadMapping[Key]>\n) {\n  ipcMain.handle(key, (event, payload) => {\n    validateEventFrame(event.senderFrame);\n    return handler(payload as EventPayloadMapping[Key]);\n  });\n}\n\nexport function ipcWebContentsSend<Key extends keyof EventPayloadMapping>(\n  key: Key,\n  webContents: WebContents,\n  payload?: EventPayloadMapping[Key]\n) {\n  webContents.send(key, payload);\n}\n\nexport function validateEventFrame(frame: WebFrameMain | null) {\n  if (frame === null) {\n    throw new Error(\"Sender frame is null\");\n  }\n  if (isDev() && new URL(frame.url).host === \"localhost:5131\") {\n    return;\n  }\n  if (frame.url !== pathToFileURL(getUIPath()).toString()) {\n    throw new Error(\"Malicious event\");\n  }\n}\n\nexport function ipcMainOn<Key extends keyof EventPayloadMapping>(\n  key: Key,\n  handler: (payload: EventPayloadMapping[Key]) => void\n) {\n  ipcMain.on(key, (event, payload) => {\n    validateEventFrame(event.senderFrame);\n    handler(payload);\n  });\n}\n"
  },
  {
    "path": "Frontend/src/electron/voice/audioTranscription.ts",
    "content": "import * as fs from \"fs\";\nimport * as path from \"path\";\nimport { app } from \"electron\";\nimport { getToken } from \"../authentication/token.js\";\n\nexport async function audioTranscription(audioData: Buffer, userId: number) {\n  let filepath: string | null = null;\n  try {\n    const tempDir = path.join(app.getPath(\"temp\"), \"notate-audio\");\n    if (!fs.existsSync(tempDir)) {\n      fs.mkdirSync(tempDir, { recursive: true });\n    }\n\n    const filename = `recording-${Date.now()}.wav`;\n    filepath = path.join(tempDir, filename);\n\n    // Save the file locally first\n    fs.writeFileSync(filepath, audioData);\n\n    // Create form data with the saved file\n    const formData = new FormData();\n    const file = new Blob([fs.readFileSync(filepath)], { type: \"audio/wav\" });\n    formData.append(\"audio_file\", file, filename);\n    formData.append(\"model_name\", \"base\");\n    const token = await getToken({ userId: userId.toString() });\n    const response = await fetch(\"http://localhost:47372/transcribe\", {\n      method: \"POST\",\n      headers: {\n        Authorization: `Bearer ${token}`,\n      },\n      body: formData,\n    });\n\n    if (!response.ok) {\n      throw new Error(`HTTP error! 
status: ${response.status}`);\n    }\n\n    const data = await response.json();\n\n    // Clean up temporary file\n    if (filepath && fs.existsSync(filepath)) {\n      fs.unlinkSync(filepath);\n    }\n\n    if (data.status === \"error\") {\n      return {\n        success: false,\n        error: data.error || \"Unknown error occurred during transcription\",\n      };\n    }\n\n    // Return the transcription data properly\n    return {\n      success: true,\n      transcription: data.text,\n      language: data.language,\n    };\n  } catch (error) {\n    console.error(\"Error in transcribeAudio:\", error);\n    // Clean up on error too\n    if (filepath && fs.existsSync(filepath)) {\n      try {\n        fs.unlinkSync(filepath);\n      } catch (cleanupError) {\n        console.error(\"Error cleaning up temporary file:\", cleanupError);\n      }\n    }\n    return {\n      success: false,\n      error: error instanceof Error ? error.message : \"Unknown error occurred\",\n    };\n  }\n}\n"
  },
  {
    "path": "Frontend/src/electron/youtube/youtubeIngest.ts",
    "content": "import { BrowserWindow } from \"electron\";\nimport db from \"../db.js\";\nimport { getToken } from \"../authentication/token.js\";\n\ninterface PythonProgressData {\n  type: string;\n  message: string;\n  chunk: number;\n  totalChunks: number;\n  percent_complete: string;\n}\n\ninterface ProgressData {\n  status: string;\n  data: {\n    message: string;\n    chunk?: number;\n    total_chunks?: number;\n    percent_complete?: string;\n  };\n}\n\nexport async function youtubeIngest(payload: {\n  url: string;\n  userId: number;\n  userName: string;\n  collectionId: number;\n  collectionName: string;\n}) {\n  try {\n    const windows = BrowserWindow.getAllWindows();\n    const mainWindow = windows[0];\n    db.addFileToCollection(payload.userId, payload.collectionId, payload.url);\n\n    const sendProgress = (data: string) => {\n      try {\n        if (typeof data === \"string\") {\n          const lines = data.split(\"\\n\");\n          for (const line of lines) {\n            if (line.trim()) {\n              const jsonStr = line.replace(/^data:\\s*/, \"\").trim();\n              if (jsonStr) {\n                try {\n                  const formattedJson = jsonStr\n                    .replace(/'/g, '\"')\n                    .replace(/\"([^\"]*)'([^']*)'([^\"]*)\"/, '\"$1\\\\\"$2\\\\\"$3\"');\n                  const parsedData = JSON.parse(\n                    formattedJson\n                  ) as PythonProgressData;\n\n                  const progressData: ProgressData = {\n                    status: parsedData.type || \"progress\",\n                    data: {\n                      message: parsedData.message,\n                      chunk: parsedData.chunk,\n                      total_chunks: parsedData.totalChunks,\n                      percent_complete: parsedData.percent_complete,\n                    },\n                  };\n\n                  mainWindow?.webContents.send(\"ingest-progress\", progressData);\n                } catch 
(parseError) {\n                  console.error(\n                    \"[YOUTUBE_INGEST] JSON parse error:\",\n                    parseError\n                  );\n                  console.error(\n                    \"[YOUTUBE_INGEST] Failed to parse data:\",\n                    jsonStr\n                  );\n                }\n              }\n            }\n          }\n        } else {\n          mainWindow?.webContents.send(\"ingest-progress\", data);\n        }\n      } catch (error) {\n        console.error(\"[YOUTUBE_INGEST] Error in sendProgress:\", error);\n        console.error(\"[YOUTUBE_INGEST] Problematic data:\", data);\n        mainWindow?.webContents.send(\"ingest-progress\", {\n          status: \"error\",\n          data: {\n            message: \"Error processing progress update\",\n          },\n        });\n      }\n    };\n\n    let apiKey = null;\n    try {\n      apiKey = db.getApiKey(payload.userId, \"openai\");\n    } catch {\n      apiKey = null;\n    }\n    let isLocal = false;\n    let localEmbeddingModel = \"\";\n    if (!apiKey) {\n      isLocal = true;\n      localEmbeddingModel =\n        \"HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5\";\n    }\n    if (payload.collectionId) {\n      if (db.isCollectionLocal(payload.collectionId)) {\n        isLocal = true;\n        localEmbeddingModel = db.getCollectionLocalEmbeddingModel(\n          payload.collectionId\n        );\n      }\n    }\n    const token = await getToken({ userId: payload.userId.toString() });\n    const response = await fetch(`http://localhost:47372/youtube-ingest`, {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Accept: \"text/event-stream\",\n        Authorization: `Bearer ${token}`,\n      },\n      body: JSON.stringify({\n        url: payload.url,\n        user_id: payload.userId,\n        collection_id: payload.collectionId,\n        collection_name: payload.collectionName,\n        username: 
payload.userName,\n        api_key: apiKey,\n        is_local: isLocal,\n        local_embedding_model: localEmbeddingModel,\n      }),\n    });\n\n    if (!response.ok) {\n      throw new Error(`Server responded with status: ${response.status}`);\n    }\n\n    const reader = response.body?.getReader();\n    if (!reader) throw new Error(\"Failed to get response reader\");\n\n    const decoder = new TextDecoder();\n    let buffer = \"\";\n\n    while (true) {\n      const { done, value } = await reader.read();\n      if (done) break;\n\n      buffer += decoder.decode(value, { stream: true });\n      const messages = buffer.split(\"\\n\\n\");\n      buffer = messages.pop() || \"\";\n\n      for (const message of messages) {\n        if (message.trim()) {\n          sendProgress(message);\n        }\n      }\n    }\n\n    if (buffer.trim()) {\n      sendProgress(buffer);\n    }\n\n    return {\n      userId: payload.userId,\n      conversationId: payload.collectionId,\n    };\n  } catch (error) {\n    console.error(\"[YOUTUBE_INGEST] Error in YouTube ingest:\", error);\n    const windows = BrowserWindow.getAllWindows();\n    const mainWindow = windows[0];\n\n    mainWindow?.webContents.send(\"ingest-progress\", {\n      status: \"error\",\n      data: {\n        message: error instanceof Error ? error.message : \"Unknown error\",\n      },\n    });\n    throw error;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/hooks/use-toast.ts",
    "content": "\"use client\";\n\n// Inspired by react-hot-toast library\nimport * as React from \"react\";\n\nimport type { ToastActionElement, ToastProps } from \"@/components/ui/toast\";\n\nconst TOAST_LIMIT = 1;\nconst TOAST_REMOVE_DELAY = 1000000;\n\ntype ToasterToast = ToastProps & {\n  id: string;\n  title?: React.ReactNode;\n  description?: React.ReactNode;\n  action?: ToastActionElement;\n};\n\nconst actionTypes = {\n  ADD_TOAST: \"ADD_TOAST\",\n  UPDATE_TOAST: \"UPDATE_TOAST\",\n  DISMISS_TOAST: \"DISMISS_TOAST\",\n  REMOVE_TOAST: \"REMOVE_TOAST\",\n} as const;\n\nlet count = 0;\n\nfunction genId() {\n  count = (count + 1) % Number.MAX_SAFE_INTEGER;\n  return count.toString();\n}\n\ntype ActionType = typeof actionTypes;\n\ntype Action =\n  | {\n      type: ActionType[\"ADD_TOAST\"];\n      toast: ToasterToast;\n    }\n  | {\n      type: ActionType[\"UPDATE_TOAST\"];\n      toast: Partial<ToasterToast>;\n    }\n  | {\n      type: ActionType[\"DISMISS_TOAST\"];\n      toastId?: ToasterToast[\"id\"];\n    }\n  | {\n      type: ActionType[\"REMOVE_TOAST\"];\n      toastId?: ToasterToast[\"id\"];\n    };\n\ninterface State {\n  toasts: ToasterToast[];\n}\n\nconst toastTimeouts = new Map<string, ReturnType<typeof setTimeout>>();\n\nconst addToRemoveQueue = (toastId: string) => {\n  if (toastTimeouts.has(toastId)) {\n    return;\n  }\n\n  const timeout = setTimeout(() => {\n    toastTimeouts.delete(toastId);\n    dispatch({\n      type: \"REMOVE_TOAST\",\n      toastId: toastId,\n    });\n  }, TOAST_REMOVE_DELAY);\n\n  toastTimeouts.set(toastId, timeout);\n};\n\nexport const reducer = (state: State, action: Action): State => {\n  switch (action.type) {\n    case \"ADD_TOAST\":\n      return {\n        ...state,\n        toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT),\n      };\n\n    case \"UPDATE_TOAST\":\n      return {\n        ...state,\n        toasts: state.toasts.map((t) =>\n          t.id === action.toast.id ? 
{ ...t, ...action.toast } : t\n        ),\n      };\n\n    case \"DISMISS_TOAST\": {\n      const { toastId } = action;\n\n      // ! Side effects ! - This could be extracted into a dismissToast() action,\n      // but I'll keep it here for simplicity\n      if (toastId) {\n        addToRemoveQueue(toastId);\n      } else {\n        state.toasts.forEach((toast) => {\n          addToRemoveQueue(toast.id);\n        });\n      }\n\n      return {\n        ...state,\n        toasts: state.toasts.map((t) =>\n          t.id === toastId || toastId === undefined\n            ? {\n                ...t,\n                open: false,\n              }\n            : t\n        ),\n      };\n    }\n    case \"REMOVE_TOAST\":\n      if (action.toastId === undefined) {\n        return {\n          ...state,\n          toasts: [],\n        };\n      }\n      return {\n        ...state,\n        toasts: state.toasts.filter((t) => t.id !== action.toastId),\n      };\n  }\n};\n\nconst listeners: Array<(state: State) => void> = [];\n\nlet memoryState: State = { toasts: [] };\n\nfunction dispatch(action: Action) {\n  memoryState = reducer(memoryState, action);\n  listeners.forEach((listener) => {\n    listener(memoryState);\n  });\n}\n\ntype Toast = Omit<ToasterToast, \"id\">;\n\nfunction toast({ ...props }: Toast) {\n  const id = genId();\n\n  const update = (props: ToasterToast) =>\n    dispatch({\n      type: \"UPDATE_TOAST\",\n      toast: { ...props, id },\n    });\n  const dismiss = () => dispatch({ type: \"DISMISS_TOAST\", toastId: id });\n\n  dispatch({\n    type: \"ADD_TOAST\",\n    toast: {\n      ...props,\n      id,\n      open: true,\n      onOpenChange: (open) => {\n        if (!open) dismiss();\n      },\n    },\n  });\n\n  return {\n    id: id,\n    dismiss,\n    update,\n  };\n}\n\nfunction useToast() {\n  const [state, setState] = React.useState<State>(memoryState);\n\n  React.useEffect(() => {\n    listeners.push(setState);\n    return () => {\n      const index = 
listeners.indexOf(setState);\n      if (index > -1) {\n        listeners.splice(index, 1);\n      }\n    };\n  }, [state]);\n\n  return {\n    ...state,\n    toast,\n    dismiss: (toastId?: string) => dispatch({ type: \"DISMISS_TOAST\", toastId }),\n  };\n}\n\nexport { useToast, toast };\n"
  },
  {
    "path": "Frontend/src/hooks/useAppInitialization.tsx",
    "content": "import { useEffect, useCallback } from \"react\";\nimport { useView } from \"@/context/useView\";\nimport { useUser } from \"@/context/useUser\";\nimport { useSysSettings } from \"@/context/useSysSettings\";\nimport { initializeShiki } from \"@/lib/shikiHightlight\";\nimport { useLibrary } from \"@/context/useLibrary\";\nimport { fetchEmbeddingModels } from \"@/data/models\";\nimport { fetchSystemSpecs } from \"@/data/sysSpecs\";\n\nexport function useAppInitialization() {\n  const { setActiveView } = useView();\n  const {\n    activeUser,\n    setApiKeys,\n    setConversations,\n    setPrompts,\n    setActiveUser,\n    handleResetChat,\n    setFilteredConversations,\n    setIsSearchOpen,\n    setSearchTerm,\n    fetchDevAPIKeys,\n    getUserConversations,\n    fetchApiKey,\n    fetchPrompts,\n    fetchOpenRouterModels,\n    fetchAzureModels,\n    fetchCustomModels,\n    fetchTools,\n    fetchSystemTools,\n    fetchExternalOllama,\n  } = useUser();\n  const {\n    setUserCollections,\n    setSelectedCollection,\n    setOpenLibrary,\n    setOpenAddToCollection,\n    fetchCollections,\n    setEmbeddingModels,\n  } = useLibrary();\n  const {\n    setSettings,\n    setUsers,\n    setSettingsOpen,\n    checkFFMPEG,\n    setPlatform,\n    fetchSettings,\n    setSystemSpecs,\n  } = useSysSettings();\n\n  // Initial setup that doesn't depend on activeUser\n  useEffect(() => {\n    initializeShiki();\n    const fetchUsers = async () => {\n      if (window.electron && window.electron.getUsers) {\n        try {\n          const response = await window.electron.getUsers();\n          const fetchedUsers = response.users as User[];\n          setUsers(fetchedUsers);\n          if (fetchedUsers.length === 0) {\n            setActiveView(\"Signup\");\n          } else {\n            setActiveView(\"SelectAccount\");\n          }\n        } catch (error) {\n          console.error(\"Error fetching users:\", error);\n          setActiveView(\"Signup\");\n        }\n  
    } else {\n        console.error(\"window.electron or getUsers method is not defined\");\n        setActiveView(\"Signup\");\n      }\n    };\n    const getPlatform = async () => {\n      const plat = await window.electron.getPlatform();\n      setPlatform(plat.platform);\n    };\n    getPlatform();\n    checkFFMPEG();\n    fetchUsers();\n    fetchSystemSpecs(setSystemSpecs);\n  }, []);\n\n  // User-dependent initialization\n  useEffect(() => {\n    if (activeUser) {\n      fetchOpenRouterModels();\n      fetchSettings(activeUser);\n      getUserConversations();\n      fetchApiKey();\n      fetchPrompts();\n      fetchExternalOllama();\n      fetchEmbeddingModels(setEmbeddingModels);\n      fetchDevAPIKeys();\n      fetchCollections();\n      fetchAzureModels();\n      fetchCustomModels();\n      fetchTools();\n      fetchSystemTools();\n    }\n  }, [activeUser]);\n\n  const handleResetUserState = useCallback(() => {\n    setActiveUser(null);\n    setUserCollections([]);\n    setApiKeys([]);\n    setConversations([]);\n    setPrompts([]);\n    setSettings({});\n    setSelectedCollection(null);\n    setFilteredConversations([]);\n    setOpenLibrary(false);\n    setOpenAddToCollection(false);\n    setIsSearchOpen(false);\n    setSearchTerm(\"\");\n    setSettingsOpen(false);\n    handleResetChat();\n  }, [\n    setActiveUser,\n    setUserCollections,\n    setApiKeys,\n    setConversations,\n    setPrompts,\n    setSettings,\n    setSelectedCollection,\n    setFilteredConversations,\n    setOpenLibrary,\n    setOpenAddToCollection,\n    setIsSearchOpen,\n    setSearchTerm,\n    setSettingsOpen,\n    handleResetChat,\n  ]);\n\n  const handleViewChange = useCallback(\n    (view: View) => {\n      setActiveView(view);\n    },\n    [setActiveView]\n  );\n\n  useEffect(() => {\n    const unsubscribeReset =\n      window.electron.subscribeResetUserState(handleResetUserState);\n    const unsubscribeView =\n      window.electron.subscribeChangeView(handleViewChange);\n\n   
 return () => {\n      unsubscribeReset();\n      unsubscribeView();\n    };\n  }, [handleResetUserState, handleViewChange]);\n}\n"
  },
  {
    "path": "Frontend/src/hooks/useChatLogic.ts",
    "content": "import { useChatInput } from \"@/context/useChatInput\";\nimport { useUser } from \"@/context/useUser\";\nimport { useView } from \"@/context/useView\";\nimport { useEffect } from \"react\";\n\nimport { useRef } from \"react\";\n\nimport { useState } from \"react\";\n\nexport function useChatLogic() {\n  const scrollAreaRef = useRef<HTMLDivElement>(null);\n  const [resetCounter, setResetCounter] = useState(0);\n  const bottomRef = useRef<HTMLDivElement>(null);\n  const [shouldAutoScroll, setShouldAutoScroll] = useState(true);\n  const [hasUserScrolled, setHasUserScrolled] = useState(false);\n  const [showScrollButton, setShowScrollButton] = useState(false);\n\n  const { setActiveView } = useView();\n  const {\n    handleResetChat: originalHandleResetChat,\n    agentActions,\n    setAgentActions,\n    streamingMessage,\n    setStreamingMessage,\n    setStreamingMessageReasoning,\n    streamingMessageReasoning,\n    activeUser,\n    messages,\n    setMessages,\n    setCurrentRequestId,\n  } = useUser();\n\n  const { isLoading, setIsLoading } = useChatInput();\n\n  const scrollToBottom = (behavior: ScrollBehavior = \"smooth\") => {\n    const scrollElement = scrollAreaRef.current?.querySelector(\n      \"[data-radix-scroll-area-viewport]\"\n    );\n\n    if (scrollElement) {\n      scrollElement.scrollTo({\n        top: scrollElement.scrollHeight,\n        behavior,\n      });\n      setShouldAutoScroll(true);\n      setHasUserScrolled(false);\n    }\n  };\n\n  useEffect(() => {\n    if ((shouldAutoScroll || !hasUserScrolled) && messages.length > 0) {\n      const timeoutId = setTimeout(() => {\n        scrollToBottom(\"instant\");\n      }, 50);\n      return () => clearTimeout(timeoutId);\n    }\n  }, [\n    messages,\n    streamingMessage,\n    streamingMessageReasoning,\n    agentActions,\n    isLoading,\n    shouldAutoScroll,\n    hasUserScrolled,\n  ]);\n\n  // Move all the useEffects here\n  useEffect(() => {\n    if (messages.length === 0) {\n  
    setHasUserScrolled(false);\n      setShouldAutoScroll(true);\n    }\n  }, [messages.length]);\n\n  // Move other effects...\n\n  useEffect(() => {\n    const scrollElement = scrollAreaRef.current?.querySelector(\n      \"[data-radix-scroll-area-viewport]\"\n    );\n\n    const handleScroll = () => {\n      if (!scrollElement) return;\n\n      const { scrollTop, scrollHeight, clientHeight } = scrollElement;\n      const isNearBottom = scrollHeight - scrollTop - clientHeight < 100;\n      const needsScroll = scrollHeight > clientHeight;\n\n      setShowScrollButton(!isNearBottom && needsScroll);\n      setShouldAutoScroll(isNearBottom);\n\n      if (!hasUserScrolled && !isNearBottom) {\n        setHasUserScrolled(true);\n      }\n    };\n\n    if (scrollElement) {\n      scrollElement.addEventListener(\"scroll\", handleScroll, { passive: true });\n      // Initial check when component mounts\n      handleScroll();\n      return () => {\n        scrollElement.removeEventListener(\"scroll\", handleScroll);\n      };\n    }\n  }, [hasUserScrolled]);\n\n  useEffect(() => {\n    let newMessage: string = \"\";\n    let newReasoning: string = \"\";\n    let isSubscribed = true; // Add a flag to prevent updates after unmount\n\n    const handleMessageChunk = (chunk: string) => {\n      if (!isSubscribed) return; // Skip if component is unmounted\n      if (chunk.startsWith(\"[REASONING]:\")) {\n        newReasoning += chunk.replace(\"[REASONING]:\", \"\");\n        setStreamingMessageReasoning(newReasoning);\n      } else if (chunk.startsWith(\"[Agent]:\")) {\n        setAgentActions(chunk.replace(\"[Agent]:\", \"\"));\n      } else {\n        newMessage += chunk;\n        setStreamingMessage(newMessage);\n      }\n    };\n\n    const handleStreamEnd = () => {\n      if (!isSubscribed) return;\n\n      const finalMessage = newMessage;\n      const finalReasoning = newReasoning;\n\n      setMessages((prevMessages) => {\n        const lastMessage = 
prevMessages[prevMessages.length - 1];\n        if (!lastMessage || lastMessage.role === \"user\") {\n          return [\n            ...prevMessages,\n            {\n              role: \"assistant\",\n              content: finalMessage,\n              reasoning_content: finalReasoning,\n              timestamp: new Date(),\n            },\n          ];\n        } else if (lastMessage.role === \"assistant\") {\n          const updatedMessage = {\n            ...lastMessage,\n            content: finalMessage,\n            reasoning_content: finalReasoning,\n          };\n          return [...prevMessages.slice(0, -1), updatedMessage];\n        }\n        return prevMessages;\n      });\n\n      // Ensure we stay at bottom when message completes\n      if (!hasUserScrolled) {\n        requestAnimationFrame(() => {\n          if (!isSubscribed) return;\n          const scrollElement = scrollAreaRef.current?.querySelector(\n            \"[data-radix-scroll-area-viewport]\"\n          );\n          if (scrollElement) {\n            scrollElement.scrollTo({\n              top: scrollElement.scrollHeight,\n              behavior: \"instant\",\n            });\n          }\n          setStreamingMessage(\"\");\n          setStreamingMessageReasoning(\"\");\n          setIsLoading(false);\n          setCurrentRequestId(null);\n        });\n      } else {\n        // If user has scrolled, just update the state without forcing scroll\n        setStreamingMessage(\"\");\n        setStreamingMessageReasoning(\"\");\n        setIsLoading(false);\n        setCurrentRequestId(null);\n      }\n\n      newMessage = \"\";\n      newReasoning = \"\";\n    };\n\n    // Remove any existing listeners before adding new ones\n    window.electron.offMessageChunk(handleMessageChunk);\n    window.electron.offStreamEnd(handleStreamEnd);\n\n    // Add new listeners\n    window.electron.onMessageChunk(handleMessageChunk);\n    window.electron.onStreamEnd(handleStreamEnd);\n\n    return () => 
{\n      isSubscribed = false; // Set flag to prevent updates after unmount\n      // Clean up listeners\n      window.electron.offMessageChunk(handleMessageChunk);\n      window.electron.offStreamEnd(handleStreamEnd);\n    };\n  }, [\n    setIsLoading,\n    setMessages,\n    setStreamingMessage,\n    setStreamingMessageReasoning,\n    setAgentActions,\n    setCurrentRequestId,\n  ]);\n\n  useEffect(() => {\n    if (!activeUser) {\n      setActiveView(\"SelectAccount\");\n    }\n  }, [activeUser, setActiveView]);\n\n  const handleResetChat = async () => {\n    await originalHandleResetChat();\n    setResetCounter((c) => c + 1);\n    setHasUserScrolled(false);\n    setShouldAutoScroll(true);\n  };\n\n  return {\n    scrollAreaRef,\n    resetCounter,\n    bottomRef,\n    shouldAutoScroll,\n    hasUserScrolled,\n    showScrollButton,\n    handleResetChat,\n    scrollToBottom,\n  };\n}\n"
  },
  {
    "path": "Frontend/src/hooks/useChatManagement.ts",
    "content": "import { useCallback, useState } from \"react\";\n\nexport const useChatManagement = (\n  activeUser: User | null,\n  onChatComplete?: () => void\n) => {\n  const [messages, setMessages] = useState<Message[]>([]);\n  const [streamingMessage, setStreamingMessage] = useState<string>(\"\");\n  const [streamingMessageReasoning, setStreamingMessageReasoning] =\n    useState<string>(\"\");\n  const [agentActions, setAgentActions] = useState<string>(\"\");\n  const [isLoading, setIsLoading] = useState<boolean>(false);\n  const [currentRequestId, setCurrentRequestId] = useState<number | null>(null);\n  const [error, setError] = useState<string | null>(null);\n  const [input, setInput] = useState<string>(\"\");\n\n  const handleChatRequest = useCallback(\n    async (\n      collectionId: number | undefined,\n      suggestion?: string,\n      conversationId?: number\n    ) => {\n      if (!activeUser) return;\n      setIsLoading(true);\n      const requestId = Date.now();\n      setCurrentRequestId(requestId);\n\n      setError(null);\n      const newUserMessage = {\n        role: \"user\",\n        content: suggestion || input,\n        timestamp: new Date(),\n      } as Message;\n      setMessages((prev) => [...prev, newUserMessage]);\n      setInput(\"\");\n\n      try {\n        const result = await window.electron.chatRequest(\n          [...messages, newUserMessage],\n          activeUser,\n          conversationId,\n          collectionId,\n          undefined,\n          requestId\n        );\n\n        if (result.error) {\n          setError(result.error);\n          setIsLoading(false);\n          console.error(\"Error in chat:\", result.error);\n        }\n\n        setMessages(result.messages);\n\n        // Notify parent of chat completion\n        onChatComplete?.();\n      } catch (error) {\n        if (error instanceof Error && error.name === \"AbortError\") {\n          setError(\"Request was cancelled\");\n        } else {\n          
console.error(\"Error in chat:\", error);\n        }\n      }\n    },\n    [activeUser, messages, input, onChatComplete]\n  );\n\n  const cancelRequest = useCallback(() => {\n    return new Promise<void>((resolve) => {\n      if (currentRequestId) {\n        window.electron.abortChatRequest(currentRequestId);\n        setTimeout(() => {\n          setStreamingMessage(\"\");\n          setStreamingMessageReasoning(\"\");\n          resolve();\n        }, 100);\n      } else {\n        resolve();\n      }\n    });\n  }, [currentRequestId]);\n\n  return {\n    messages,\n    setMessages,\n    streamingMessage,\n    setStreamingMessage,\n    streamingMessageReasoning,\n    setStreamingMessageReasoning,\n    isLoading,\n    setIsLoading,\n    error,\n    setError,\n    currentRequestId,\n    setCurrentRequestId,\n    handleChatRequest,\n    cancelRequest,\n    input,\n    setInput,\n    agentActions,\n    setAgentActions,\n  };\n};\n"
  },
  {
    "path": "Frontend/src/hooks/useConversationManagement.ts",
    "content": "import { useCallback, useState } from \"react\";\n\nexport const useConversationManagement = (activeUser: User | null) => {\n  const [conversations, setConversations] = useState<Conversation[]>([]);\n  const [activeConversation, setActiveConversation] = useState<number | null>(\n    null\n  );\n  const [title, setTitle] = useState<string | null>(null);\n  const [newConversation, setNewConversation] = useState<boolean>(true);\n\n  const getUserConversations = useCallback(async () => {\n    if (!window.electron || !activeUser) return;\n    const conversations = await window.electron.getUserConversations(\n      activeUser.id\n    );\n    if (conversations?.conversations) {\n      setConversations(conversations.conversations);\n    }\n  }, [activeUser]);\n\n  return {\n    conversations,\n    setConversations,\n    activeConversation,\n    setActiveConversation,\n    title,\n    setTitle,\n    newConversation,\n    setNewConversation,\n    getUserConversations,\n  };\n};\n"
  },
  {
    "path": "Frontend/src/hooks/useModelManagement.ts",
    "content": "import { useCallback, useState } from \"react\";\n\nexport const useModelManagement = (activeUser: User | null) => {\n  const [openRouterModels, setOpenRouterModels] = useState<OpenRouterModel[]>(\n    []\n  );\n  const [externalOllama, setExternalOllama] = useState<ExternalOllama[]>([]);\n  const [azureModels, setAzureModels] = useState<AzureModel[]>([]);\n  const [customModels, setCustomModels] = useState<CustomModel[]>([]);\n  const [tools, setTools] = useState<Tool[]>([]);\n  const [userTools, setUserTools] = useState<UserTool[]>([]);\n  const [systemTools, setSystemTools] = useState<Tool[]>([]);\n  const fetchOpenRouterModels = useCallback(async () => {\n    if (!window.electron || !activeUser) return;\n    const models = await window.electron.getOpenRouterModels(activeUser.id);\n    setOpenRouterModels(models.models);\n  }, [activeUser]);\n\n  const fetchExternalOllama = useCallback(async () => {\n    if (!window.electron || !activeUser) return;\n    const ollama = await window.electron.getExternalOllama(activeUser.id);\n    setExternalOllama(ollama.ollama);\n  }, [activeUser]);\n\n  const fetchAzureModels = useCallback(async () => {\n    if (!window.electron || !activeUser) return;\n    const models = await window.electron.getAzureOpenAIModels(activeUser.id);\n    setAzureModels(\n      models.models.map((m) => ({\n        ...m,\n        id: m.id,\n        deployment: m.model,\n        apiKey: m.api_key,\n      }))\n    );\n  }, [activeUser]);\n\n  const fetchTools = useCallback(async () => {\n    if (!window.electron || !activeUser) return;\n\n    // First get system tools to have the complete tool information\n    const systemToolsResult = await window.electron.getTools();\n    const systemTools = systemToolsResult.tools;\n\n    // Then get user tool settings\n    const userToolsResult = await window.electron.getUserTools(activeUser.id);\n    const userToolSettings = userToolsResult.tools;\n\n    // Join the user tool settings with system 
tool information\n    const completeUserTools = userToolSettings\n      .map((userTool) => {\n        const systemTool = systemTools.find((st) => st.id === userTool.id);\n        if (!systemTool) return null;\n\n        return {\n          id: userTool.id,\n          name: systemTool.name,\n          description: systemTool.description,\n          enabled: userTool.enabled,\n          docked: Number(userTool.docked),\n        };\n      })\n      .filter((tool): tool is NonNullable<typeof tool> => tool !== null);\n\n    setUserTools(completeUserTools);\n  }, [activeUser]);\n\n  const fetchSystemTools = useCallback(async () => {\n    if (!window.electron || !activeUser) return;\n    const tools = await window.electron.getTools();\n    setSystemTools(tools.tools);\n  }, [activeUser]);\n\n  const toggleTool = (tool: UserTool) => {\n    if (!activeUser) return;\n    const existingTool = userTools.find((t) => t.id === tool.id);\n\n    if (existingTool) {\n      setUserTools((prev) =>\n        prev.map((t) =>\n          t.id === tool.id ? { ...t, enabled: t.enabled === 1 ? 0 : 1 } : t\n        )\n      );\n      window.electron.updateUserTool(\n        activeUser.id,\n        tool.id,\n        existingTool.enabled === 1 ? 
0 : 1,\n        1\n      );\n    } else {\n      setUserTools((prev) => [\n        ...prev,\n        {\n          ...tool,\n          enabled: 1,\n          docked: 1,\n        },\n      ]);\n      window.electron.updateUserTool(activeUser.id, tool.id, 1, 1);\n    }\n  };\n\n  const dockTool = (tool: UserTool) => {\n    if (!activeUser) return;\n    const existingTool = userTools.find((t) => t.name === tool.name);\n\n    if (existingTool) {\n      setUserTools((prev) => prev.filter((t) => t.name !== tool.name));\n      window.electron.updateUserTool(activeUser.id, tool.id, 0, 0);\n    } else {\n      const newTool = {\n        ...tool,\n        enabled: 1,\n        docked: 1,\n      };\n      setUserTools((prev) => [...prev, newTool]);\n      window.electron.updateUserTool(activeUser.id, tool.id, 1, 1);\n    }\n  };\n\n  const fetchCustomModels = useCallback(async () => {\n    if (!window.electron || !activeUser) return;\n    const models = await window.electron.getCustomAPIs(activeUser.id);\n    setCustomModels(models.api);\n  }, [activeUser]);\n\n  return {\n    openRouterModels,\n    setOpenRouterModels,\n    azureModels,\n    setAzureModels,\n    customModels,\n    setCustomModels,\n    fetchOpenRouterModels,\n    fetchAzureModels,\n    fetchCustomModels,\n    tools,\n    setTools,\n    dockTool,\n    fetchTools,\n    systemTools,\n    setSystemTools,\n    fetchSystemTools,\n    userTools,\n    setUserTools,\n    toggleTool,\n    externalOllama,\n    setExternalOllama,\n    fetchExternalOllama,\n  };\n};\n"
  },
  {
    "path": "Frontend/src/hooks/useStatistics.tsx",
    "content": "import { useEffect, useState } from \"react\";\n\nexport default function useStatistics(dataPointCount: number): Statistics[] {\n  const [value, setValue] = useState<Statistics[]>([]);\n  useEffect(() => {\n    const unsub = window.electron.subscribeStatistics((stats: Statistics) => {\n      setValue((prev) => {\n        const newData = [...prev, stats];\n        if (newData.length > dataPointCount) {\n          newData.shift();\n        }\n        return newData;\n      });\n    });\n    return unsub;\n  }, []);\n  return value;\n}\n"
  },
  {
    "path": "Frontend/src/hooks/useUIState.ts",
    "content": "import { useEffect, useRef, useState } from \"react\";\nexport const useUIState = () => {\n  const [isSearchOpen, setIsSearchOpen] = useState<boolean>(false);\n  const [searchTerm, setSearchTerm] = useState<string>(\"\");\n  const searchRef = useRef<HTMLDivElement>(null);\n  const [alertForUser, setAlertForUser] = useState<boolean>(false);\n\n  useEffect(() => {\n    function handleClickOutside(event: MouseEvent) {\n      if (\n        searchRef.current &&\n        !searchRef.current.contains(event.target as Node)\n      ) {\n        setIsSearchOpen(false);\n        setSearchTerm(\"\");\n      }\n    }\n\n    document.addEventListener(\"mousedown\", handleClickOutside);\n    return () => {\n      document.removeEventListener(\"mousedown\", handleClickOutside);\n    };\n  }, []);\n\n  return {\n    isSearchOpen,\n    setIsSearchOpen,\n    searchTerm,\n    setSearchTerm,\n    searchRef,\n    alertForUser,\n    setAlertForUser,\n  };\n};\n"
  },
  {
    "path": "Frontend/src/lib/shikiHightlight.ts",
    "content": "import { createHighlighter, Highlighter } from \"shiki\";\n\nlet highlighter: Highlighter | null = null;\nexport async function initializeShiki() {\n  // Each language appears exactly once; duplicates previously caused\n  // redundant grammar loads in createHighlighter.\n  highlighter = await createHighlighter({\n    themes: [\"github-dark-dimmed\"],\n    langs: [\n      \"javascript\",\n      \"typescript\",\n      \"python\",\n      \"html\",\n      \"css\",\n      \"json\",\n      \"bash\",\n      \"java\",\n      \"c\",\n      \"cpp\",\n      \"csharp\",\n      \"go\",\n      \"rust\",\n      \"ruby\",\n      \"php\",\n      \"swift\",\n      \"kotlin\",\n      \"sql\",\n      \"yaml\",\n      \"xml\",\n      \"markdown\",\n      \"shell\",\n      \"dockerfile\",\n      \"powershell\",\n    ],\n  });\n}\n\nexport function highlightCode(code: string, language: string): string {\n  if (language === \"math\") {\n    language = \"python\";\n  }\n  if (!highlighter) {\n    console.error(\"Shiki highlighter not initialized\");\n    return code;\n  }\n  try {\n    // Use plaintext as fallback for unsupported languages\n    const lang = language.toLowerCase();\n    return highlighter.codeToHtml(code, {\n      lang: highlighter.getLoadedLanguages().includes(lang)\n        ? lang\n        : \"plaintext\",\n      theme: \"github-dark-dimmed\",\n    });\n  } catch (error) {\n    console.error(\"Error highlighting code:\", error);\n    return code;\n  }\n}\n"
  },
  {
    "path": "Frontend/src/lib/utils.ts",
    "content": "import { clsx, type ClassValue } from \"clsx\";\nimport { twMerge } from \"tailwind-merge\";\n\nexport function cn(...inputs: ClassValue[]) {\n  return twMerge(clsx(inputs));\n}\n\nexport function formatDate(input: Date | string): string {\n  const date = input instanceof Date ? input : new Date(input);\n\n  if (isNaN(date.getTime())) {\n    throw new Error(\"Invalid date input\");\n  }\n\n  return new Intl.DateTimeFormat(\"en-US\", {\n    hour: \"numeric\",\n    minute: \"numeric\",\n    hour12: true,\n  }).format(date);\n}\n\nexport async function sanitizeStoreName(name: string) {\n  return name\n    .replace(/\\s+/g, \"_\")\n    .replace(/[^a-zA-Z0-9_-]/g, \"\")\n    .replace(/^[^a-zA-Z0-9]+|[^a-zA-Z0-9]+$/g, \"\")\n    .replace(/_+/g, \"_\")\n    .toLowerCase();\n}\n\nexport const getYouTubeLink = (source: string, startTime?: number) => {\n  if (!source.includes(\"youtube.com\") && !source.includes(\"youtu.be\"))\n    return source;\n\n  // Remove any existing timestamp\n  const cleanUrl = source.replace(/[&?]t=\\d+s?/, \"\");\n\n  // If there's a timestamp, add it to the URL\n  if (startTime) {\n    if (cleanUrl.includes(\"?\")) {\n      return `${cleanUrl}&t=${Math.floor(startTime)}`;\n    }\n    return `${cleanUrl}?t=${Math.floor(startTime)}`;\n  }\n  return cleanUrl;\n};\n\nexport const formatTimestamp = (seconds: number) => {\n  const hours = Math.floor(seconds / 3600);\n  const minutes = Math.floor((seconds % 3600) / 60);\n  const remainingSeconds = Math.floor(seconds % 60);\n\n  if (hours > 0) {\n    return `${hours}:${minutes.toString().padStart(2, \"0\")}:${remainingSeconds\n      .toString()\n      .padStart(2, \"0\")}`;\n  }\n  return `${minutes}:${remainingSeconds.toString().padStart(2, \"0\")}`;\n};\n\nexport const getFileName = (source: string) => {\n  try {\n    return source.split(\"/\").pop();\n  } catch (error) {\n    console.error(error);\n    return source;\n  }\n};\n\nexport const processFiles = (\n  files: string | { files: 
string } | string[] | unknown\n): string[] => {\n  if (typeof files === \"string\") {\n    return files.split(\",\").filter(Boolean);\n  }\n  if (\n    typeof files === \"object\" &&\n    files !== null &&\n    \"files\" in files &&\n    typeof files.files === \"string\"\n  ) {\n    return files.files.split(\",\").filter(Boolean);\n  }\n  if (Array.isArray(files)) {\n    return files;\n  }\n  return [];\n};\n\n\n"
  },
  {
    "path": "Frontend/src/loading.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <title>Loading</title>\n    <script src=\"https://cdn.tailwindcss.com\"></script>\n    <script>\n      tailwind.config = {\n        theme: {\n          extend: {\n            animation: {\n              orbit: \"orbit calc(var(--duration)*1s) linear infinite\",\n            },\n            keyframes: {\n              orbit: {\n                \"0%\": {\n                  transform:\n                    \"rotate(calc(var(--angle) * 1deg)) translateX(calc(var(--radius) * 1px)) rotate(calc(var(--angle) * -1deg))\",\n                },\n                \"100%\": {\n                  transform:\n                    \"rotate(calc(var(--angle) * 1deg + 360deg)) translateX(calc(var(--radius) * 1px)) rotate(calc((var(--angle) * -1deg) - 360deg))\",\n                },\n              },\n            },\n          },\n        },\n      };\n    </script>\n    <style>\n      :root {\n        --background: 187 36.4% 4.48%;\n        --foreground: 187 5.6% 97.8%;\n        --muted: 187 28% 16.8%;\n        --primary: 187 56% 56%;\n        --secondary: 187 28% 16.8%;\n        --border: 187 28% 16.8%;\n        --window-radius: 12px;\n      }\n\n      @keyframes writing {\n        0% {\n          transform: translateX(-20px) rotate(-5deg);\n          opacity: 0;\n        }\n        100% {\n          transform: translateX(0) rotate(0deg);\n          opacity: 1;\n        }\n      }\n      @keyframes float {\n        0%,\n        100% {\n          transform: translateY(0);\n        }\n        50% {\n          transform: translateY(-10px);\n        }\n      }\n      @keyframes fadeIn {\n        from {\n          opacity: 0;\n          transform: translateY(10px);\n        }\n        to {\n          opacity: 1;\n          transform: translateY(0);\n        }\n      }\n      body {\n        -webkit-app-region: drag;\n        background-color: hsl(var(--background));\n        color: hsl(var(--foreground));\n        margin: 0;\n        
padding: 0;\n        height: 100vh;\n        width: 100vw;\n        overflow: hidden;\n        border-radius: var(--window-radius);\n      }\n      .no-drag {\n        -webkit-app-region: no-drag;\n      }\n      .status-container {\n        height: 3em;\n        overflow: hidden;\n      }\n      .failure-message {\n        display: none;\n      }\n      .failure-message.show {\n        display: block;\n        animation: fadeIn 0.5s ease-out forwards;\n      }\n      .orbit-container {\n        position: relative;\n        width: 320px;\n        height: 320px;\n      }\n      .orbit-circle {\n        position: absolute;\n        inset: 0;\n      }\n      .orbit-dot {\n        position: absolute;\n        left: 50%;\n        top: 50%;\n        transform-origin: center;\n        margin-left: calc(var(--icon-size) / -2);\n        margin-top: calc(var(--icon-size) / -2);\n      }\n      .glass-panel {\n        backdrop-filter: blur(12px);\n        border: 1px solid hsl(var(--border) / 0.5);\n        box-shadow: 0 20px 25px -5px rgb(0 0 0 / 0.1);\n        border-radius: var(--window-radius);\n        overflow: hidden;\n        background: linear-gradient(to bottom right, hsl(var(--secondary) / 0.5), hsl(var(--secondary) / 0.3), hsl(var(--background)));\n      }\n      .progress-bar {\n        background-color: hsl(var(--muted));\n        border-radius: 9999px;\n        height: 0.625rem;\n      }\n      .progress-bar-fill {\n        background-color: hsl(var(--primary));\n        height: 100%;\n        border-radius: 9999px;\n        transition: width 300ms;\n      }\n    </style>\n  </head>\n  <body class=\"h-screen flex items-center justify-center p-6 bg-background\">\n    <div class=\"glass-panel p-8 w-[480px] text-center\">\n      <div class=\"flex justify-center\">\n        <div class=\"orbit-container mb-8 relative\">\n          <h2\n            class=\"text-4xl font-semibold text-[#ffffff] absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2 z-10\"\n       
   >\n            Notate\n          </h2>\n          <svg\n            xmlns=\"http://www.w3.org/2000/svg\"\n            version=\"1.1\"\n            class=\"orbit-circle pointer-events-none size-full\"\n            viewBox=\"0 0 320 320\"\n          >\n            <circle\n              class=\"stroke-white/10\"\n              cx=\"160\"\n              cy=\"160\"\n              r=\"120\"\n              fill=\"none\"\n              stroke-width=\"1\"\n            />\n          </svg>\n\n          <!-- Orbiting Icons -->\n          <div\n            style=\"\n              --duration: 20;\n              --radius: 120;\n              --angle: 70;\n              --icon-size: 24px;\n            \"\n            class=\"orbit-dot flex size-[var(--icon-size)] transform-gpu animate-orbit items-center justify-center\"\n          >\n            <div class=\"flex items-center justify-center\">\n              <svg\n                class=\"size-14 pl-3 pt-2\"\n                style=\"color: #00ff9d\"\n                fill=\"currentColor\"\n                fill-rule=\"evenodd\"\n                viewBox=\"0 0 24 24\"\n                xmlns=\"http://www.w3.org/2000/svg\"\n              >\n                <path\n                  d=\"M13.85 7.6 13 6.77 8.35 2.15a.48.48 0 0 0-.7 0L3 6.72l-.83.82a.5.5 0 1 0 .7.71v5.2a.5.5 0 0 0 .5.5h9.29a.5.5 0 0 0 .5-.5V8.3a.52.52 0 0 0 .35.14.51.51 0 0 0 .36-.15.49.49 0 0 0-.02-.69ZM6.38 13V8.92h3V13Zm5.76 0H10.4V8.42a.51.51 0 0 0-.5-.5h-4a.51.51 0 0 0-.5.5V13H3.85V7.31L8 3.2l4.14 4.11Z\"\n                ></path>\n              </svg>\n            </div>\n          </div>\n          <div\n            style=\"\n              --duration: 20;\n              --radius: 120;\n              --angle: 140;\n              --icon-size: 24px;\n            \"\n            class=\"orbit-dot flex size-[var(--icon-size)] transform-gpu animate-orbit items-center justify-center\"\n          >\n            <div class=\"flex items-center justify-center\">\n           
   <svg\n                class=\"size-6\"\n                style=\"color: #ff6b6b\"\n                fill=\"currentColor\"\n                fill-rule=\"evenodd\"\n                viewBox=\"0 0 24 24\"\n                xmlns=\"http://www.w3.org/2000/svg\"\n              >\n                <title>Ollama</title>\n                <path\n                  d=\"M7.905 1.09c.216.085.411.225.588.41.295.306.544.744.734 1.263.191.522.315 1.1.362 1.68a5.054 5.054 0 012.049-.636l.051-.004c.87-.07 1.73.087 2.48.474.101.053.2.11.297.17.05-.569.172-1.134.36-1.644.19-.52.439-.957.733-1.264a1.67 1.67 0 01.589-.41c.257-.1.53-.118.796-.042.401.114.745.368 1.016.737.248.337.434.769.561 1.287.23.934.27 2.163.115 3.645l.053.04.026.019c.757.576 1.284 1.397 1.563 2.35.435 1.487.216 3.155-.534 4.088l-.018.021.002.003c.417.762.67 1.567.724 2.4l.002.03c.064 1.065-.2 2.137-.814 3.19l-.007.01.01.024c.472 1.157.62 2.322.438 3.486l-.006.039a.651.651 0 01-.747.536.648.648 0 01-.54-.742c.167-1.033.01-2.069-.48-3.123a.643.643 0 01.04-.617l.004-.006c.604-.924.854-1.83.8-2.72-.046-.779-.325-1.544-.8-2.273a.644.644 0 01.18-.886l.009-.006c.243-.159.467-.565.58-1.12a4.229 4.229 0 00-.095-1.974c-.205-.7-.58-1.284-1.105-1.683-.595-.454-1.383-.673-2.38-.61a.653.653 0 01-.632-.371c-.314-.665-.772-1.141-1.343-1.436a3.288 3.288 0 00-1.772-.332c-1.245.099-2.343.801-2.67 1.686a.652.652 0 01-.61.425c-1.067.002-1.893.252-2.497.703-.522.39-.878.935-1.066 1.588a4.07 4.07 0 00-.068 1.886c.112.558.331 1.02.582 1.269l.008.007c.212.207.257.53.109.785-.36.622-.629 1.549-.673 2.44-.05 1.018.186 1.902.719 2.536l.016.019a.643.643 0 01.095.69c-.576 1.236-.753 2.252-.562 3.052a.652.652 0 01-1.269.298c-.243-1.018-.078-2.184.473-3.498l.014-.035-.008-.012a4.339 4.339 0 01-.598-1.309l-.005-.019a5.764 5.764 0 01-.177-1.785c.044-.91.278-1.842.622-2.59l.012-.026-.002-.002c-.293-.418-.51-.953-.63-1.545l-.005-.024a5.352 5.352 0 01.093-2.49c.262-.915.777-1.701 
1.536-2.269.06-.045.123-.09.186-.132-.159-1.493-.119-2.73.112-3.67.127-.518.314-.95.562-1.287.27-.368.614-.622 1.015-.737.266-.076.54-.059.797.042z\"\n                ></path>\n              </svg>\n            </div>\n          </div>\n          <div\n            style=\"\n              --duration: 20;\n              --radius: 120;\n              --angle: 210;\n              --icon-size: 24px;\n            \"\n            class=\"orbit-dot flex size-[var(--icon-size)] transform-gpu animate-orbit items-center justify-center\"\n          >\n            <div class=\"flex items-center justify-center\">\n              <svg\n                class=\"size-6\"\n                style=\"color: #ff47d2\"\n                fill=\"currentColor\"\n                fill-rule=\"evenodd\"\n                viewBox=\"0 0 24 24\"\n                xmlns=\"http://www.w3.org/2000/svg\"\n              >\n                <title>Anthropic</title>\n                <path\n                  d=\"M13.827 3.52h3.603L24 20h-3.603l-6.57-16.48zm-7.258 0h3.767L16.906 20h-3.674l-1.343-3.461H5.017l-1.344 3.46H0L6.57 3.522zm4.132 9.959L8.453 7.687 6.205 13.48H10.7z\"\n                ></path>\n              </svg>\n            </div>\n          </div>\n          <div\n            style=\"\n              --duration: 20;\n              --radius: 120;\n              --angle: 280;\n              --icon-size: 24px;\n            \"\n            class=\"orbit-dot flex size-[var(--icon-size)] transform-gpu animate-orbit items-center justify-center\"\n          >\n            <div class=\"flex items-center justify-center\">\n              <svg\n                class=\"size-6\"\n                style=\"color: #47b3ff\"\n                fill=\"currentColor\"\n                fill-rule=\"evenodd\"\n                viewBox=\"0 0 24 24\"\n                xmlns=\"http://www.w3.org/2000/svg\"\n              >\n                <title>Grok</title>\n                <path\n                  d=\"M6.469 8.776L16.512 
23h-4.464L2.005 8.776H6.47zm-.004 7.9l2.233 3.164L6.467 23H2l4.465-6.324zM22 2.582V23h-3.659V7.764L22 2.582zM22 1l-9.952 14.095-2.233-3.163L17.533 1H22z\"\n                ></path>\n              </svg>\n            </div>\n          </div>\n          <div\n            style=\"\n              --duration: 20;\n              --radius: 120;\n              --angle: 360;\n              --icon-size: 24px;\n            \"\n            class=\"orbit-dot flex size-[var(--icon-size)] transform-gpu animate-orbit items-center justify-center\"\n          >\n            <div class=\"flex items-center justify-center\">\n              <svg\n                class=\"size-6\"\n                fill=\"currentColor\"\n                fill-rule=\"evenodd\"\n                style=\"color: #9747ff\"\n                viewBox=\"0 0 24 24\"\n                xmlns=\"http://www.w3.org/2000/svg\"\n              >\n                <title>OpenAI</title>\n                <path\n                  d=\"M21.55 10.004a5.416 5.416 0 00-.478-4.501c-1.217-2.09-3.662-3.166-6.05-2.66A5.59 5.59 0 0010.831 1C8.39.995 6.224 2.546 5.473 4.838A5.553 5.553 0 001.76 7.496a5.487 5.487 0 00.691 6.5 5.416 5.416 0 00.477 4.502c1.217 2.09 3.662 3.165 6.05 2.66A5.586 5.586 0 0013.168 23c2.443.006 4.61-1.546 5.361-3.84a5.553 5.553 0 003.715-2.66 5.488 5.488 0 00-.693-6.497v.001zm-8.381 11.558a4.199 4.199 0 01-2.675-.954c.034-.018.093-.05.132-.074l4.44-2.53a.71.71 0 00.364-.623v-6.176l1.877 1.069c.02.01.033.029.036.05v5.115c-.003 2.274-1.87 4.118-4.174 4.123zM4.192 17.78a4.059 4.059 0 01-.498-2.763c.032.02.09.055.131.078l4.44 2.53c.225.13.504.13.73 0l5.42-3.088v2.138a.068.068 0 01-.027.057L9.9 19.288c-1.999 1.136-4.552.46-5.707-1.51h-.001zM3.023 8.216A4.15 4.15 0 015.198 6.41l-.002.151v5.06a.711.711 0 00.364.624l5.42 3.087-1.876 1.07a.067.067 0 01-.063.005l-4.489-2.559c-1.995-1.14-2.679-3.658-1.53-5.63h.001zm15.417 3.54l-5.42-3.088L14.896 7.6a.067.067 0 01.063-.006l4.489 2.557c1.998 1.14 2.683 3.662 1.529 5.633a4.163 4.163 
0 01-2.174 1.807V12.38a.71.71 0 00-.363-.623zm1.867-2.773a6.04 6.04 0 00-.132-.078l-4.44-2.53a.731.731 0 00-.729 0l-5.42 3.088V7.325a.068.068 0 01.027-.057L14.1 4.713c2-1.137 4.555-.46 5.707 1.513.487.833.664 1.809.499 2.757h.001zm-11.741 3.81l-1.877-1.068a.065.065 0 01-.036-.051V6.559c.001-2.277 1.873-4.122 4.181-4.12.976 0 1.92.338 2.671.954-.034.018-.092.05-.131.073l-4.44 2.53a.71.71 0 00-.365.623l-.003 6.173v.002zm1.02-2.168L12 9.25l2.414 1.375v2.75L12 14.75l-2.415-1.375v-2.75z\"\n                ></path>\n              </svg>\n            </div>\n          </div>\n        </div>\n      </div>\n\n      <div class=\"status-container text-foreground/80 mt-8\" id=\"status\">\n        Initializing...\n      </div>\n\n      <div class=\"progress-bar mb-4\">\n        <div\n          id=\"progress-bar\"\n          class=\"progress-bar-fill transition-all duration-300\"\n          style=\"width: 0%\"\n        ></div>\n      </div>\n\n      <div id=\"failure-message\" class=\"failure-message mt-4\">\n        <p class=\"text-red-400 mb-4\">Failed to start Notate</p>\n        <div class=\"flex justify-center space-x-4\">\n          <button\n            onclick=\"openLogs()\"\n            class=\"no-drag px-4 py-2 bg-[hsl(var(--primary))] hover:bg-[hsl(var(--primary)/.9)] text-white rounded-[8px] transition-colors\"\n          >\n            View Logs\n          </button>\n          <button\n            onclick=\"openGithubIssue()\"\n            class=\"no-drag px-4 py-2 bg-[hsl(var(--secondary))] hover:bg-[hsl(var(--secondary)/.9)] text-white rounded-[8px] transition-colors\"\n          >\n            Report Issue\n          </button>\n        </div>\n      </div>\n    </div>\n\n    <script>\n      const { ipcRenderer } = require(\"electron\");\n\n      ipcRenderer.on(\"update-status\", (event, { text, progress, failed }) => {\n        document.getElementById(\"status\").textContent = text;\n        document.getElementById(\"progress-bar\").style.width = 
`${progress}%`;\n\n        if (failed) {\n          document.getElementById(\"failure-message\").classList.add(\"show\");\n        }\n      });\n\n      function openLogs() {\n        ipcRenderer.send(\"open-logs\");\n      }\n\n      function openGithubIssue() {\n        ipcRenderer.send(\"open-github-issue\");\n      }\n    </script>\n  </body>\n</html>\n"
  },
  {
    "path": "Frontend/src/types/contextTypes/LibraryContextTypes.ts",
    "content": "export interface LibraryContextType {\n  handleDeleteCollection: () => void;\n  files: string[];\n  setFiles: React.Dispatch<React.SetStateAction<string[]>>;\n  loadFiles: () => Promise<void>;\n  handleCancelEmbed: () => Promise<void>;\n  handleProgressData: (data: ProgressData) => void;\n  showProgress: boolean;\n  progressMessage: string;\n  progress: number;\n  openLibrary: boolean;\n  setOpenLibrary: React.Dispatch<React.SetStateAction<boolean>>;\n  openAddToCollection: boolean;\n  setOpenAddToCollection: React.Dispatch<React.SetStateAction<boolean>>;\n  fetchCollections: () => Promise<void>;\n  ingesting: boolean;\n  setIngesting: React.Dispatch<React.SetStateAction<boolean>>;\n  userCollections: Collection[];\n  setUserCollections: React.Dispatch<React.SetStateAction<Collection[]>>;\n  selectedCollection: Collection | null;\n  setSelectedCollection: React.Dispatch<\n    React.SetStateAction<Collection | null>\n  >;\n  setEmbeddingModels: React.Dispatch<React.SetStateAction<Model[]>>;\n  embeddingModels: Model[];\n  showUpload: boolean;\n  setShowUpload: React.Dispatch<React.SetStateAction<boolean>>;\n  showAddStore: boolean;\n  setShowAddStore: React.Dispatch<React.SetStateAction<boolean>>;\n  fileExpanded: boolean;\n  setFileExpanded: React.Dispatch<React.SetStateAction<boolean>>;\n  link: string;\n  setLink: React.Dispatch<React.SetStateAction<string>>;\n  selectedFile: File | null;\n  setSelectedFile: React.Dispatch<React.SetStateAction<File | null>>;\n  selectedLinkType: \"website\" | \"youtube\" | \"crawl\" | \"documentation\" | null;\n  setSelectedLinkType: React.Dispatch<\n    React.SetStateAction<\n      \"website\" | \"youtube\" | \"crawl\" | \"documentation\" | null\n    >\n  >;\n  setProgressMessage: React.Dispatch<React.SetStateAction<string>>;\n  setProgress: React.Dispatch<React.SetStateAction<number>>;\n  setShowProgress: React.Dispatch<React.SetStateAction<boolean>>;\n  handleUpload: (base64Content: string) => 
Promise<void>;\n}\n"
  },
  {
    "path": "Frontend/src/types/contextTypes/SystemSettingsTypes.ts",
    "content": "import { SystemSpecs } from \"@/data/sysSpecs\";\n\nexport interface SysSettingsContextType {\n  ollamaInit: boolean;\n  setOllamaInit: React.Dispatch<React.SetStateAction<boolean>>;\n  isOllamaRunning: boolean;\n  setIsOllamaRunning: (isOllamaRunning: boolean) => void;\n  systemSpecs: SystemSpecs;\n  setSystemSpecs: React.Dispatch<React.SetStateAction<SystemSpecs>>;\n  settingsOpen: boolean;\n  setSettingsOpen: React.Dispatch<React.SetStateAction<boolean>>;\n  settings: UserSettings;\n  setSettings: React.Dispatch<React.SetStateAction<UserSettings>>;\n  platform: \"win32\" | \"darwin\" | \"linux\" | null;\n  setPlatform: React.Dispatch<\n    React.SetStateAction<\"win32\" | \"darwin\" | \"linux\" | null>\n  >;\n  sourceType: \"local\" | \"external\";\n  setSourceType: React.Dispatch<React.SetStateAction<\"local\" | \"external\">>;\n  users: User[];\n  setUsers: React.Dispatch<React.SetStateAction<User[]>>;\n  totalVRAM: number;\n  localModels: Model[];\n  setLocalModels: React.Dispatch<React.SetStateAction<Model[]>>;\n  isRunningModel: boolean;\n  setIsRunningModel: React.Dispatch<React.SetStateAction<boolean>>;\n  isFFMPEGInstalled: boolean;\n  setisFFMPEGInstalled: React.Dispatch<React.SetStateAction<boolean>>;\n  localModalLoading: boolean;\n  setLocalModalLoading: React.Dispatch<React.SetStateAction<boolean>>;\n  progressRef: React.RefObject<HTMLDivElement>;\n  progressLocalOutput: string[];\n  setProgressLocalOutput: React.Dispatch<React.SetStateAction<string[]>>;\n  handleRunOllama: (model: string, activeUser: User) => Promise<void>;\n  isMaximized: boolean;\n  setIsMaximized: React.Dispatch<React.SetStateAction<boolean>>;\n  checkFFMPEG: () => Promise<void>;\n  fetchLocalModels: () => Promise<void>;\n  checkOllama: () => Promise<void>;\n  maxTokens: number;\n  setMaxTokens: React.Dispatch<React.SetStateAction<number>>;\n  localModelDir: string;\n  setLocalModelDir: React.Dispatch<React.SetStateAction<string>>;\n  loadModelsFromDirectory: 
(dirPath: string) => Promise<void>;\n  fetchSettings: (activeUser: User) => Promise<void>;\n  handleRunModel: (\n    model_name: string,\n    model_location: string,\n    model_type: string,\n    user_id: string\n  ) => Promise<void>;\n  ollamaModels: OllamaModel[];\n  setOllamaModels: React.Dispatch<React.SetStateAction<OllamaModel[]>>;\n  selectedModel: Model | null;\n  setSelectedModel: React.Dispatch<React.SetStateAction<Model | null>>;\n  selectedProvider: string;\n  setSelectedProvider: React.Dispatch<React.SetStateAction<string>>;\n  localModel: string;\n  setLocalModel: React.Dispatch<React.SetStateAction<string>>;\n  handleOllamaIntegration: (activeUser: User) => Promise<void>;\n}\n"
  },
  {
    "path": "Frontend/src/types/contextTypes/UserContextType.ts",
    "content": "export interface UserContextType {\n  title: string | null;\n  setTitle: React.Dispatch<React.SetStateAction<string | null>>;\n  activeUser: User | null;\n  setActiveUser: React.Dispatch<React.SetStateAction<User | null>>;\n  apiKeys: ApiKey[];\n  setApiKeys: React.Dispatch<React.SetStateAction<ApiKey[]>>;\n  activeConversation: number | null;\n  setActiveConversation: React.Dispatch<React.SetStateAction<number | null>>;\n  conversations: Conversation[];\n  setConversations: React.Dispatch<React.SetStateAction<Conversation[]>>;\n  prompts: UserPrompts[];\n  setPrompts: React.Dispatch<React.SetStateAction<UserPrompts[]>>;\n  streamingMessage: string;\n  setStreamingMessage: React.Dispatch<React.SetStateAction<string>>;\n  filteredConversations: Conversation[];\n  setFilteredConversations: React.Dispatch<\n    React.SetStateAction<Conversation[]>\n  >;\n  isSearchOpen: boolean;\n  setIsSearchOpen: React.Dispatch<React.SetStateAction<boolean>>;\n  searchTerm: string;\n  setSearchTerm: React.Dispatch<React.SetStateAction<string>>;\n  searchRef: React.RefObject<HTMLDivElement>;\n  messages: Message[];\n  setMessages: React.Dispatch<React.SetStateAction<Message[]>>;\n  newConversation: boolean;\n  setNewConversation: React.Dispatch<React.SetStateAction<boolean>>;\n  handleResetChat: () => void;\n  devAPIKeys: Keys[];\n  setDevAPIKeys: React.Dispatch<React.SetStateAction<Keys[]>>;\n  fetchDevAPIKeys: () => Promise<void>;\n  getUserConversations: () => Promise<void>;\n  alertForUser: boolean;\n  setAlertForUser: React.Dispatch<React.SetStateAction<boolean>>;\n  fetchApiKey: () => Promise<void>;\n  fetchPrompts: () => Promise<void>;\n  error: string | null;\n  setError: React.Dispatch<React.SetStateAction<string | null>>;\n  currentRequestId: number | null;\n  setCurrentRequestId: React.Dispatch<React.SetStateAction<number | null>>;\n  agentActions: string;\n  setAgentActions: React.Dispatch<React.SetStateAction<string>>;\n  fetchMessages: () => 
Promise<void>;\n  openRouterModels: OpenRouterModel[];\n  setOpenRouterModels: React.Dispatch<React.SetStateAction<OpenRouterModel[]>>;\n  apiKeyInput: string;\n  setApiKeyInput: React.Dispatch<React.SetStateAction<string>>;\n  azureModels: AzureModel[];\n  setAzureModels: React.Dispatch<React.SetStateAction<AzureModel[]>>;\n  customModels: CustomModel[];\n  setCustomModels: React.Dispatch<React.SetStateAction<CustomModel[]>>;\n  fetchOpenRouterModels: () => Promise<void>;\n  fetchAzureModels: () => Promise<void>;\n  fetchCustomModels: () => Promise<void>;\n  streamingMessageReasoning: string | null;\n  setStreamingMessageReasoning: React.Dispatch<React.SetStateAction<string>>;\n  tools: Tool[];\n  setTools: React.Dispatch<React.SetStateAction<Tool[]>>;\n  dockTool: (tool: UserTool) => void;\n  fetchTools: () => Promise<void>;\n  systemTools: Tool[];\n  setSystemTools: React.Dispatch<React.SetStateAction<Tool[]>>;\n  fetchSystemTools: () => Promise<void>;\n  userTools: UserTool[];\n  setUserTools: React.Dispatch<React.SetStateAction<UserTool[]>>;\n  toggleTool: (tool: UserTool) => void;\n  externalOllama: ExternalOllama[];\n  setExternalOllama: React.Dispatch<React.SetStateAction<ExternalOllama[]>>;\n  fetchExternalOllama: () => Promise<void>;\n}\n"
  },
  {
    "path": "Frontend/src/types/contextTypes/UserViewTypes.ts",
    "content": "import React from \"react\";\n\nexport interface UserViewContextType {\n  activeView: View;\n  setActiveView: React.Dispatch<React.SetStateAction<View>>;\n}\n"
  },
  {
    "path": "Frontend/src/utils/chatUtilts.ts",
    "content": "export const scrollToBottom = (\n  scrollElement: HTMLElement | null,\n  behavior: ScrollBehavior = \"smooth\"\n) => {\n  if (scrollElement) {\n    scrollElement.scrollTo({\n      top: scrollElement.scrollHeight,\n      behavior,\n    });\n  }\n};\n\nexport const handleScroll = (\n  scrollElement: HTMLElement,\n  hasUserScrolled: boolean,\n  setShowScrollButton: (show: boolean) => void,\n  setShouldAutoScroll: (scroll: boolean) => void,\n  setHasUserScrolled: (scrolled: boolean) => void\n) => {\n  const { scrollTop, scrollHeight, clientHeight } = scrollElement;\n  const isNearBottom = scrollHeight - scrollTop - clientHeight < 100;\n  const needsScroll = scrollHeight > clientHeight;\n  setShowScrollButton(!isNearBottom && needsScroll);\n  setShouldAutoScroll(isNearBottom);\n  if (!hasUserScrolled && !isNearBottom) {\n    setHasUserScrolled(true);\n  }\n};\n"
  },
  {
    "path": "Frontend/src/utils/webAudioRecorder.ts",
    "content": "export class WebAudioRecorder {\n  private mediaRecorder: MediaRecorder | null = null;\n  private audioChunks: Blob[] = [];\n  private stream: MediaStream | null = null;\n\n  private static getSupportedMimeType(): string {\n    const types = [\n      \"audio/webm;codecs=opus\",\n      \"audio/webm\",\n      \"audio/ogg;codecs=opus\",\n      \"audio/wav\",\n      \"audio/mp4\",\n    ];\n\n    for (const type of types) {\n      if (MediaRecorder.isTypeSupported(type)) {\n        return type;\n      }\n    }\n    throw new Error(\"No supported audio MIME types found\");\n  }\n\n  async startRecording(): Promise<void> {\n    try {\n      // Request high-quality audio with fallback options\n      const constraints: MediaTrackConstraints = {\n        channelCount: { ideal: 1 }, // Mono preferred for speech\n        sampleRate: { ideal: 44100, min: 16000 }, // Fallback to lower sample rate if needed\n        echoCancellation: true,\n        noiseSuppression: true,\n        autoGainControl: true,\n      };\n\n      this.stream = await navigator.mediaDevices.getUserMedia({\n        audio: constraints,\n      });\n\n      // Get supported mime type and configure options\n      const mimeType = WebAudioRecorder.getSupportedMimeType();\n      const options: MediaRecorderOptions = {\n        mimeType,\n        audioBitsPerSecond: 128000,\n      };\n\n      try {\n        this.mediaRecorder = new MediaRecorder(this.stream, options);\n      } catch (err) {\n        // Fallback to default options if custom options fail\n        console.warn(\n          \"Failed to create MediaRecorder with options, falling back to defaults:\",\n          err\n        );\n        this.mediaRecorder = new MediaRecorder(this.stream);\n      }\n\n      this.audioChunks = [];\n\n      this.mediaRecorder.ondataavailable = (event) => {\n        if (event.data.size > 0) {\n          this.audioChunks.push(event.data);\n        }\n      };\n\n      // Add error handler\n      
this.mediaRecorder.onerror = (event) => {\n        console.error(\"MediaRecorder error:\", event);\n        this.cleanup();\n      };\n\n      // Collect data more frequently for smoother recording\n      this.mediaRecorder.start(100);\n    } catch (error) {\n      this.cleanup();\n      console.error(\"Error starting recording:\", error);\n      throw new Error(\n        `Failed to start recording: ${\n          error instanceof Error ? error.message : \"Unknown error\"\n        }`\n      );\n    }\n  }\n\n  async stopRecording(): Promise<ArrayBuffer> {\n    return new Promise((resolve, reject) => {\n      if (!this.mediaRecorder) {\n        reject(new Error(\"MediaRecorder not initialized\"));\n        return;\n      }\n\n      const timeoutId = setTimeout(() => {\n        this.cleanup();\n        reject(new Error(\"Recording stop timeout\"));\n      }, 5000); // 5 second timeout\n\n      this.mediaRecorder.onstop = async () => {\n        try {\n          clearTimeout(timeoutId);\n          const mimeType = this.mediaRecorder?.mimeType || \"audio/webm\";\n          const audioBlob = new Blob(this.audioChunks, { type: mimeType });\n          const arrayBuffer = await audioBlob.arrayBuffer();\n\n          this.cleanup();\n          resolve(arrayBuffer);\n        } catch (error) {\n          this.cleanup();\n          console.error(\"Error in onstop handler:\", error);\n          reject(\n            error instanceof Error\n              ? error\n              : new Error(\"Unknown error while stopping recording\")\n          );\n        }\n      };\n\n      try {\n        this.mediaRecorder.stop();\n      } catch (error) {\n        clearTimeout(timeoutId);\n        this.cleanup();\n        console.error(\"Error stopping MediaRecorder:\", error);\n        reject(\n          error instanceof Error ? 
error : new Error(\"Failed to stop recording\")\n        );\n      }\n    });\n  }\n\n  private cleanup(): void {\n    try {\n      if (this.mediaRecorder?.state === \"recording\") {\n        this.mediaRecorder.stop();\n      }\n      this.audioChunks = [];\n      if (this.stream) {\n        this.stream.getTracks().forEach((track) => track.stop());\n        this.stream = null;\n      }\n      this.mediaRecorder = null;\n    } catch (error) {\n      console.error(\"Error during cleanup:\", error);\n    }\n  }\n\n  isRecording(): boolean {\n    return this.mediaRecorder?.state === \"recording\";\n  }\n\n  cancelRecording(): void {\n    this.cleanup();\n  }\n}\n"
  },
  {
    "path": "Frontend/tailwind.config.js",
    "content": "/** @type {import('tailwindcss').Config} */\nmodule.exports = {\n  darkMode: [\"class\"],\n  content: [\"./index.html\", \"./src/**/*.{ts,tsx,js,jsx}\"],\n  theme: {\n    extend: {\n      keyframes: {\n        \"fade-in\": {\n          \"0%\": { opacity: \"0\", transform: \"translateY(10px)\" },\n          \"100%\": { opacity: \"1\", transform: \"translateY(0)\" },\n        },\n        \"fade-out\": {\n          \"0%\": { opacity: \"1\", transform: \"translateY(0)\" },\n          \"100%\": { opacity: \"0\", transform: \"translateY(-10px)\" },\n        },\n        pulse: {\n          \"0%, 100%\": { boxShadow: \"0 0 0 0 var(--pulse-color)\" },\n          \"50%\": { boxShadow: \"0 0 0 8px var(--pulse-color)\" },\n        },\n      },\n      animation: {\n        \"fade-in\": \"fade-in 0.6s ease-out\",\n        \"fade-out\": \"fade-out 0.6s ease-out forwards\",\n        pulse: \"pulse var(--duration) ease-out infinite\",\n      },\n      fontFamily: {\n        inter: [\"Inter\", \"sans-serif\"],\n        roboto: [\"Roboto\", \"sans-serif\"],\n        lato: [\"Lato\", \"sans-serif\"],\n        merriweather: [\"Merriweather\", \"serif\"],\n        \"fira-code\": [\"Fira Code\", \"monospace\"],\n      },\n      borderRadius: {\n        lg: \"var(--radius)\",\n        md: \"calc(var(--radius) - 2px)\",\n        sm: \"calc(var(--radius) - 4px)\",\n      },\n      colors: {\n        background: \"hsl(var(--background))\",\n        foreground: \"hsl(var(--foreground))\",\n        card: {\n          DEFAULT: \"hsl(var(--card))\",\n          foreground: \"hsl(var(--card-foreground))\",\n        },\n        popover: {\n          DEFAULT: \"hsl(var(--popover))\",\n          foreground: \"hsl(var(--popover-foreground))\",\n        },\n        primary: {\n          DEFAULT: \"hsl(var(--primary))\",\n          foreground: \"hsl(var(--primary-foreground))\",\n        },\n        secondary: {\n          DEFAULT: \"hsl(var(--secondary))\",\n          foreground: 
\"hsl(var(--secondary-foreground))\",\n        },\n        muted: {\n          DEFAULT: \"hsl(var(--muted))\",\n          foreground: \"hsl(var(--muted-foreground))\",\n        },\n        accent: {\n          DEFAULT: \"hsl(var(--accent))\",\n          foreground: \"hsl(var(--accent-foreground))\",\n        },\n        destructive: {\n          DEFAULT: \"hsl(var(--destructive))\",\n          foreground: \"hsl(var(--destructive-foreground))\",\n        },\n        border: \"hsl(var(--border))\",\n        input: \"hsl(var(--input))\",\n        ring: \"hsl(var(--ring))\",\n        chart: {\n          1: \"hsl(var(--chart-1))\",\n          2: \"hsl(var(--chart-2))\",\n          3: \"hsl(var(--chart-3))\",\n          4: \"hsl(var(--chart-4))\",\n          5: \"hsl(var(--chart-5))\",\n        },\n      },\n    },\n  },\n  // safelist is a top-level Tailwind option; it is ignored when nested in `theme`\n  safelist: [\n    \"font-inter\",\n    \"font-roboto\",\n    \"font-lato\",\n    \"font-merriweather\",\n    \"font-fira-code\",\n  ],\n  plugins: [require(\"tailwindcss-animate\"), require(\"@tailwindcss/typography\")],\n};\n"
  },
  {
    "path": "Frontend/tsconfig.app.json",
    "content": "{\n  \"compilerOptions\": {\n    \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.app.tsbuildinfo\",\n    \"types\": [\"./types\"],\n    \"target\": \"ES2020\",\n    \"useDefineForClassFields\": true,\n    \"lib\": [\"ES2020\", \"DOM\", \"DOM.Iterable\"],\n    \"module\": \"ESNext\",\n    \"skipLibCheck\": true,\n\n    /* Bundler mode */\n    \"moduleResolution\": \"bundler\",\n    \"allowImportingTsExtensions\": true,\n    \"isolatedModules\": true,\n    \"moduleDetection\": \"force\",\n    \"noEmit\": true,\n    \"jsx\": \"react-jsx\",\n\n    /* Linting */\n    \"strict\": true,\n    \"noUnusedLocals\": true,\n    \"noUnusedParameters\": true,\n    \"noFallthroughCasesInSwitch\": true,\n    \"noUncheckedSideEffectImports\": true,\n\n    /* Path aliases — must live inside compilerOptions to take effect */\n    \"baseUrl\": \".\",\n    \"paths\": {\n      \"@/*\": [\"./src/*\"]\n    }\n  },\n  \"include\": [\"src\", \"types.d.ts\", \"src/**/*\"],\n  \"exclude\": [\"src/electron\"]\n}\n"
  },
  {
    "path": "Frontend/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"ESNext\",\n    \"useDefineForClassFields\": true,\n    \"lib\": [\"DOM\", \"DOM.Iterable\", \"ESNext\"],\n    \"allowJs\": false,\n    \"skipLibCheck\": true,\n    \"esModuleInterop\": false,\n    \"allowSyntheticDefaultImports\": true,\n    \"strict\": true,\n    \"forceConsistentCasingInFileNames\": true,\n    \"module\": \"ESNext\",\n    \"moduleResolution\": \"Node\",\n    \"resolveJsonModule\": true,\n    \"isolatedModules\": true,\n    \"noEmit\": true,\n    \"jsx\": \"react-jsx\",\n    \"types\": [\"vite/client\", \"node\"],\n    \"baseUrl\": \".\",\n    \"paths\": {\n      \"@/*\": [\"./src/*\"],\n      \"@/ui/*\": [\"./src/app/*\"],\n      \"@/components/*\": [\"./src/components/*\"]\n    }\n  },\n  \"include\": [\"**/*.ts\", \"**/*.tsx\", \"./vite.config.ts\", \"./types\"],\n  \"references\": [{ \"path\": \"./tsconfig.node.json\" }]\n}"
  },
  {
    "path": "Frontend/tsconfig.node.json",
    "content": "{\n  \"compilerOptions\": {\n    \"composite\": true,\n    \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.node.tsbuildinfo\",\n    \"target\": \"ES2022\",\n    \"lib\": [\"ES2023\"],\n    \"module\": \"ESNext\",\n    \"skipLibCheck\": true,\n    \"moduleResolution\": \"bundler\",\n    \"isolatedModules\": true,\n    \"moduleDetection\": \"force\",\n    \"noEmit\": false,\n    \"strict\": true,\n    \"noUnusedLocals\": true,\n    \"noUnusedParameters\": true,\n    \"noFallthroughCasesInSwitch\": true,\n    \"allowSyntheticDefaultImports\": true,\n    \"forceConsistentCasingInFileNames\": true,\n    \"resolveJsonModule\": true\n  },\n  \"include\": [\"vite.config.ts\", \"../../types.d.ts\"]\n}\n"
  },
  {
    "path": "Frontend/types.d.ts",
    "content": "type Statistics = {\n  cpuUsage: number;\n  memoryUsage: number;\n  storageUsage: number;\n};\n\ntype StaticData = {\n  totalStorage: number;\n  totalMemoryGB: number;\n  cpuModel: string;\n};\n\ntype UnsubscribeFunction = () => void;\n\ntype View =\n  | \"Chat\"\n  | \"Library\"\n  | \"Settings\"\n  | \"Signup\"\n  | \"SelectAccount\"\n  | \"History\"\n  | \"FileExplorer\";\n\ntype User = {\n  id: number;\n  name: string;\n};\n\ntype AzureModel = {\n  id: number;\n  name: string;\n  endpoint: string;\n  deployment: string;\n  apiKey: string;\n};\n\ntype Message = {\n  role: \"user\" | \"assistant\" | \"system\";\n  content: string;\n  reasoning_content?: string;\n  timestamp?: Date;\n  isRetrieval?: boolean;\n  collectionId?: number;\n  conversationId?: number;\n  data_id?: number;\n  data_content?: string;\n};\n\ntype ReasoningEffort = \"low\" | \"medium\" | \"high\";\n\ninterface UserSettings {\n  userId?: number;\n  id?: number;\n  cot?: number;\n  vectorstore?: string;\n  prompt?: string;\n  temperature?: number;\n  model?: string;\n  provider?: string;\n  isLocal?: boolean;\n  modelDirectory?: string;\n  modelType?: string;\n  modelLocation?: string;\n  ollamaIntegration?: number;\n  ollamaModel?: string;\n  baseUrl?: string;\n  selectedAzureId?: number;\n  selectedCustomId?: number;\n  displayModel?: string;\n  maxTokens?: number;\n  topP?: number;\n  promptId?: number;\n  webSearch?: number;\n  reasoningEffort?: ReasoningEffort;\n  selectedExternalOllamaId?: number;\n}\n\ntype Collection = {\n  id: number;\n  name: string;\n  description: string;\n  type: string;\n  files: string;\n  userId: number;\n};\n\ntype ApiKey = {\n  id: number;\n  key: string;\n  provider: string;\n};\n\ntype Conversation = {\n  id: number;\n  title: string;\n  userId: number;\n  created_at: Date;\n};\n\ninterface UserPrompts {\n  id: number;\n  name: string;\n  prompt: string;\n  userId: number;\n}\ntype FrameWindowAction = \"close\" | \"minimize\" | \"maximize\" | 
\"unmaximize\";\n\ninterface TranscribeAudioInput {\n  userId: number;\n  audioData: Buffer;\n}\n\ninterface TranscribeAudioOutput {\n  success: boolean;\n  transcription?: string;\n  language?: string;\n  error?: string;\n}\n\ninterface Model {\n  name: string;\n  type: string;\n  model_location: string;\n  modified_at: string;\n  size: number;\n  digest: string;\n}\n\ninterface CustomModel {\n  id: number;\n  user_id: number;\n  name: string;\n  endpoint: string;\n  api_key: string;\n  model?: string;\n}\n\ninterface DownloadModelProgress {\n  type: \"progress\";\n  data: {\n    message: string;\n    fileName?: string;\n    fileNumber?: number;\n    totalFiles?: number;\n    fileProgress?: number;\n    totalProgress: number;\n  };\n}\n\ninterface EventPayloadMapping {\n  resetAppState: void;\n  statistics: Statistics;\n  getStaticData: StaticData;\n  frameWindowAction: FrameWindowAction;\n  changeView: View;\n  openDevTools: void;\n  openDirectory: string;\n  resizeWindow: {\n    width: number;\n    height: number;\n  };\n  getCustomAPIs: { userId: number };\n  chatRequest: {\n    messages: Message[];\n    activeUser: User;\n    conversationId?: bigint | number;\n    title?: string | undefined;\n    collectionId?: bigint | number | undefined;\n    requestId: number;\n  };\n  abortChatRequest: number;\n  changeUser: void;\n  quit: void;\n  undo: void;\n  redo: void;\n  cut: void;\n  copy: void;\n  paste: void;\n  delete: void;\n  selectAll: void;\n  print: void;\n  chat: void;\n  history: void;\n  setApiKey: { success: boolean; apiKey?: string };\n  messageChunk: string;\n  streamEnd: void;\n  offStreamEnd: void;\n  getUsers: { users: { name: string; id: number }[] };\n  addUser: { name: string };\n  updateUserSettings: UserSettings;\n  getUserSettings: { userId: number };\n  getUserPrompts: { userId: number };\n  addUserPrompt: { userId: number; name: string; prompt: string };\n  updateUserPrompt: {\n    userId: number;\n    id: number;\n    name: string;\n    
prompt: string;\n  };\n  openCollectionFolderFromFileExplorer: { filepath: string };\n  openCollectionFolder: { filepath: string };\n  addAPIKey: { userId: number; key: string; provider: string };\n  createCollection: {\n    userId: number;\n    name: string;\n    description: string;\n    type: string;\n    isLocal: boolean;\n    localEmbeddingModel: string;\n  };\n  getConversationMessagesWithData: {\n    userId: number;\n    conversationId: number;\n  };\n  addUserConversation: { userId: number; input: string };\n  deleteCollection: { userId: number; id: number; collectionName: string };\n  getUserCollections: { userId: number };\n  getUserApiKeys: { userId: number };\n  getUserConversations: { userId: number };\n  getConversationMessages: { userId: number; conversationId: number };\n  addFileToCollection: {\n    userId: number;\n    userName: string;\n    collectionId: number;\n    collectionName: string;\n    fileName: string;\n    fileContent: string;\n  };\n  vectorstoreQuery: {\n    userId: number;\n    userName: string;\n    collectionId: number;\n    collectionName: string;\n    query: string;\n    conversationId: number;\n  };\n  getFilesInCollection: {\n    userId: number;\n    collectionId: number;\n  };\n  getPlatform: { platform: \"win32\" | \"darwin\" | \"linux\" };\n  keyValidation: {\n    apiKey: string;\n    inputProvider: string;\n  };\n  youtubeIngest: {\n    url: string;\n    userId: number;\n    userName: string;\n    collectionId: number;\n    collectionName: string;\n  };\n  systemSpecs: {\n    cpu: string;\n    vram: string;\n    GPU_Manufacturer?: string;\n  };\n  stopRecording: { text: string };\n  websiteFetch: {\n    url: string;\n    userId: number;\n    userName: string;\n    collectionId: number;\n    collectionName: string;\n  };\n  webcrawl: {\n    base_url: string;\n    max_workers: number;\n    collection_name: string;\n    collection_id: number;\n    user_id: number;\n    user_name: string;\n  };\n  fetchOllamaModels: { models: 
string[] };\n  \"ingest-progress\": string;\n  cancelEmbed: { userId: number };\n  deleteConversation: { userId: number; conversationId: number };\n  resetUserState: void;\n  checkOllama: { isOllamaRunning: boolean };\n  runOllama: { model: string; user: User };\n  ollamaProgress: OllamaProgressEvent;\n  pullModel: { model: string };\n  transcribeAudio: TranscribeAudioInput;\n  checkIfFFMPEGInstalled: { success: boolean; message: boolean };\n  deleteCollection: {\n    collectionId: number;\n    collectionName: string;\n    userId: number;\n  };\n  loadModel: {\n    model_location: string;\n    model_name: string;\n    model_type?: string;\n    user_id: number;\n  };\n  addDevAPIKey: {\n    userId: number;\n    name: string;\n    expiration: string | null;\n  };\n  getDevAPIKeys: { userId: number };\n  deleteDevAPIKey: { userId: number; id: number };\n  cancelWebcrawl: { userId: number };\n  getUserCollectionFiles: {\n    userId: number;\n    userName: string;\n  };\n  removeFileorFolder: {\n    userId: number;\n    userName: string;\n    file: string;\n  };\n  renameFile: {\n    userId: number;\n    userName: string;\n    file: string;\n    newName: string;\n    success: boolean;\n  };\n  getOpenRouterModel: { userId: number };\n  addOpenRouterModel: { userId: number; model: string };\n  deleteOpenRouterModel: { userId: number; id: number };\n  getOpenRouterModels: { userId: number };\n  getDirModels: { dirPath: string };\n  getModelInfo: {\n    model_location: string;\n    model_name: string;\n    model_type?: string;\n    user_id: number;\n  };\n  unloadModel: {\n    model_location: string;\n    model_name: string;\n    model_type?: string;\n    user_id: number;\n  };\n  downloadModel: {\n    modelId: string;\n    dirPath: string;\n    hfToken?: string;\n  };\n  \"download-model-progress\": DownloadModelProgress;\n  cancelDownload: { success: boolean };\n  deleteAzureOpenAIModel: { userId: number; id: number };\n  getAzureOpenAIModels: { userId: number };\n  
getAzureOpenAIModel: { userId: number; id: number };\n  addAzureOpenAIModel: {\n    userId: number;\n    name: string;\n    model: string;\n    endpoint: string;\n    api_key: string;\n  };\n  getModelsPath: string;\n  getEmbeddingsModels: { models: Model[] };\n  getCustomAPI: { userId: number };\n  addCustomAPI: {\n    userId: number;\n    name: string;\n    endpoint: string;\n    api_key: string;\n    model: string;\n  };\n  deleteCustomAPI: { userId: number; id: number };\n  addCustomApi: {\n    userId: number;\n    name: string;\n    endpoint: string;\n    api_key: string;\n    model: string;\n  };\n  deleteCustomApi: {\n    userId: number;\n    id: number;\n  };\n  getCustomAPI: {\n    userId: number;\n    id: number;\n  };\n  getCustomModels: {\n    userId: number;\n  };\n  getUserTools: {\n    userId: number;\n  };\n  addUserTool: {\n    userId: number;\n    toolId: number;\n    enabled: number;\n    docked: number;\n  };\n  removeUserTool: {\n    userId: number;\n    toolId: number;\n  };\n  updateUserTool: {\n    userId: number;\n    toolId: number;\n    enabled: number;\n    docked: number;\n  };\n  getTools: void;\n  addExternalOllama: {\n    userId: number;\n    name: string;\n    endpoint: string;\n    api_key: string;\n    model: string;\n  };\n  getExternalOllama: { userId: number };\n}\n\ninterface Window {\n  electron: {\n    unloadModel: (payload: {\n      model_location: string;\n      model_name: string;\n      model_type?: string;\n      user_id: number;\n    }) => Promise<void>;\n    getModelInfo: (payload: {\n      model_location: string;\n      model_name: string;\n      model_type?: string;\n      user_id: number;\n    }) => Promise<{ model_info: Model }>;\n    getDirModels: (dirPath: string) => Promise<{\n      dirPath: string;\n      models: Model[];\n    }>;\n    pullModel: (model: string) => Promise<void>;\n    changeUser: () => Promise<void>;\n    quit: () => Promise<void>;\n    undo: () => Promise<void>;\n    redo: () => 
Promise<void>;\n    cut: () => Promise<void>;\n    copy: () => Promise<void>;\n    paste: () => Promise<void>;\n    delete: () => Promise<void>;\n    selectAll: () => Promise<void>;\n    print: () => Promise<void>;\n    chat: () => Promise<void>;\n    history: () => Promise<void>;\n    openDirectory: () => Promise<string>;\n    cancelEmbed: (payload: { userId: number }) => Promise<void>;\n    subscribeStatistics: (\n      callback: (statistics: Statistics) => void\n    ) => UnsubscribeFunction;\n    getStaticData: () => Promise<StaticData>;\n    subscribeChangeView: (\n      callback: (view: View) => void\n    ) => UnsubscribeFunction;\n    openDevTools: () => void;\n    sendFrameAction: (payload: FrameWindowAction) => void;\n    resizeWindow: (width: number, height: number) => void;\n    addUserConversation: (\n      userId: number,\n      input: string\n    ) => Promise<{\n      id: bigint | number;\n      title: string;\n    }>;\n    getCustomAPIs: (userId: number) => Promise<{\n      api: {\n        id: number;\n        user_id: number;\n        name: string;\n        endpoint: string;\n        api_key: string;\n      }[];\n    }>;\n    chatRequest: (\n      messages: Message[],\n      activeUser: User,\n      conversationId?: bigint | number,\n      collectionId?: bigint | number | undefined,\n      title?: string,\n      requestId?: number\n    ) => Promise<{\n      messages: Message[];\n      conversationId?: bigint | number;\n      title: string;\n      error?: string;\n    }>;\n    fetchOllamaModels: () => Promise<{ models: OllamaModel[] }>;\n    abortChatRequest: (requestId: number) => void;\n    onMessageChunk: (callback: (chunk: string) => void) => void;\n    offMessageChunk: (callback: (chunk: string) => void) => void;\n    onStreamEnd: (callback: () => void) => void;\n    offStreamEnd: (callback: () => void) => void;\n    setApiKey: (\n      apiKey?: string\n    ) => Promise<{ success: boolean; apiKey?: string }>;\n    getUsers: () => Promise<{ users: 
{ name: string; id: number }[] }>;\n    addUser: (name: string) => Promise<{\n      name: string;\n      error?: string;\n    }>;\n    updateUserSettings: (UserSettings: UserSettings) => Promise<UserSettings>;\n    getUserSettings: (userId: number) => Promise<UserSettings>;\n    getUserPrompts: (userId: number) => Promise<{ prompts: UserPrompts[] }>;\n    addUserPrompt: (\n      userId: number,\n      name: string,\n      prompt: string\n    ) => Promise<UserPrompts>;\n    getPlatform: () => Promise<{ platform: \"win32\" | \"darwin\" | \"linux\" }>;\n    updateUserPrompt: (\n      userId: number,\n      id: number,\n      name: string,\n      prompt: string\n    ) => Promise<UserPrompts>;\n    addAPIKey: (\n      userId: number,\n      key: string,\n      provider: string\n    ) => Promise<{ userId: number; key: string; provider: string }>;\n    createCollection: (\n      userId: number,\n      name: string,\n      description: string,\n      type: string,\n      isLocal: boolean,\n      localEmbeddingModel: string\n    ) => Promise<{\n      id: number;\n      name: string;\n      description: string;\n      type: string;\n    }>;\n    removeFileorFolder: (\n      userId: number,\n      userName: string,\n      file: string\n    ) => Promise<{\n      userId: number;\n      userName: string;\n      file: string;\n      success: boolean;\n    }>;\n    renameFile: (\n      userId: number,\n      userName: string,\n      file: string,\n      newName: string\n    ) => Promise<{\n      userId: number;\n      userName: string;\n      file: string;\n      newName: string;\n      success: boolean;\n    }>;\n    getUserCollections: (\n      userId: number\n    ) => Promise<{ collections: Collection[] }>;\n    getUserApiKeys: (userId: number) => Promise<{ apiKeys: ApiKey[] }>;\n    getUserConversations: (\n      userId: number\n    ) => Promise<{ conversations: Conversation[] }>;\n    addFileToCollection: (\n      userId: number,\n      userName: string,\n      collectionId: 
number,\n      collectionName: string,\n      fileName: string,\n      fileContent: string\n    ) => Promise<{\n      result: {\n        success: boolean;\n      };\n    }>;\n    openCollectionFolder: (filepath: string) => void;\n    openCollectionFolderFromFileExplorer: (filepath: string) => void;\n    vectorstoreQuery: (\n      userId: number,\n      userName: string,\n      collectionId: number,\n      collectionName: string,\n      query: string,\n      conversationId: number\n    ) => Promise<{\n      results: {\n        content: string;\n        source: string;\n      }[];\n      conversationId: number;\n      status: string;\n    }>;\n    getFilesInCollection: (\n      userId: number,\n      collectionId: number\n    ) => Promise<{\n      files: string[];\n    }>;\n    getConversationMessagesWithData: (\n      userId: number,\n      conversationId: number,\n      collectionId?: number\n    ) => Promise<{ messages: Message[] }>;\n    keyValidation: ({\n      apiKey: string,\n      inputProvider: string,\n    }) => Promise<{ error?: string; success?: boolean }>;\n    getConversationMessages: (\n      userId: number,\n      conversationId: number\n    ) => Promise<{ messages: Message[] }>;\n    onIngestProgress: (\n      callback: (event: Electron.IpcRendererEvent, message: string) => void\n    ) => void;\n    on: (\n      channel:\n        | \"ingest-progress\"\n        | \"ollama-progress\"\n        | \"download-model-progress\",\n      func: (\n        event: Electron.IpcRendererEvent,\n        message: string | OllamaProgressEvent | DownloadModelProgress\n      ) => void\n    ) => void;\n    removeListener: (\n      channel:\n        | \"ingest-progress\"\n        | \"ollama-progress\"\n        | \"download-model-progress\",\n      func: (\n        event: Electron.IpcRendererEvent,\n        message: string | OllamaProgressEvent | DownloadModelProgress\n      ) => void\n    ) => void;\n    deleteConversation: (\n      userId: number,\n      conversationId: 
number\n    ) => Promise<{ userId: number; conversationId: number }>;\n    youtubeIngest: (\n      url: string,\n      userId: number,\n      userName: string,\n      collectionId: number,\n      collectionName: string\n    ) => Promise<{\n      url: string;\n      userId: number;\n      userName: string;\n      collectionId: number;\n      collectionName: string;\n    }>;\n    systemSpecs: () => Promise<{\n      cpu: string;\n      vram: string;\n      GPU_Manufacturer?: string;\n    }>;\n    checkOllama: () => Promise<{ isOllamaRunning: boolean }>;\n    runOllama: (\n      model: string,\n      user: User\n    ) => Promise<{ success: boolean; error?: string }>;\n    websiteFetch: (\n      url: string,\n      userId: number,\n      userName: string,\n      collectionId: number,\n      collectionName: string\n    ) => Promise<{\n      success: boolean;\n      content?: string;\n      textContent?: string;\n      metadata?: {\n        title: string;\n        description: string;\n        author: string;\n        keywords: string;\n        ogImage: string;\n      };\n    }>;\n    getEmbeddingsModels: () => Promise<{ models: Model[] }>;\n    webcrawl: (payload: {\n      base_url: string;\n      user_id: number;\n      user_name: string;\n      collection_id: number;\n      collection_name: string;\n      max_workers: number;\n    }) => Promise<{\n      base_url: string;\n      user_id: number;\n      user_name: string;\n      collection_id: number;\n      collection_name: string;\n      max_workers: number;\n      status: string;\n    }>;\n    deleteAzureOpenAIModel: (\n      userId: number,\n      id: number\n    ) => Promise<{\n      userId: number;\n      id: number;\n      success: boolean;\n    }>;\n    getAzureOpenAIModels: (userId: number) => Promise<{\n      models: {\n        id: number;\n        name: string;\n        model: string;\n        endpoint: string;\n        api_key: string;\n      }[];\n    }>;\n    getAzureOpenAIModel: (\n      userId: number,\n  
    id: number\n    ) => Promise<{\n      id: number;\n      name: string;\n      model: string;\n      endpoint: string;\n      api_key: string;\n    }>;\n    addAzureOpenAIModel: (\n      userId: number,\n      name: string,\n      model: string,\n      endpoint: string,\n      api_key: string\n    ) => Promise<{\n      id: number;\n    }>;\n    downloadModel: (payload: {\n      modelId: string;\n      dirPath: string;\n      hfToken?: string;\n    }) => Promise<void>;\n    cancelDownload: () => Promise<{ success: boolean }>;\n    subscribeResetUserState: (callback: () => void) => UnsubscribeFunction;\n    transcribeAudio: (\n      audioData: ArrayBuffer,\n      userId: number\n    ) => Promise<TranscribeAudioOutput>;\n    checkIfFFMPEGInstalled: () => Promise<{\n      success: boolean;\n      message: boolean;\n    }>;\n    deleteCollection: (\n      collectionId: number,\n      collectionName: string,\n      userId: number\n    ) => Promise<{\n      collectionId: number;\n      collectionName: string;\n      userId: number;\n    }>;\n    addDevAPIKey: (\n      userId: number,\n      name: string,\n      expiration: string | null\n    ) => Promise<Keys>;\n    getDevAPIKeys: (userId: number) => Promise<{ keys: Keys[] }>;\n    deleteDevAPIKey: (\n      userId: number,\n      id: number\n    ) => Promise<{\n      userId: number;\n      id: number;\n      result: boolean;\n    }>;\n    cancelWebcrawl: (userId: number) => Promise<{\n      userId: number;\n      result: boolean;\n    }>;\n    getUserCollectionFiles: (\n      userId: number,\n      userName: string\n    ) => Promise<{\n      files: string[];\n    }>;\n    getOpenRouterModel: (userId: number) => Promise<{ model: string }>;\n    addOpenRouterModel: (userId: number, model: string) => Promise<void>;\n    deleteOpenRouterModel: (userId: number, id: number) => Promise<void>;\n    getOpenRouterModels: (userId: number) => Promise<{ models: string[] }>;\n    loadModel: (payload: {\n      model_location: 
string;\n      model_name: string;\n      model_type?: string;\n      user_id: number;\n    }) => Promise<void>;\n    getModelsPath: () => Promise<string>;\n    getCustomAPI: (\n      userId: number,\n      id: number\n    ) => Promise<{\n      api: {\n        id: number;\n        user_id: number;\n        name: string;\n        endpoint: string;\n        api_key: string;\n        model: string;\n      }[];\n    }>;\n    getCustomModels: (userId: number) => Promise<{\n      models: {\n        id: number;\n        user_id: number;\n        name: string;\n        endpoint: string;\n        api_key: string;\n        model: string;\n      }[];\n    }>;\n    addCustomAPI: (\n      userId: number,\n      name: string,\n      endpoint: string,\n      api_key: string,\n      model: string\n    ) => Promise<{\n      id: number;\n    }>;\n    deleteCustomAPI: (userId: number, id: number) => Promise<void>;\n    getUserTools: (userId: number) => Promise<{\n      tools: {\n        id: number;\n        name: string;\n        enabled: number;\n        docked: number;\n      }[];\n    }>;\n    addUserTool: (\n      userId: number,\n      toolId: number,\n      enabled: number,\n      docked: number\n    ) => Promise<{\n      result: number;\n    }>;\n    removeUserTool: (\n      userId: number,\n      toolId: number\n    ) => Promise<{\n      result: boolean;\n    }>;\n    getTools: () => Promise<{\n      tools: {\n        id: number;\n        name: string;\n        description: string;\n      }[];\n    }>;\n    updateUserTool: (\n      userId: number,\n      toolId: number,\n      enabled: number,\n      docked: number\n    ) => Promise<{\n      result: boolean;\n    }>;\n    addExternalOllama: (\n      userId: number,\n      name: string,\n      endpoint: string,\n      api_key: string,\n      model: string\n    ) => Promise<{\n      id: number;\n    }>;\n    getExternalOllama: (userId: number) => Promise<{\n      ollama: ExternalOllama[];\n    }>;\n  };\n}\ntype Keys = {\n  id: 
number;\n  userId: number;\n  name: string;\n  key: string;\n  expiration: string | null;\n};\ninterface DataContent {\n  top_k: number;\n  results: {\n    content: string;\n    metadata: {\n      source: string;\n      title?: string;\n      chunk_start?: number;\n      chunk_end?: number;\n    };\n  }[];\n}\n\ntype OpenRouterModel = string;\n\ninterface ProgressData extends CustomProgressData, OllamaProgressEvent {}\n\ntype LLMProvider =\n  | \"openai\"\n  | \"anthropic\"\n  | \"gemini\"\n  | \"xai\"\n  | \"openrouter\"\n  | \"local\"\n  | \"ollama\"\n  | \"azure open ai\"\n  | \"custom\"\n  | \"ollama external\";\n\ninterface OllamaProgressEvent {\n  type: \"pull\" | \"verify\";\n  output: string;\n}\n\ninterface Electron {\n  on(\n    channel: \"ingest-progress\" | \"ollama-progress\" | \"download-model-progress\",\n    func: (\n      event: Electron.IpcRendererEvent,\n      message: string | OllamaProgressEvent | DownloadModelProgress\n    ) => void\n  ): void;\n  removeListener(\n    channel: \"ingest-progress\" | \"ollama-progress\" | \"download-model-progress\",\n    func: (\n      event: Electron.IpcRendererEvent,\n      message: string | OllamaProgressEvent | DownloadModelProgress\n    ) => void\n  ): void;\n}\n\ninterface APIKey {\n  id: number;\n  key: string;\n  name: string;\n  expiration: string | null;\n}\n\ntype CustomProgressData = {\n  type?:\n    | \"info\"\n    | \"progress\"\n    | \"start\"\n    | \"processing\"\n    | \"saved\"\n    | \"links\"\n    | \"embedding_start\"\n    | \"embedding_progress\"\n    | \"complete\"\n    | \"error\";\n  message?: string;\n  chunk?: number;\n  totalChunks?: number;\n  percent_complete?: string;\n  est_remaining_time?: string;\n  status?: \"success\" | \"error\" | \"progress\";\n  current?: number;\n  total?: number;\n  url?: string;\n  count?: number;\n  current_batch?: number;\n  total_batches?: number;\n  data?: {\n    message?: string;\n    chunk?: number;\n    total_chunks?: number;\n    
percent_complete?: string;\n  };\n};\n\ninterface DownloadProgress {\n  type: \"progress\";\n  data: DownloadProgressData;\n}\ninterface OllamaModel {\n  name: string;\n  type: string;\n}\ninterface ExternalOllama {\n  id: number;\n  user_id: number;\n  name: string;\n  endpoint: string;\n  api_key: string;\n  model: string;\n}\ninterface DownloadProgressData {\n  message: string;\n  fileName?: string;\n  fileNumber?: number;\n  totalFiles?: number;\n  fileProgress?: number;\n  totalProgress: number;\n  currentSize?: string;\n  totalSize?: string;\n  currentStep?: string;\n  speed?: string;\n}\n\n/* Provider Response  & Chat Request Result */\n\ninterface ProviderResponse {\n  id: bigint | number;\n  messages: Message[];\n  title: string;\n  content: string;\n  reasoning?: string;\n  aborted: boolean;\n}\n\ninterface ChatRequestResult {\n  messages: Message[];\n  data_content?: string;\n  reasoning_content?: string;\n  id: bigint | number;\n  title: string;\n  error?: string;\n}\n\ninterface ProviderInputParams {\n  messages: Message[];\n  activeUser: User;\n  userSettings: UserSettings;\n  prompt: string | undefined;\n  conversationId: bigint | number;\n  mainWindow: BrowserWindow | null;\n  currentTitle: string;\n  collectionId?: number;\n  data?: {\n    top_k: number;\n    results: {\n      content: string;\n      metadata: string;\n    }[];\n  };\n  signal?: AbortSignal;\n}\n\ninterface Tool {\n  id: number;\n  name: string;\n  description: string;\n}\n\ninterface UserTool {\n  id: number;\n  name: string;\n  enabled: number;\n  docked: number;\n}\ntype WebSearchResult = {\n  metadata: {\n    title: string;\n    source: string;\n    description: string;\n    author: string;\n    keywords: string;\n    ogImage: string;\n  };\n  textContent: string;\n};\n"
  },
  {
    "path": "Frontend/vite.config.d.ts",
    "content": "declare const _default: import(\"vite\").UserConfig;\nexport default _default;\n"
  },
  {
    "path": "Frontend/vite.config.js",
    "content": "import { defineConfig } from \"vite\";\nimport react from \"@vitejs/plugin-react\";\nimport path from \"path\";\n// https://vitejs.dev/config/\nexport default defineConfig({\n    plugins: [react()],\n    base: \"./\",\n    build: {\n        outDir: \"dist-react\",\n        rollupOptions: {\n            input: {\n                main: path.resolve(__dirname, 'index.html'),\n                loading: path.resolve(__dirname, 'src/loading.html')\n            }\n        }\n    },\n    server: {\n        port: 5131,\n        strictPort: true,\n    },\n    resolve: {\n        alias: {\n            \"@\": path.resolve(__dirname, \"./src\"),\n            \"@/ui\": path.resolve(__dirname, \"./src/app\"),\n            \"@/components\": path.resolve(__dirname, \"./src/components\"),\n        },\n    },\n});\n"
  },
  {
    "path": "Frontend/vite.config.ts",
    "content": "import { defineConfig } from \"vite\";\nimport react from \"@vitejs/plugin-react\";\nimport path from \"path\";\n\n// https://vitejs.dev/config/\nexport default defineConfig({\n  plugins: [react()],\n  base: \"./\",\n  build: {\n    outDir: \"dist-react\",\n    rollupOptions: {\n      input: {\n        main: path.resolve(__dirname, 'index.html'),\n        loading: path.resolve(__dirname, 'src/loading.html')\n      }\n    }\n  },\n  server: {\n    port: 5131,\n    strictPort: true,\n  },\n  resolve: {\n    alias: {\n      \"@\": path.resolve(__dirname, \"./src\"),\n      \"@/ui\": path.resolve(__dirname, \"./src/app\"),\n      \"@/components\": path.resolve(__dirname, \"./src/components\"),\n    },\n  },\n});\n"
  },
  {
    "path": "LICENSE",
    "content": "Apache License\nVersion 2.0, January 2004\nhttp://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n\"License\" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.\n\n\"Licensor\" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.\n\n\"Legal Entity\" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, \"control\" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.\n\n\"You\" (or \"Your\") shall mean an individual or Legal Entity exercising permissions granted by this License.\n\n\"Source\" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.\n\n\"Object\" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.\n\n\"Work\" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).\n\n\"Derivative Works\" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.\n\n\"Contribution\" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, \"submitted\" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as \"Not a Contribution.\"\n\n\"Contributor\" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:\n\nYou must give any other recipients of the Work or Derivative Works a copy of this License; and\nYou must cause any modified files to carry prominent notices stating that You changed the files; and\nYou must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and\nIf the Work includes a \"NOTICE\" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.\nYou may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS"
  },
  {
    "path": "README.md",
    "content": "# Notate\n\nNotate is a powerful, cross-platform chat application designed for seamless AI interactions. It combines enterprise-grade features with an intuitive interface, supporting a wide range of AI models and local deployment options.\n\n## Key Features\n\n- 🤖 **Multi-Model Support**: Integrate with leading AI providers including OpenAI, Anthropic, Google, XAI, OpenRouter, and DeepSeek\n- 🏠 **Local Deployment**: Run models locally using llamacpp, transformers, or ollama inference\n- 📚 **RAG Integration**: Built-in support for document Q&A through ChromaDB integration\n- 🔧 **Flexible Configuration**: Custom API endpoints and comprehensive model settings\n- 🎯 **Advanced Features**: Experimental reasoning capabilities and developer API access\n- 🔒 **Privacy-Focused**: Local-only mode available for sensitive data handling\n\n## Quick Start\n\nDownload the latest version of Notate for your platform:\n\n- [Windows Installer](https://notate.hairetsu.com/download)\n- [macOS Installer](https://notate.hairetsu.com/download)\n- [Linux Deb](https://notate.hairetsu.com/download)\n\nFor detailed installation instructions, see our [Installation Guide](https://notate.hairetsu.com/docs/getting-started).\n\n## Documentation\n\n- [Getting Started](https://notate.hairetsu.com/docs/overview): A quick overview of Notate\n- [Installation Guide](https://notate.hairetsu.com/docs/getting-started): Detailed setup instructions\n- [Model Configuration](https://notate.hairetsu.com/docs/settings): Configure AI models and embeddings\n- [File Collections](https://notate.hairetsu.com/docs/collections): How to use File Collections\n- [File Collection Tools](https://notate.hairetsu.com/docs/collection-tools): Tools to ingest content from outside sources\n- [API Reference](https://notate.hairetsu.com/docs/developer-integration): Technical documentation for developers\n- [Troubleshooting](https://notate.hairetsu.com/docs/troubleshooting): Troubleshooting guide\n\nVisit our complete 
documentation at [https://notate.hairetsu.com/docs](https://notate.hairetsu.com/docs)\n\n## Community\n\nJoin our Discord community to get help, share feedback, and connect with other users and developers:\n[Discord Server](https://discord.gg/vEFAwB8wFC)\n\n## Support the Project\n\nIf you find this project helpful, consider supporting its development:\n\nDonations are used to cover the costs of running the project, including server costs, domain registration, signed certificates, and other expenses.\n\n[![PayPal](https://img.shields.io/badge/PayPal-donate-blue.svg)](https://www.paypal.com/donate/?hosted_button_id=W96TCRJ5Q3RJG)\n\n## Screenshots\n\n**Chat UI**\n![Notate Chat Screenshot](https://www.hairetsu.com/notate-12.png)\n\n**LLM Integrations**\n![Notate LLM Integrations Screenshot](https://www.hairetsu.com/notate-10.png)\n\n**Chat Settings**\n![Notate Collections Screenshot](https://www.hairetsu.com/notate-06.png)\n\n**Tool Settings**\n![Notate Collections Screenshot](https://www.hairetsu.com/notate-11.png)  \n\n**Ingestion from File or URL into ChromaDB**\n![Notate Data Intake Screenshot](https://www.hairetsu.com/notate-3.png)\n\n**RAG Chat Q/A**\n![Notate Collections Screenshot](https://www.hairetsu.com/notate-08.png)\n\n**Reasoning (Experimental)**\n![Notate Collections Screenshot](https://www.hairetsu.com/notate-09.png)\n\n**Dev API Key**\n![Notate Dev Screenshot](https://www.hairetsu.com/notate-2.png)\n\n### Local Only Mode Requirements\n\n_Windows CUDA_\n\n- Microsoft Visual Studio 2022 /w Desktop Development Tools C++ Build Tools\n- CUDA 12.6 toolkit or later\n\n_MacOS_\n\n- Xcode 15.0 or later\n\n- Python 3.12\n- Node.js v16 or higher\n- Package manager: npm or pnpm\n- At least 2GB of free disk space (Recommended 10GB+ minimum for local models and FileCollections)\n- Minimum 8GB RAM recommended\n- CPU: 4 cores or more\n- Nvidia RTX GPU (preferably 10GB VRAM or more) or Apple Silicon recommended for local model inference\n- Operating System:\n  - 
macOS 10.15 or later (Intel/Apple Silicon)\n  - Windows 10/11\n  - Linux\n\n### External Requirements\n\n- Python 3.12\n- Node.js v16 or higher\n- Package manager: npm or pnpm\n- CPU: 4 cores or more\n- MEMORY: 8GB RAM or more\n- DISK: 2GB free space (Recommended 4GB minimum for FileCollections)\n- OpenAI API key (optional)\n  - Required for OpenAI embeddings and GPT models\n  - Configure in settings after installation\n- Anthropic API key (optional)\n  - Required for Claude models\n  - Configure in settings after installation\n- Google API key (optional)\n  - Required for Google models\n  - Configure in settings after installation\n- XAI API key (optional)\n  - Required for XAI models\n  - Configure in settings after installation\n\n## Installation\n\n1. Clone the repository: `git clone https://github.com/CNTRLAI/Notate.git`\n2. Navigate to the electron project directory: `cd notate/Frontend`\n3. Install dependencies: `npm install` or `pnpm install`\n4. Build the frontend: `npm run build` or `pnpm run build`\n\n## Running the Application in Development Mode\n\n- Dev mode (macOS): `npm run dev:mac` or `pnpm run dev:mac`\n- Dev mode (Windows): `npm run dev:win` or `pnpm run dev:win`\n- Dev mode (Linux): `npm run dev:linux` or `pnpm run dev:linux`\n\n## Compiling to .exe, .dmg, and .AppImage\n\n- Production mode (macOS): `npm run dist:mac` or `pnpm run dist:mac`\n- Production mode (Windows): `npm run dist:win` or `pnpm run dist:win`\n- Production mode (Linux): `npm run dist:linux` or `pnpm run dist:linux`\n\n## Location of the Application\n\n(if Apple Silicon)\n\n- macOS: `Notate/Frontend/dist/mac-arm64/Notate.app`\n- macOS Installer: `Notate/Frontend/dist/Notate.dmg`\n\n(if Intel)\n\n- macOS: `Notate/Frontend/dist/mac/Notate.app`\n- macOS Installer: `Notate/Frontend/dist/Notate.dmg`\n\n(if Windows)\n\n- Executable: `Notate/Frontend/dist/Notate.exe`\n- Installer: `Notate/Frontend/dist/Notate.msi`\n\n(if Linux)\n\n- AppImage: `Notate/Frontend/dist/Notate.AppImage`\n- 
Debian Package: `Notate/Frontend/dist/Notate.deb`\n- RPM Package: `Notate/Frontend/dist/Notate.rpm`\n\n## Thanks\n\nSpecial thanks to our #1 beta tester Banks (@scallywag41) for their invaluable contributions and feedback during development.\n\n## Coming Soon\n\n- [ ] Chrome Extension For Ingesting Webpages/Files\n- [ ] Additional Agent Tools\n- [ ] Advanced Ingestion Settings\n- [ ] Additional Document Types\n- [ ] Output to Speech\n"
  }
]