Repository: reid41/QA-Pilot
Branch: main
Commit: 747a9ecb61a3
Files: 35
Total size: 173.1 KB
Directory structure:
gitextract_ot3ibsoy/
├── LICENSE
├── README.md
├── app.py
├── check_postgresql_connection.py
├── config/
│ ├── config.ini
│ └── prompt_templates.ini
├── llamacpp_models/
│ └── model_dir
├── parser.go
├── qa_model_apis.py
├── qa_pilot_run.py
├── requirements.txt
├── svelte-app/
│ ├── .gitignore
│ ├── README.md
│ ├── package.json
│ ├── public/
│ │ ├── global.css
│ │ └── index.html
│ ├── rollup.config.js
│ ├── scripts/
│ │ └── setupTypeScript.js
│ └── src/
│ ├── ApiKeyModal.svelte
│ ├── App.svelte
│ ├── Chat.svelte
│ ├── ConfigEditor.svelte
│ ├── DeleteConfirmationModal.svelte
│ ├── DeleteTemplateModal.svelte
│ ├── LlamaCppModelsModal.svelte
│ ├── NewSourceModal.svelte
│ ├── PromptTemplatesModal.svelte
│ ├── config.js
│ ├── global.css
│ └── main.js
├── templates/
│ ├── go_index.html
│ └── index.html
└── utils/
├── codegraph.py
├── go_codegraph.py
└── helper.py
================================================
FILE CONTENTS
================================================
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
QA-Pilot is an interactive chat project that leverages online/local LLMs for rapid understanding and navigation of GitHub code repositories.
### Features
* Chat with public GitHub repositories via `git clone`
* Store the chat history
* Easy to set the configuration
* Multiple chat sessions
* Locate a session quickly with the search function
* Integrate with `codegraph` to view Python files
* Support different LLM providers and models (see the sketch after this list)
* ollama(deepseek, llama3.1, phi3, llama3, gemma2)
* openai(gpt-4o, gpt-4-turbo, gpt-4, and gpt-3.5-turbo)
* mistralai(mistral-tiny, mistral-small-latest, mistral-medium-latest, mistral-large-latest, codestral-latest)
* localai(gpt-4, more)
* zhipuai(glm-4-0520, glm-4, glm-4-air, glm-4-airx, glm-4-flash)
* anthropic(claude-3-opus-20240229, claude-3-sonnet-20240229, claude-3-haiku-20240307, claude-3-5-sonnet-20240620)
* llamacpp
* nvidia(meta/llama3-70b-instruct, more)
* tongyi(qwen-turbo, qwen-plus, qwen-max, more)
* moonshot(moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k)
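Provider and model selection live in `config/config.ini`. A minimal sketch of how a selection is resolved to a chat model, assuming the `ollama` provider (mirroring the logic in `qa_model_apis.py`):
```python
import configparser

from langchain_community.chat_models import ChatOllama

# read the selected provider/model the same way qa_model_apis.py does
config = configparser.ConfigParser()
config.read("config/config.ini")
provider = config.get("model_providers", "selected_provider")
model = config.get(f"{provider}_llm_models", "selected_model")

if provider == "ollama":
    chat_model = ChatOllama(
        base_url=config.get("ollama_llm_models", "base_url"),
        model=model,
    )
```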
### Release
* 2024-07-03 update langchain to `0.2.6` version and add `moonshot` API support
* 2024-06-30 add `Go Codegraph`
* 2024-06-27 add `nvidia/tongyi` API support
* 2024-06-19 add `llamacpp` API support, improve the `settings` list in the sidebar and add upload model function for `llamacpp`, add `prompt templates` setting
* 2024-06-15 add `anthropic` API support, refactor some functions, and fix chat show messages
* 2024-06-12 add `zhipuai` API support
* 2024-06-10 Convert `flask` to `fastapi` and add `localai` API support
* 2024-06-07 Add `rr:` option and use `FlashRank` for the search
* 2024-06-05 Upgrade `langchain` to `v0.2` and add `ollama embeddings`
* 2024-05-26 Release v2.0.1: Refactoring to replace the `Streamlit` frontend with `Svelte` to improve performance.
### Disclaimer
* This is a test project to validate the feasibility of a fully local solution for question answering using LLMs and Vector embeddings. It is not production ready, and it is not meant to be used in production.
* `Do not use models for analyzing your critical or production data!!`
* `Do not use models for analyzing customer data to ensure data privacy and security!!`
* `Do not use models for analyzing your private/sensitive code repositories!!`
#### QA-Pilot

#### CodeGraph

To deploy QA-Pilot, follow the steps below:
1. Clone the QA-Pilot repository:
```shell
git clone https://github.com/reid41/QA-Pilot.git
cd QA-Pilot
```
2. Install [conda](https://www.anaconda.com/download) for virtual environment management. Create and activate a new virtual environment.
```shell
conda create -n QA-Pilot python=3.10.14
conda activate QA-Pilot
```
3. Install the required dependencies:
```shell
pip install -r requirements.txt
```
4. Install [PyTorch](https://pytorch.org/get-started/locally/) with CUDA support
5. Set up providers
* For Ollama, see the [ollama website](https://ollama.com/) and [ollama github](https://github.com/ollama/ollama) to manage local LLMs.
e.g.
```shell
ollama pull <model_name>
ollama list
```
* For LocalAI, see [localAI](https://localai.io/) and [LocalAI github](https://github.com/mudler/LocalAI) to manage local LLMs, and set the LocalAI `base_url` in `config/config.ini`.
e.g.
```shell
docker run -p 8080:8080 --name local-ai -ti localai/localai:latest-aio-cpu
# Do you have an Nvidia GPU? Use this instead
# CUDA 11
# docker run -p 8080:8080 --gpus all --name local-ai -ti localai/localai:latest-aio-gpu-nvidia-cuda-11
# CUDA 12
# docker run -p 8080:8080 --gpus all --name local-ai -ti localai/localai:latest-aio-gpu-nvidia-cuda-12
# quick check the service with http://<host>:8080/
# quick check the models with http://<host>:8080/models/
```
* For llamacpp, set it up with [llama-cpp-python](https://github.com/abetlen/llama-cpp-python#windows-remarks)
- upload the model to the `llamacpp_models` dir, or upload it from `llamacpp models` under `Settings`
- set the model in the `llamacpp_llm_models` section in `config/config.ini`
* Set the API keys in `.env`:
- [OpenAI](https://platform.openai.com/docs/overview): OPENAI_API_KEY=''
- [MistralAI](https://docs.mistral.ai/): MISTRAL_API_KEY=''
- [ZhipuAI](https://open.bigmodel.cn/): ZHIPUAI_API_KEY=''
- [Anthropic](https://console.anthropic.com/settings/keys): ANTHROPIC_API_KEY=''
- [Nvidia](https://build.nvidia.com/explore/discover): NVIDIA_API_KEY=''
- [TongYi](https://help.aliyun.com/document_detail/611472.html?spm=a2c4g.2399481.0.0): DASHSCOPE_API_KEY=''
- [Moonshot](https://platform.moonshot.cn/): MOONSHOT_API_KEY=''
* For `Go codegraph`, make sure the [Go](https://go.dev/doc/install) environment is set up, then compile the parser and test it:
```shell
go build -o parser parser.go
# test
./parser /path/test.go
```
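The parser prints a JSON map of nodes (name, type, calls, source code, position) to stdout. A minimal sketch of consuming that output from Python; the `.go` path is a placeholder:
```python
import json
import subprocess

# run the compiled Go parser and parse its JSON output (a map: name -> node)
result = subprocess.run(
    ["./parser", "path/to/test.go"],  # placeholder: any Go source file
    capture_output=True, text=True, check=True,
)
nodes = json.loads(result.stdout)
for name, node in nodes.items():
    print(name, node["Type"], node["Calls"])
```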
6. Set the related parameters in `config/config.ini`, e.g. `model provider`, `model`, `variable`, and `Ollama API url`, and set up the [PostgreSQL](https://www.postgresql.org/download/) environment:
```shell
# create the db, e.g.
CREATE DATABASE qa_pilot_chatsession_db;
CREATE USER qa_pilot_user WITH ENCRYPTED PASSWORD 'qa_pilot_p';
GRANT ALL PRIVILEGES ON DATABASE qa_pilot_chatsession_db TO qa_pilot_user;
# set the connection
cat config/config.ini
[database]
db_name = qa_pilot_chatsession_db
db_user = qa_pilot_user
db_password = qa_pilot_p
db_host = localhost
db_port = 5432
# set the arg in script and test connection
python check_postgresql_connection.py
```
7. Download and install [node.js](https://nodejs.org/en/download/package-manager) and set up the frontend environment in one terminal:
```shell
# make sure the backend server host IP is correct; localhost by default
cat svelte-app/src/config.js
export const API_BASE_URL = 'http://localhost:5000';
# install deps
cd svelte-app
npm install
npm run dev
```
8. Run the backend QA-Pilot in another terminal:
```shell
python qa_pilot_run.py
```
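Once both servers are up, a quick smoke test against the backend (assuming the default `http://localhost:5000`):
```python
import requests

# fetch the current configuration from the FastAPI backend
resp = requests.get("http://localhost:5000/get_config")
print(resp.json()["model_providers"]["selected_provider"])
```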
### Tips
* Do not use URL and upload at the same time.
* The remove button does not actually delete the local chromadb; remove it manually after stopping the app.
* Use the `New Source` button to add a new project
* Start the input with `rsd:` to get the source documents
* Start the input with `rr:` to use `FlashrankRerank` for the search (see the example below)
* Click `Open Code Graph` in `QA-Pilot` to view the code (make sure you are already in the project session and it is loaded before clicking); currently supports `python` and `go`
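For example, the `rsd:`/`rr:` prefixes are stripped by the `/chat` endpoint and toggle source-document return and reranking respectively. A hedged sketch of calling it directly; the repo URL and session id are placeholders for a session you have already created and loaded:
```python
import requests

payload = {
    # "rr:" routes the query through FlashrankRerank; "rsd:" returns source documents instead
    "message": "rr: where is the retrieval logic implemented?",
    "current_repo": "https://github.com/reid41/QA-Pilot.git",  # placeholder: an already-loaded repo
    "session_id": 1,                                           # placeholder: an existing session id
}
resp = requests.post("http://localhost:5000/chat", json=payload)
print(resp.json()["response"])
```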
================================================
FILE: app.py
================================================
from fastapi import FastAPI, Request, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.templating import Jinja2Templates
import configparser
import os
from dotenv import load_dotenv, set_key
from utils.helper import (
DataHandler,
remove_directory,
encode_kwargs,
model_kwargs,
)
import psycopg2
from psycopg2 import sql
import ast
from qa_model_apis import (
get_chat_model,
get_embedding_model,
)
from utils.codegraph import (
parse_python_code,
read_current_repo_path,
build_file_tree,
)
from utils.go_codegraph import (
parse_go_code,
go_build_file_tree,
)
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
config_path = os.path.join('config', 'config.ini')
prompt_templates_path = os.path.join('config', 'prompt_templates.ini')
config = configparser.ConfigParser()
config.read(config_path)
templates = Jinja2Templates(directory="templates")
DB_NAME = config['database']['db_name']
DB_USER = config['database']['db_user']
DB_PASSWORD = config['database']['db_password']
DB_HOST = config['database']['db_host']
DB_PORT = config['database']['db_port']
# for analysing code
current_session = None
current_model_info = {
"provider": None,
"model": None,
"eb_provider": None,
"eb_model": None,
"chat_model": None,
"embedding_model": None
}
def init_db():
conn = psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS sessions (
id BIGINT PRIMARY KEY,
name TEXT NOT NULL,
url TEXT NOT NULL
)
''')
conn.commit()
# on first start, set the default session
cursor.execute('SELECT id, name, url FROM sessions LIMIT 1')
session = cursor.fetchone()
if session:
global current_session
current_session = {'id': session[0], 'name': session[1], 'url': session[2]}
print("Default session set to:", current_session)
conn.close()
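# reload the chat/embedding models only when the provider or model selected in config.ini has changed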
def load_models_if_needed():
selected_provider = config.get('model_providers', 'selected_provider')
selected_model = config.get(f"{selected_provider}_llm_models", 'selected_model')
eb_selected_provider = config.get('embedding_model_providers', 'selected_provider')
eb_selected_model = config.get(f"{eb_selected_provider}_embedding_models", 'selected_model')
if (current_model_info["provider"] != selected_provider or
current_model_info["model"] != selected_model or
current_model_info["eb_provider"] != eb_selected_provider or
current_model_info["eb_model"] != eb_selected_model):
current_model_info["provider"] = selected_provider
current_model_info["model"] = selected_model
current_model_info["eb_provider"] = eb_selected_provider
current_model_info["eb_model"] = eb_selected_model
current_model_info["chat_model"] = get_chat_model(selected_provider, selected_model)
current_model_info["embedding_model"] = get_embedding_model(eb_selected_provider, eb_selected_model, model_kwargs, encode_kwargs)
print(f"Loaded new models: provider={selected_provider}, model={selected_model}")
print(f"Loaded models: provider={selected_provider}, model={selected_model}")
def create_message_table(session_id):
conn = psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
cursor = conn.cursor()
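# sql.Identifier safely quotes the dynamically named per-session table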
table_name = sql.Identifier(f'session_{session_id}')
cursor.execute(sql.SQL('''
CREATE TABLE IF NOT EXISTS {} (
id BIGSERIAL PRIMARY KEY,
sender TEXT NOT NULL,
text TEXT NOT NULL
)
''').format(table_name))
conn.commit()
conn.close()
init_db()
def load_config():
config.read(config_path)
return config
@app.get('/get_config')
async def get_config():
config = load_config()
config_dict = {section: dict(config.items(section)) for section in config.sections()}
return JSONResponse(content=config_dict)
@app.post('/save_config')
async def save_config(request: Request):
new_config = await request.json()
config = load_config()
for section, section_values in new_config.items():
if not config.has_section(section):
config.add_section(section)
for key, value in section_values.items():
config.set(section, key, value)
with open(config_path, 'w') as configfile:
config.write(configfile)
return JSONResponse(content={"message": "Configuration saved successfully!"})
@app.post('/update_provider')
async def update_provider(request: Request):
data = await request.json()
selected_provider = data.get('selected_provider')
config.set('model_providers', 'selected_provider', selected_provider)
with open(config_path, 'w') as configfile:
config.write(configfile)
return JSONResponse(content={"message": "Provider updated successfully!"})
@app.post('/update_model')
async def update_model(request: Request):
data = await request.json()
selected_provider = data.get('selected_provider')
selected_model = data.get('selected_model')
config.set(f'{selected_provider}_llm_models', 'selected_model', selected_model)
with open(config_path, 'w') as configfile:
config.write(configfile)
return JSONResponse(content={"message": "Model updated successfully!"})
@app.post('/load_repo')
async def load_repo(request: Request):
data = await request.json()
git_url = data.get('git_url')
if not git_url:
raise HTTPException(status_code=400, detail="Git URL is required")
load_models_if_needed()
chat_model = current_model_info["chat_model"]
embedding_model = current_model_info["embedding_model"]
data_handler = DataHandler(git_url, chat_model, embedding_model)
try:
data_handler.git_clone_repo()
data_handler.load_into_db()
return JSONResponse(content={"message": f"Repository {git_url} loaded successfully!"})
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.post('/chat')
async def chat(request: Request):
data = await request.json()
load_models_if_needed()
chat_model = current_model_info["chat_model"]
embedding_model = current_model_info["embedding_model"]
user_message = data.get('message')
current_repo = data.get('current_repo')
session_id = data.get('session_id')
if not user_message or not current_repo or not session_id:
raise HTTPException(status_code=400, detail="Message, current_repo and session_id are required")
try:
data_handler = DataHandler(current_repo, chat_model, embedding_model)
data_handler.load_into_db()
rsd = False
rr = False
# return source documents
if user_message.startswith('rsd:'):
user_message = user_message[4:].strip()
rsd = True
# use reranker
elif user_message.startswith('rr:'):
user_message = user_message[3:].strip()
rr = True
bot_response = data_handler.retrieval_qa(user_message, rsd=rsd, rr=rr)
# Save user message and bot response to the session table
conn = psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
cursor = conn.cursor()
table_name = sql.Identifier(f'session_{session_id}')
cursor.execute(sql.SQL('INSERT INTO {} (sender, text) VALUES (%s, %s)').format(table_name), ('You', user_message))
cursor.execute(sql.SQL('INSERT INTO {} (sender, text) VALUES (%s, %s)').format(table_name), ('QA-Pilot', bot_response))
conn.commit()
conn.close()
return JSONResponse(content={"response": bot_response})
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get('/sessions')
async def get_sessions():
conn = psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
cursor = conn.cursor()
cursor.execute('SELECT id, name, url FROM sessions')
sessions = [{'id': row[0], 'name': row[1], 'url': row[2]} for row in cursor.fetchall()]
conn.close()
print(f"Fetched sessions from DB: {sessions}")
return JSONResponse(content=sessions)
@app.post('/sessions')
async def save_sessions(request: Request):
sessions = await request.json()
print(f"Received sessions to save: {sessions}")
conn = psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
cursor = conn.cursor()
for session in sessions:
cursor.execute('INSERT INTO sessions (id, name, url) VALUES (%s, %s, %s) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, url = EXCLUDED.url',
(session['id'], session['name'], session['url']))
create_message_table(session['id'])
conn.commit()
conn.close()
print("Saved sessions to DB")
return JSONResponse(content={"message": "Sessions saved successfully!"})
@app.get('/messages/{session_id}')
async def get_messages(session_id: int):
conn = psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
cursor = conn.cursor()
table_name = sql.Identifier(f'session_{session_id}')
cursor.execute(sql.SQL('SELECT sender, text FROM {}').format(table_name))
messages = [{'sender': row[0], 'text': row[1]} for row in cursor.fetchall()]
conn.close()
print(f"Fetched messages from session {session_id}")
return JSONResponse(content=messages)
@app.post('/update_current_session')
async def update_current_session(request: Request):
global current_session
current_session = await request.json()
return JSONResponse(content={"message": "Current session updated successfully!"})
@app.delete('/sessions/{session_id}')
async def delete_session(session_id: int):
print(f"Deleting session with ID: {session_id}")
conn = psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
cursor = conn.cursor()
try:
cursor.execute('SELECT name FROM sessions WHERE id = %s', (session_id,))
session = cursor.fetchone()
if session:
session_name = session[0]
print("anem", session_name)
cursor.execute('DELETE FROM sessions WHERE id = %s', (session_id,))
conn.commit()
cursor.execute(sql.SQL('DROP TABLE IF EXISTS {}').format(sql.Identifier(f'session_{session_id}')))
conn.commit()
# remove the git clone project
remove_project_path = os.path.join("projects", session_name)
remove_directory(remove_project_path)
print("Session deleted successfully")
return JSONResponse(content={"message": "Session deleted successfully!"})
except Exception as e:
print(f"Error deleting session: {e}")
raise HTTPException(status_code=500, detail=str(e))
finally:
conn.close()
# api key handling functions
@app.post('/check_api_key')
async def check_api_key(request: Request):
data = await request.json()
provider = data.get('provider')
key_var = f"{provider.upper()}_API_KEY"
load_dotenv()
api_key = os.getenv(key_var)
return JSONResponse(content={'exists': bool(api_key)})
@app.post('/save_api_key')
async def save_api_key(request: Request):
data = await request.json()
provider = data.get('provider')
api_key = data.get('api_key')
key_var = f"{provider.upper()}_API_KEY"
dotenv_path = '.env'
set_key(dotenv_path, key_var, api_key)
load_dotenv()
return JSONResponse(content={'message': 'API Key saved successfully!'})
# handle llamacpp models (list/upload/delete)
@app.get('/llamacpp_models')
async def list_llamacpp_models():
upload_dir = "llamacpp_models"
if not os.path.exists(upload_dir):
return JSONResponse(content=[])
models = os.listdir(upload_dir)
return JSONResponse(content=models)
@app.post('/llamacpp_models', status_code=201)
async def upload_llamacpp_model(file: UploadFile = File(...), chunk: int = Form(...), totalChunks: int = Form(...)):
try:
upload_dir = "llamacpp_models"
os.makedirs(upload_dir, exist_ok=True)
chunk_dir = os.path.join(upload_dir, "chunks")
os.makedirs(chunk_dir, exist_ok=True)
chunk_file_path = os.path.join(chunk_dir, f"{file.filename}.part{chunk}")
with open(chunk_file_path, "wb") as f:
f.write(await file.read())
# Check if all chunks are uploaded
if len(os.listdir(chunk_dir)) == totalChunks:
final_file_path = os.path.join(upload_dir, file.filename)
with open(final_file_path, "wb") as final_file:
for i in range(totalChunks):
chunk_file_path = os.path.join(chunk_dir, f"{file.filename}.part{i}")
with open(chunk_file_path, "rb") as chunk_file:
final_file.write(chunk_file.read())
os.remove(chunk_file_path)
os.rmdir(chunk_dir) # Remove the chunks directory
return JSONResponse(content={"message": "Chunk uploaded successfully!"})
except Exception as e:
print(f"Error uploading model: {e}")
raise HTTPException(status_code=500, detail="Failed to upload chunk")
@app.delete('/llamacpp_models/{model_name}')
async def delete_llamacpp_model(model_name: str):
file_path = os.path.join("llamacpp_models", model_name)
if os.path.exists(file_path):
os.remove(file_path)
return JSONResponse(content={"message": "Model deleted successfully!"})
else:
raise HTTPException(status_code=404, detail="Model not found")
# handle prompt templates
@app.get('/get_prompt_templates')
async def get_prompt_templates():
prompt_templates_path = os.path.join('config', 'prompt_templates.ini')
templates_config = configparser.ConfigParser()
if not os.path.exists(prompt_templates_path):
return JSONResponse(content={})
with open(prompt_templates_path, 'r', encoding='utf-8') as file:
file_content = file.read()
# Ensure the content has a section header
if '[qa_prompt_templates]' not in file_content:
file_content = '[qa_prompt_templates]\n' + file_content
try:
templates_config.read_string(file_content)
except configparser.ParsingError as e:
print(f"Error parsing config: {e}")
raise HTTPException(status_code=500, detail="Error parsing prompt templates")
templates = {k: v.replace('\\n', '\n') for k, v in templates_config.items('qa_prompt_templates')}
return JSONResponse(content=templates)
@app.post('/delete_prompt_template')
async def delete_prompt_template(request: Request):
data = await request.json()
template_name = data.get('template_name')
prompt_templates_path = os.path.join('config', 'prompt_templates.ini')
templates_config = configparser.ConfigParser()
templates_config.read(prompt_templates_path)
if template_name in templates_config['qa_prompt_templates']:
templates_config.remove_option('qa_prompt_templates', template_name)
with open(prompt_templates_path, 'w', encoding='utf-8') as configfile:
templates_config.write(configfile)
return JSONResponse(content={"message": "Template deleted successfully!"})
else:
raise HTTPException(status_code=404, detail="Template not found")
@app.post('/save_prompt_templates')
async def save_prompt_templates(request: Request):
new_templates = await request.json()
prompt_templates_path = os.path.join('config', 'prompt_templates.ini')
templates_config = configparser.ConfigParser()
templates_config['qa_prompt_templates'] = {k: v.replace('\n', '\\n') for k, v in new_templates.items()}
with open(prompt_templates_path, 'w', encoding='utf-8') as configfile:
templates_config.write(configfile)
return JSONResponse(content={"message": "Templates saved successfully!"})
#############################python codegraph############################
@app.get('/codegraph')
async def codegraph_home(request: Request):
return templates.TemplateResponse('index.html', {'request': request})
@app.get('/data')
async def data(filepath: str):
code_data = parse_python_code(filepath) # Ensure the path points to your Python code file
return JSONResponse(content=code_data)
@app.get('/directory')
async def directory():
current_repo_path = read_current_repo_path(current_session)
if current_repo_path is None:
raise HTTPException(status_code=404, detail="Repository path not set or not found")
dir_tree = build_file_tree(current_repo_path) # Ensure the path points to your code directory
return JSONResponse(content=dir_tree)
@app.post('/analyze')
async def analyze(request: Request):
data = await request.json()
load_models_if_needed()
chat_model = current_model_info["chat_model"]
embedding_model = current_model_info["embedding_model"]
code = data.get('code', '')
# send the code to LLM
data_handler = DataHandler(git_url='', chat_model=chat_model, embedding_model=embedding_model)
code_analysis = data_handler.restrieval_qa_for_code(code)
return JSONResponse(content={'analysis': code_analysis})
#####go codegraph#####
@app.get('/go_codegraph')
async def go_codegraph_home(request: Request):
return templates.TemplateResponse('go_index.html', {'request': request})
@app.get('/go_data')
async def go_data(filepath: str):
if os.path.isdir(filepath):
raise HTTPException(status_code=400, detail="The specified path is a directory, not a file.")
code_data = parse_go_code(filepath)
return JSONResponse(content=code_data)
@app.get('/go_directory')
async def go_directory():
current_repo_path = read_current_repo_path(current_session)
if current_repo_path is None:
raise HTTPException(status_code=404, detail="Repository path not set or not found")
dir_tree = go_build_file_tree(current_repo_path) # Ensure the path points to your code directory
return JSONResponse(content=dir_tree)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=5003, log_level="debug")
================================================
FILE: check_postgresql_connection.py
================================================
import psycopg2
import configparser
config = configparser.ConfigParser()
config.read('config/config.ini')
db_config = config['database']
try:
conn = psycopg2.connect(
dbname=db_config['db_name'],
user=db_config['db_user'],
password=db_config['db_password'],
host=db_config['db_host'],
port=db_config['db_port']
)
print("Connection successful")
cur = conn.cursor()
cur.execute("SELECT version();")
db_version = cur.fetchone()
print(f"Database version: {db_version}")
cur.close()
conn.close()
except psycopg2.Error as e:
print(f"Error: {e}")
================================================
FILE: config/config.ini
================================================
[app_setting]
version = v2.15.6
[model_providers]
provider_list = openai, ollama, mistralai, localai, zhipuai, anthropic, llamacpp, nvidia, tongyi, moonshot
selected_provider = ollama
[openai_llm_models]
model_list = gpt-3.5-turbo, gpt-4o
selected_model = gpt-4o
[mistralai_llm_models]
model_list = mistral-tiny, mistral-small
selected_model = mistral-small
[ollama_llm_models]
model_list = qwen2.5-coder:14b, qwen2.5:14b, deepseek-r1:14b
selected_model = qwen2.5:14b
base_url = http://localhost:11434
[localai_llm_models]
model_list = gpt-4
selected_model = gpt-4
base_url = http://localhost:8080/v1
[zhipuai_llm_models]
model_list = glm-4
selected_model = glm-4
[anthropic_llm_models]
model_list = claude-3-opus-20240229
selected_model = claude-3-opus-20240229
[llamacpp_llm_models]
model_list =
selected_model =
[nvidia_llm_models]
model_list = meta/llama3-70b-instruct
selected_model = meta/llama3-70b-instruct
[tongyi_llm_models]
model_list = qwen-turbo
selected_model = qwen-turbo
[moonshot_llm_models]
model_list = moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k
selected_model = moonshot-v1-8k
[embedding_model_providers]
provider_list = huggingface, ollama
selected_provider = huggingface
[huggingface_embedding_models]
model_list = sentence-transformers/all-MiniLM-L6-v2
selected_model = sentence-transformers/all-MiniLM-L6-v2
[ollama_embedding_models]
model_list = nomic-embed-text, mxbai-embed-large
selected_model = nomic-embed-text
[prompt_templates]
qa_selected_prompt = qa_template
code_selected_prompt = code_template
localai_selected_prompt = code_template_localai
[the_project_dirs]
vectorstore_dir = VectorStore
sessions_dir = sessions
project_dir = projects
[for_loop_dirs_depth]
max_dir_depth = 5
[chunk_setting]
chunk_size = 3000
chunk_overlap = 200
[database]
db_name = qa_pilot_chatsession_db
db_user = qa_pilot_user
db_password = qa_pilot_p
db_host = localhost
db_port = 5432
================================================
FILE: config/prompt_templates.ini
================================================
[qa_prompt_templates]
qa_template = """I want you to act as a very senior code developer who is familar with github/gitlab community. I will provide you the code project, you need to provide answers which are based on the project. {context}"""
code_template = """I want you to act as a Senior Python developer. I will provide you the code project, you provide detailed exaplanation. Human: {input}History: {history} AI:"""
code_template_localai = """I want you to act as a Senior Python developer. I will provide you the code project, you provide detailed exaplanation. Human: {input} AI:"""
================================================
FILE: llamacpp_models/model_dir
================================================
================================================
FILE: parser.go
================================================
package main
import (
"encoding/json"
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"strings"
)
type Node struct {
Name string
Type string // "func", "method", "type", or "import"
Calls []string // list of called functions/methods
Code string // source code of the node
Position string // file position of the node
}
func main() {
if len(os.Args) < 2 {
fmt.Println("Usage: ./parser ")
os.Exit(1)
}
filePath := os.Args[1]
fset := token.NewFileSet()
node, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
nodes := make(map[string]*Node)
ast.Inspect(node, func(n ast.Node) bool {
switch x := n.(type) {
case *ast.FuncDecl:
funcName := x.Name.Name
funcType := "func"
if x.Recv != nil {
recvType := fmt.Sprintf("%s", x.Recv.List[0].Type)
if len(recvType) > 1 && recvType[0] == '*' {
recvType = recvType[1:]
}
funcType = "method"
funcName = fmt.Sprintf("%s.%s", cleanType(recvType), funcName)
}
pos := fset.Position(x.Pos())
code := getNodeCode(fset, x.Pos(), x.End(), filePath)
nodes[funcName] = &Node{
Name: funcName,
Type: funcType,
Calls: []string{},
Code: code,
Position: pos.String(),
}
case *ast.GenDecl:
if x.Tok == token.TYPE {
for _, spec := range x.Specs {
typeSpec := spec.(*ast.TypeSpec)
typeName := typeSpec.Name.Name
pos := fset.Position(typeSpec.Pos())
code := getNodeCode(fset, typeSpec.Pos(), typeSpec.End(), filePath)
nodes[typeName] = &Node{
Name: typeName,
Type: "type",
Calls: []string{},
Code: code,
Position: pos.String(),
}
}
} else if x.Tok == token.IMPORT {
for _, spec := range x.Specs {
importSpec := spec.(*ast.ImportSpec)
importPath := importSpec.Path.Value
pos := fset.Position(importSpec.Pos())
code := importSpec.Path.Value
nodes[importPath] = &Node{
Name: importPath,
Type: "import",
Calls: []string{},
Code: code,
Position: pos.String(),
}
}
}
}
return true
})
// Collect function and method calls
ast.Inspect(node, func(n ast.Node) bool {
switch x := n.(type) {
case *ast.CallExpr:
caller := ""
if sel, ok := x.Fun.(*ast.SelectorExpr); ok {
if ident, ok := sel.X.(*ast.Ident); ok {
caller = fmt.Sprintf("%s.%s", ident.Name, sel.Sel.Name)
}
} else if ident, ok := x.Fun.(*ast.Ident); ok {
caller = ident.Name
}
if caller != "" {
if parentFunc := getParentFunc(node, x.Pos(), fset); parentFunc != "" {
if _, ok := nodes[parentFunc]; ok {
nodes[parentFunc].Calls = append(nodes[parentFunc].Calls, caller)
}
}
}
}
return true
})
jsonOutput, err := json.Marshal(nodes)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(string(jsonOutput))
}
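// getNodeCode returns the source text for the span [start, end) read back from the parsed file.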
func getNodeCode(fset *token.FileSet, start, end token.Pos, filePath string) string {
startOffset := fset.Position(start).Offset
endOffset := fset.Position(end).Offset
code, _ := os.ReadFile(filePath)
return string(code[startOffset:endOffset])
}
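// getParentFunc finds the enclosing function or method declaration containing pos by re-walking the file's AST.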
func getParentFunc(node *ast.File, pos token.Pos, fset *token.FileSet) string {
var parentFunc string
ast.Inspect(node, func(n ast.Node) bool {
if fd, ok := n.(*ast.FuncDecl); ok {
if fd.Pos() < pos && fd.End() > pos {
if fd.Recv != nil {
recvType := fmt.Sprintf("%s", fd.Recv.List[0].Type)
if len(recvType) > 1 && recvType[0] == '*' {
recvType = recvType[1:]
}
parentFunc = fmt.Sprintf("%s.%s", cleanType(recvType), fd.Name.Name)
} else {
parentFunc = fd.Name.Name
}
return false
}
}
return true
})
return parentFunc
}
func cleanType(typeName string) string {
// Remove generic type parameters if present
return strings.TrimSpace(strings.Split(typeName, "[")[0])
}
================================================
FILE: qa_model_apis.py
================================================
from langchain_community.chat_models import ChatOllama
from langchain_mistralai.chat_models import ChatMistralAI
from langchain_openai import ChatOpenAI
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from langchain_community.embeddings import OllamaEmbeddings
import os
import configparser
from dotenv import load_dotenv
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from llama_index.llms.openai_like import OpenAILike
from langchain_community.chat_models import ChatZhipuAI
from langchain_anthropic import ChatAnthropic
import multiprocessing
from langchain_community.chat_models import ChatLlamaCpp
from langchain_nvidia_ai_endpoints import ChatNVIDIA
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_community.chat_models.moonshot import MoonshotChat
# read from the config.ini
config_path = os.path.join('config', 'config.ini')
config = configparser.ConfigParser()
config.read(config_path)
ollama_base_url = config.get('ollama_llm_models', 'base_url')
localai_base_url = config.get('localai_llm_models', 'base_url')
# get the chat model from config
def get_chat_model(provider, model_name=''):
if provider == 'ollama':
return ChatOllama(
base_url=ollama_base_url,
model=model_name,
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()]
)
elif provider == 'openai':
load_dotenv()
return ChatOpenAI(model_name=model_name)
elif provider == 'mistralai':
load_dotenv()
return ChatMistralAI(model_name=model_name)
elif provider == 'localai':
return OpenAILike(
api_base=localai_base_url,
api_key="qa_pilot",
is_chat_model=True,
context_window=32768,
model=model_name
)
elif provider == 'zhipuai':
load_dotenv()
return ChatZhipuAI(
model=model_name,
temperature=0.5,
)
elif provider == 'anthropic':
load_dotenv()
return ChatAnthropic(
model=model_name
)
elif provider == 'llamacpp':
local_model = os.path.join("llamacpp_models", model_name)
return ChatLlamaCpp(
temperature=0.5,
model_path=local_model,
n_ctx=10000,
n_gpu_layers=8,
n_batch=300, # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
max_tokens=512,
n_threads=multiprocessing.cpu_count() - 1,
repeat_penalty=1.5,
top_p=0.5,
verbose=True,
)
elif provider == 'nvidia':
load_dotenv()
return ChatNVIDIA(
model=model_name
)
elif provider == 'tongyi':
load_dotenv()
return ChatTongyi(
model=model_name
)
elif provider == 'moonshot':
load_dotenv()
return MoonshotChat(
model=model_name
)
else:
raise ValueError(f"Unsupported model provider: {provider}")
def get_embedding_model(eb_provider, model_name='', model_kwargs='', encode_kwargs=''):
if eb_provider == 'huggingface':
return HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs,
cache_folder="./cache_embeddings/"
)
elif eb_provider == 'ollama':
return OllamaEmbeddings(
model=model_name,
model_kwargs=model_kwargs
)
else:
raise ValueError(f"Unsupported embedding model provider: {eb_provider}")
================================================
FILE: qa_pilot_run.py
================================================
import uvicorn
from app import app
if __name__ == "__main__":
print("Starting server on http://0.0.0.0:5000")
uvicorn.run(app, host="0.0.0.0", port=5000)
================================================
FILE: requirements.txt
================================================
aiohttp==3.9.3
aiosignal==1.3.1
altair==5.2.0
annotated-types==0.6.0
anthropic==0.28.1
anyio==4.3.0
asgiref==3.8.1
async-timeout==4.0.3
attrs==23.2.0
backoff==2.2.1
bcrypt==4.1.2
blinker==1.7.0
boto3==1.34.71
botocore==1.34.71
build==1.1.1
cachetools==5.3.3
certifi==2024.2.2
charset-normalizer==3.3.2
chroma-hnswlib==0.7.3
chromadb==0.4.24
click==8.1.7
colorama==0.4.6
coloredlogs==15.0.1
dashscope==1.20.0
dataclasses-json==0.6.4
defusedxml==0.7.1
Deprecated==1.2.14
dill==0.3.8
dirtyjson==1.0.8
diskcache==5.6.3
distro==1.9.0
exceptiongroup==1.2.0
fastapi==0.110.0
filelock==3.13.3
FlashRank==0.2.5
Flask==3.0.3
Flask-Cors==4.0.1
flatbuffers==24.3.25
frozenlist==1.4.1
fsspec==2024.3.1
gitdb==4.0.11
GitPython==3.1.42
google-auth==2.29.0
googleapis-common-protos==1.63.0
greenlet==3.0.3
grpcio==1.62.1
h11==0.14.0
httpcore==1.0.4
httptools==0.6.1
httpx==0.27.0
httpx-sse==0.4.0
huggingface-hub==0.23.2
humanfriendly==10.0
humbug==0.3.2
idna==3.6
importlib-metadata==6.11.0
importlib_resources==6.4.0
intel-openmp==2021.4.0
itsdangerous==2.2.0
Jinja2==3.1.3
jiter==0.4.2
jmespath==1.0.1
joblib==1.3.2
jsonpatch==1.33
jsonpointer==2.4
jsonschema==4.21.1
jsonschema-specifications==2023.12.1
kubernetes==29.0.0
langchain==0.2.6
langchain-anthropic==0.1.15
langchain-chroma==0.1.1
langchain-community==0.2.6
langchain-core==0.2.11
langchain-huggingface==0.0.2
langchain-mistralai==0.1.8
langchain-nvidia-ai-endpoints==0.1.2
langchain-openai==0.1.8
langchain-text-splitters==0.2.1
langsmith==0.1.80
llama-index-core==0.10.43.post1
llama-index-llms-openai==0.1.22
llama-index-llms-openai-like==0.1.3
llama_cpp_python==0.2.67
llamaindex-py-client==0.1.19
lz4==4.3.3
markdown-it-py==3.0.0
MarkupSafe==2.1.5
marshmallow==3.21.1
mdurl==0.1.2
mkl==2021.4.0
mmh3==4.1.0
monotonic==1.6
mpmath==1.3.0
multidict==6.0.5
multiprocess==0.70.16
mypy-extensions==1.0.0
nest-asyncio==1.6.0
networkx==3.2.1
nltk==3.8.1
numpy==1.26.4
oauthlib==3.2.2
onnxruntime==1.17.1
openai==1.28.0
opentelemetry-api==1.23.0
opentelemetry-exporter-otlp-proto-common==1.23.0
opentelemetry-exporter-otlp-proto-grpc==1.23.0
opentelemetry-instrumentation==0.44b0
opentelemetry-instrumentation-asgi==0.44b0
opentelemetry-instrumentation-fastapi==0.44b0
opentelemetry-proto==1.23.0
opentelemetry-sdk==1.23.0
opentelemetry-semantic-conventions==0.44b0
opentelemetry-util-http==0.44b0
orjson==3.9.15
overrides==7.7.0
packaging==23.2
pandas==2.2.1
pathos==0.3.2
pillow==10.2.0
posthog==3.5.0
pox==0.3.4
ppft==1.7.6.8
protobuf==4.25.3
psycopg2-binary==2.9.9
pulsar-client==3.4.0
pyarrow==15.0.2
pyasn1==0.6.0
pyasn1_modules==0.4.0
pydantic==2.6.4
pydantic_core==2.16.3
pydeck==0.8.1b0
Pygments==2.17.2
PyJWT==2.8.0
PyPika==0.48.9
pyproject_hooks==1.0.0
pyreadline3==3.4.1
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-multipart==0.0.9
pytz==2024.1
PyYAML==6.0.1
referencing==0.34.0
regex==2023.12.25
requests==2.31.0
requests-oauthlib==2.0.0
rich==13.7.1
rpds-py==0.18.0
rsa==4.9
s3transfer==0.10.1
safetensors==0.4.2
scikit-learn==1.4.1.post1
scipy==1.12.0
sentence-transformers==2.6.1
six==1.16.0
smmap==5.0.1
sniffio==1.3.1
SQLAlchemy==2.0.29
starlette==0.36.3
streamlit==1.32.2
sympy==1.12
tbb==2021.12.0
tenacity==8.2.3
text-generation==0.7.0
threadpoolctl==3.4.0
tiktoken==0.7.0
tokenizers==0.19.1
toml==0.10.2
tomli==2.0.1
toolz==0.12.1
tornado==6.4
tqdm==4.66.2
transformers==4.41.2
typer==0.11.0
typing-inspect==0.9.0
typing_extensions==4.10.0
tzdata==2024.1
urllib3==2.2.1
uvicorn==0.29.0
waitress==3.0.0
watchdog==4.0.0
watchfiles==0.21.0
websocket-client==1.7.0
websockets==12.0
Werkzeug==3.0.3
wrapt==1.16.0
yarl==1.9.4
zipp==3.18.1
================================================
FILE: svelte-app/.gitignore
================================================
/node_modules/
/public/build/
.DS_Store
================================================
FILE: svelte-app/README.md
================================================
This repo is no longer maintained. Consider using `npm init vite` and selecting the `svelte` option or — if you want a full-fledged app framework — use [SvelteKit](https://kit.svelte.dev), the official application framework for Svelte.
---
# svelte app
This is a project template for [Svelte](https://svelte.dev) apps. It lives at https://github.com/sveltejs/template.
To create a new project based on this template using [degit](https://github.com/Rich-Harris/degit):
```bash
npx degit sveltejs/template svelte-app
cd svelte-app
```
*Note that you will need to have [Node.js](https://nodejs.org) installed.*
## Get started
Install the dependencies...
```bash
cd svelte-app
npm install
```
...then start [Rollup](https://rollupjs.org):
```bash
npm run dev
```
Navigate to [localhost:8080](http://localhost:8080). You should see your app running. Edit a component file in `src`, save it, and reload the page to see your changes.
By default, the server will only respond to requests from localhost. To allow connections from other computers, edit the `sirv` commands in package.json to include the option `--host 0.0.0.0`.
If you're using [Visual Studio Code](https://code.visualstudio.com/) we recommend installing the official extension [Svelte for VS Code](https://marketplace.visualstudio.com/items?itemName=svelte.svelte-vscode). If you are using other editors you may need to install a plugin in order to get syntax highlighting and intellisense.
## Building and running in production mode
To create an optimised version of the app:
```bash
npm run build
```
You can run the newly built app with `npm run start`. This uses [sirv](https://github.com/lukeed/sirv), which is included in your package.json's `dependencies` so that the app will work when you deploy to platforms like [Heroku](https://heroku.com).
## Single-page app mode
By default, sirv will only respond to requests that match files in `public`. This is to maximise compatibility with static fileservers, allowing you to deploy your app anywhere.
If you're building a single-page app (SPA) with multiple routes, sirv needs to be able to respond to requests for *any* path. You can make it so by editing the `"start"` command in package.json:
```js
"start": "sirv public --single"
```
## Using TypeScript
This template comes with a script to set up a TypeScript development environment, you can run it immediately after cloning the template with:
```bash
node scripts/setupTypeScript.js
```
Or remove the script via:
```bash
rm scripts/setupTypeScript.js
```
If you want to use `baseUrl` or `path` aliases within your `tsconfig`, you need to set up `@rollup/plugin-alias` to tell Rollup to resolve the aliases. For more info, see [this StackOverflow question](https://stackoverflow.com/questions/63427935/setup-tsconfig-path-in-svelte).
## Deploying to the web
### With [Vercel](https://vercel.com)
Install `vercel` if you haven't already:
```bash
npm install -g vercel
```
Then, from within your project folder:
```bash
cd public
vercel deploy --name my-project
```
### With [surge](https://surge.sh/)
Install `surge` if you haven't already:
```bash
npm install -g surge
```
Then, from within your project folder:
```bash
npm run build
surge public my-project.surge.sh
```
================================================
FILE: svelte-app/package.json
================================================
{
"name": "svelte-app",
"version": "1.0.0",
"private": true,
"type": "module",
"scripts": {
"build": "rollup -c",
"dev": "rollup -c -w",
"start": "sirv public --no-clear --port 5001 --host 0.0.0.0"
},
"devDependencies": {
"@rollup/plugin-commonjs": "^24.0.0",
"@rollup/plugin-node-resolve": "^15.0.0",
"@rollup/plugin-terser": "^0.4.4",
"rollup": "^3.15.0",
"rollup-plugin-css-only": "^4.3.0",
"rollup-plugin-livereload": "^2.0.0",
"rollup-plugin-svelte": "^7.1.2",
"svelte": "^3.55.0"
},
"dependencies": {
"cytoscape": "^3.29.2",
"cytoscape-dagre": "^2.5.0",
"highlight.js": "^11.9.0",
"marked": "^12.0.2",
"sirv-cli": "^2.0.0",
"svelte-spa-router": "^4.0.1"
}
}
================================================
FILE: svelte-app/public/global.css
================================================
html, body {
position: relative;
width: 100%;
height: 100%;
}
body {
color: #333;
margin: 0;
padding: 8px;
box-sizing: border-box;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
}
a {
color: rgb(0,100,200);
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
a:visited {
color: rgb(0,80,160);
}
label {
display: block;
}
input, button, select, textarea {
font-family: inherit;
font-size: inherit;
-webkit-padding: 0.4em 0;
padding: 0.4em;
margin: 0 0 0.5em 0;
box-sizing: border-box;
border: 1px solid #ccc;
border-radius: 2px;
}
input:disabled {
color: #ccc;
}
button {
color: #333;
background-color: #f4f4f4;
outline: none;
}
button:disabled {
color: #999;
}
button:not(:disabled):active {
background-color: #ddd;
}
button:focus {
border-color: #666;
}
================================================
FILE: svelte-app/public/index.html
================================================
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8" />
  <meta name="viewport" content="width=device-width,initial-scale=1" />
  <title>Svelte app</title>
  <link rel="stylesheet" href="/global.css" />
  <link rel="stylesheet" href="/build/bundle.css" />
  <script defer src="/build/bundle.js"></script>
</head>
<body>
</body>
</html>
================================================
FILE: svelte-app/rollup.config.js
================================================
import { spawn } from 'child_process';
import svelte from 'rollup-plugin-svelte';
import commonjs from '@rollup/plugin-commonjs';
import terser from '@rollup/plugin-terser';
import resolve from '@rollup/plugin-node-resolve';
import livereload from 'rollup-plugin-livereload';
import css from 'rollup-plugin-css-only';
const production = !process.env.ROLLUP_WATCH;
function serve() {
let server;
function toExit() {
if (server) server.kill(0);
}
return {
writeBundle() {
if (server) return;
server = spawn('npm', ['run', 'start', '--', '--dev'], {
stdio: ['ignore', 'inherit', 'inherit'],
shell: true
});
process.on('SIGTERM', toExit);
process.on('exit', toExit);
}
};
}
export default {
input: 'src/main.js',
output: {
sourcemap: true,
format: 'iife',
name: 'app',
file: 'public/build/bundle.js'
},
plugins: [
svelte({
compilerOptions: {
// enable run-time checks when not in production
dev: !production
}
}),
// we'll extract any component CSS out into
// a separate file - better for performance
css({ output: 'bundle.css' }),
// If you have external dependencies installed from
// npm, you'll most likely need these plugins. In
// some cases you'll need additional configuration -
// consult the documentation for details:
// https://github.com/rollup/plugins/tree/master/packages/commonjs
resolve({
browser: true,
dedupe: ['svelte'],
exportConditions: ['svelte']
}),
commonjs(),
// In dev mode, call `npm run start` once
// the bundle has been generated
!production && serve(),
// Watch the `public` directory and refresh the
// browser on changes when not in production
!production && livereload('public'),
// If we're building for production (npm run build
// instead of npm run dev), minify
production && terser()
],
watch: {
clearScreen: false
}
};
================================================
FILE: svelte-app/scripts/setupTypeScript.js
================================================
// @ts-check
/** This script modifies the project to support TS code in .svelte files like:
As well as validating the code for CI.
*/
/** To work on this script:
rm -rf test-template template && git clone sveltejs/template test-template && node scripts/setupTypeScript.js test-template
*/
import fs from "fs"
import path from "path"
import { argv } from "process"
import url from 'url';
const __filename = url.fileURLToPath(import.meta.url);
const __dirname = url.fileURLToPath(new URL('.', import.meta.url));
const projectRoot = argv[2] || path.join(__dirname, "..")
// Add deps to pkg.json
const packageJSON = JSON.parse(fs.readFileSync(path.join(projectRoot, "package.json"), "utf8"))
packageJSON.devDependencies = Object.assign(packageJSON.devDependencies, {
"svelte-check": "^3.0.0",
"svelte-preprocess": "^5.0.0",
"@rollup/plugin-typescript": "^11.0.0",
"typescript": "^4.9.0",
"tslib": "^2.5.0",
"@tsconfig/svelte": "^3.0.0"
})
// Add script for checking
packageJSON.scripts = Object.assign(packageJSON.scripts, {
"check": "svelte-check"
})
// Write the package JSON
fs.writeFileSync(path.join(projectRoot, "package.json"), JSON.stringify(packageJSON, null, " "))
// mv src/main.js to main.ts - note, we need to edit rollup.config.js for this too
const beforeMainJSPath = path.join(projectRoot, "src", "main.js")
const afterMainTSPath = path.join(projectRoot, "src", "main.ts")
fs.renameSync(beforeMainJSPath, afterMainTSPath)
// Switch the app.svelte file to use TS
const appSveltePath = path.join(projectRoot, "src", "App.svelte")
let appFile = fs.readFileSync(appSveltePath, "utf8")
appFile = appFile.replace("
{#if isOpen}