Repository: krohling/bondai
Branch: main
Commit: b16cc11c06b2
Files: 186
Total size: 410.8 KB
Directory structure:
gitextract_tr4w9k_j/
├── .github/
│ └── workflows/
│ ├── deploy-website.yaml
│ └── deploy.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── bondai/
│ ├── __init__.py
│ ├── agents/
│ │ ├── __init__.py
│ │ ├── agent.py
│ │ ├── compression/
│ │ │ ├── __init__.py
│ │ │ ├── conversation_summarizer.py
│ │ │ ├── message_summarizer.py
│ │ │ └── prompts/
│ │ │ ├── conversation_summarizer_prompt_template.md
│ │ │ └── message_summarizer_prompt_template.md
│ │ ├── conversation_member.py
│ │ ├── conversational_agent.py
│ │ ├── group_chat/
│ │ │ ├── __init__.py
│ │ │ ├── group_conversation.py
│ │ │ ├── group_conversation_config.py
│ │ │ └── user_proxy.py
│ │ ├── messages.py
│ │ ├── prompts/
│ │ │ ├── __init__.py
│ │ │ ├── agent_message_prompt_template.md
│ │ │ ├── conversational_agent_system_prompt_template.md
│ │ │ ├── default_persona.py
│ │ │ └── react_agent_system_prompt_template.md
│ │ └── util.py
│ ├── api/
│ │ ├── __init__.py
│ │ ├── agent_wrapper.py
│ │ ├── api_error.py
│ │ ├── api_user_proxy.py
│ │ ├── client.py
│ │ ├── routes.py
│ │ ├── server.py
│ │ └── settings.py
│ ├── cli/
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── default_tools.py
│ │ └── personas/
│ │ ├── __init__.py
│ │ └── user_liaison_agent.py
│ ├── main.py
│ ├── memory/
│ │ ├── __init__.py
│ │ ├── archival/
│ │ │ ├── __init__.py
│ │ │ ├── datasources.py
│ │ │ └── tools.py
│ │ ├── conversation/
│ │ │ ├── __init__.py
│ │ │ ├── datasources.py
│ │ │ └── tools.py
│ │ ├── core/
│ │ │ ├── __init__.py
│ │ │ ├── datasources.py
│ │ │ └── tools.py
│ │ ├── memory_manager.py
│ │ └── prompts/
│ │ └── default_prompt_template.md
│ ├── models/
│ │ ├── __init__.py
│ │ ├── embedding_model.py
│ │ ├── llm.py
│ │ └── openai/
│ │ ├── __init__.py
│ │ ├── default_openai_connection_params.py
│ │ ├── env_vars.py
│ │ ├── openai_connection_params.py
│ │ ├── openai_embedding_model.py
│ │ ├── openai_llm.py
│ │ ├── openai_models.py
│ │ └── openai_wrapper.py
│ ├── prompt/
│ │ ├── __init__.py
│ │ ├── default_prompt_builder.py
│ │ ├── default_prompt_template.md
│ │ ├── jinja_prompt_builder.py
│ │ └── prompt_builder.py
│ ├── tools/
│ │ ├── __init__.py
│ │ ├── agent_tool.py
│ │ ├── alpaca_markets/
│ │ │ ├── __init__.py
│ │ │ ├── create_order.py
│ │ │ ├── env_vars.py
│ │ │ ├── get_account.py
│ │ │ ├── list_positions.py
│ │ │ └── response_formatter.py
│ │ ├── bland_ai/
│ │ │ ├── __init__.py
│ │ │ └── bland_ai_tools.py
│ │ ├── conversational/
│ │ │ ├── __init__.py
│ │ │ └── conversational_tools.py
│ │ ├── dalle_tool.py
│ │ ├── database/
│ │ │ ├── __init__.py
│ │ │ └── db_query.py
│ │ ├── file/
│ │ │ ├── __init__.py
│ │ │ ├── file_query.py
│ │ │ ├── file_read.py
│ │ │ └── file_write.py
│ │ ├── gmail/
│ │ │ ├── __init__.py
│ │ │ ├── list_emails.py
│ │ │ └── query_emails.py
│ │ ├── langchain_tool.py
│ │ ├── python_repl_tool.py
│ │ ├── response_query.py
│ │ ├── search/
│ │ │ ├── __init__.py
│ │ │ ├── duck_duck_go_search.py
│ │ │ └── google_search.py
│ │ ├── shell_tool.py
│ │ ├── task_completed_tool.py
│ │ ├── tool.py
│ │ ├── vision/
│ │ │ ├── __init__.py
│ │ │ └── image_analysis_tool.py
│ │ └── website/
│ │ ├── __init__.py
│ │ ├── download_file.py
│ │ ├── extract_hyperlinks.py
│ │ ├── html_query.py
│ │ └── query.py
│ └── util/
│ ├── __init__.py
│ ├── caching/
│ │ ├── __init__.py
│ │ └── llm_cache.py
│ ├── document_parser.py
│ ├── event_mixin.py
│ ├── misc.py
│ ├── model_logger.py
│ ├── runnable.py
│ ├── semantic_search.py
│ └── web.py
├── docker/
│ ├── Dockerfile
│ └── docker-compose.yml
├── requirements.txt
├── sample.env
├── scripts/
│ └── bondai
├── setup.py
├── tests/
│ ├── api-client/
│ │ └── test_api_client.py
│ ├── conversational/
│ │ ├── hierarchical_conversation.py
│ │ └── single_agent.py
│ ├── debug/
│ │ └── test_error.py
│ ├── getting-started/
│ │ └── example-1.py
│ ├── memory/
│ │ ├── __init__.py
│ │ ├── single_agent_with_memory.py
│ │ └── util.py
│ └── vision/
│ └── single_agent_with_vision.py
└── website/
├── .gitignore
├── README.md
├── babel.config.js
├── docs/
│ ├── agent-memory/
│ │ ├── agent-memory.md
│ │ ├── archival-memory.md
│ │ ├── conversation-memory.md
│ │ ├── core-memory.md
│ │ └── memory-manager.md
│ ├── agents/
│ │ ├── agents.md
│ │ ├── conversational-agent.md
│ │ └── react-agent.md
│ ├── api-spec/
│ │ ├── _category_.json
│ │ ├── add-agent-tool.md
│ │ ├── api-client.md
│ │ ├── create-agent.md
│ │ ├── get-agent.md
│ │ ├── get-tools.md
│ │ ├── getting-started.md
│ │ ├── list-agents.md
│ │ ├── remove-agent-tool.md
│ │ ├── send-message.md
│ │ ├── stop-agent.md
│ │ └── ws-events.md
│ ├── azure.md
│ ├── cli.md
│ ├── docker.md
│ ├── examples/
│ │ ├── _category_.json
│ │ ├── api-client.md
│ │ ├── code-interpreter.md
│ │ ├── home-automation.md
│ │ ├── investor-agent.md
│ │ └── online-research/
│ │ ├── metformin-research.md
│ │ └── online-research.md
│ ├── getting-started.md
│ ├── intro.md
│ ├── multi-agent-systems/
│ │ ├── examples.md
│ │ ├── group-conversation.md
│ │ ├── multi-agent-systems.md
│ │ └── team-conversation-config.md
│ └── tools/
│ ├── _category_.json
│ ├── custom-tool.md
│ └── getting-started.md
├── docusaurus.config.js
├── package.json
├── sidebars.js
├── src/
│ ├── components/
│ │ └── HomepageFeatures/
│ │ ├── index.js
│ │ └── styles.module.css
│ ├── css/
│ │ └── custom.css
│ └── pages/
│ ├── index.js
│ ├── index.module.css
│ └── markdown-page.md
└── static/
└── .nojekyll
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/deploy-website.yaml
================================================
name: Deploy BondAI Website
on:
push:
branches:
- main
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Node.js
uses: actions/setup-node@v2
with:
node-version: '19.7'
- name: Install Docusaurus dependencies
run: |
cd website
npm install
npm run build
- name: Deploy to S3
uses: jakejarvis/s3-sync-action@master
with:
args: --acl public-read --follow-symlinks --delete
env:
AWS_S3_BUCKET: bondai-docs
AWS_ACCESS_KEY_ID: ${{ secrets.DOCS_DEPLOY_AWS_ACCESS_KEY }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DOCS_DEPLOY_AWS_SECRET_ACCESS_KEY }}
AWS_REGION: 'us-west-2'
SOURCE_DIR: 'website/build'
- name: Invalidate CloudFront distribution
uses: chetan/invalidate-cloudfront-action@v1.3
env:
DISTRIBUTION: E1JJN112WBIR8P
PATHS: '/*'
AWS_REGION: 'us-west-2'
AWS_ACCESS_KEY_ID: ${{ secrets.DOCS_DEPLOY_AWS_ACCESS_KEY }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DOCS_DEPLOY_AWS_SECRET_ACCESS_KEY }}
================================================
FILE: .github/workflows/deploy.yaml
================================================
name: Deploy to PyPI and DockerHub
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+[a-zA-Z0-9]*'
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel twine setuptools_scm
- name: Build and deploy to PyPI
run: |
python setup.py sdist bdist_wheel
twine upload dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Determine if pre-release
id: prerelease_check
run: |
if [[ ${{ github.ref_name }} =~ [a-zA-Z] ]]; then
echo "This is a pre-release version."
echo "tag_list=krohling/bondai:${{ github.ref_name }}" >> "$GITHUB_OUTPUT"
else
echo "This is a stable release version."
echo "tag_list=krohling/bondai:${{ github.ref_name }},krohling/bondai:latest" >> "$GITHUB_OUTPUT"
fi
- name: Build and push Docker image
uses: docker/build-push-action@v2
with:
context: ./docker
push: true
tags: ${{ steps.prerelease_check.outputs.tag_list }}
platforms: linux/amd64,linux/arm64
================================================
FILE: .gitignore
================================================
*.DS_Store
response_query_storage
gmail-token.pickle
.debug
.memory
.cache
misc
ui/.next
ui/node_modules
ui/agent-volume
ui/data-volume
ui/app/backups
ui/tools
ui/app/_backup
ui/app/test/
docker/agent-volume/
.vscode
website/build
node_modules/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
!/ui/lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
notes.txt
================================================
FILE: .pre-commit-config.yaml
================================================
repos:
- repo: https://github.com/psf/black
rev: 23.12.1  # pin a release tag; black removed the mutable "stable" ref, so `rev: stable` fails on fresh clones
hooks:
- id: black
================================================
FILE: CONTRIBUTING.md
================================================
# Contribution Instruction & Guidelines
Hello there! Any kind of contribution to **BondAI** is most welcome!
- If you have a question, please use GitHub
[discussions](https://github.com/krohling/bondai/discussions).
- If you found a bug or have a feature request, please use GitHub
[issues](https://github.com/krohling/bondai/issues).
- If you fixed a bug or implemented a new feature, please do a pull request. If it
is a larger change or addition, it would be great to first discuss it through an
[issue](https://github.com/krohling/bondai/issues).
## Development Setup
Warning: If you run **BondAI** on your own system, tools that interact with the file system will have full access to your local disk! I highly recommend running and testing inside of a Docker container.
Always be careful when approving any code!
## Tools
When you contribute code, please use **black** for code formatting.
## Branching & Release Strategy
The default branch is called main.
It contains the latest features, which would be ready for deployment.
It is not possible to push to it directly.
Instead, for every feature, a branch should be created, which will then be merged back into main with a pull request.
================================================
FILE: LICENSE
================================================
Copyright 2023 Kevin Rohling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
================================================
FILE: MANIFEST.in
================================================
include bondai/prompt/default_prompt_template.md
include bondai/cli/onboarding_prompt_template.md
================================================
FILE: README.md
================================================
Build highly capable Single and Multi-Agent Systems.
# BondAI Homepage
Checkout the BondAI Homepage ([https://bondai.dev](https://bondai.dev)) for in depth documentation, examples and API specification.
# Getting Started
There are 3 ways to use BondAI:
1) 🛠️ **Command Line Interface (CLI)** - This is the easiest way to get up and running fast. Run BondAI on your command line with a pre-configured set of tools.
2) 🐋 **Docker** - Running BondAI in a Docker container is recommended if you plan on using tools that run code or directly access your shell.
3) 🏗️ **Start Coding with BondAI** - Integrate BondAI into your own codebase and start building your own agents.
## 🚀 Installation
Installing BondAI is easy:
```bash
pip install bondai
```
## 🛠️ Command Line Interface (CLI)
Once you've installed BondAI using `pip install bondai` the CLI will be available as an executable accessible simply by running `bondai` in your shell. [Learn more about all of the tools and options available through the CLI](https://bondai.dev/docs/cli).
Before running `bondai` you will need to set the OPENAI_API_KEY environment variable.
```bash
export OPENAI_API_KEY=sk-XXXXXXXXXX
```
Once the environment variable has been set you can run `bondai` to start the CLI.
```bash
Loading BondAI...
******************ENTERING CHAT******************
You are entering a chat with BondAI...
You can exit any time by typing 'exit'.
Hello! I'm BondAI, your friendly and helpful assistant. I'm here to assist you with any tasks or questions you might have. How can I assist you today?
I want you to write a story about unicorns and save it to a file named unicorns.md.
Using tool file_write: Writing a story about unicorns and saving it to a file named unicorns.md
Using tool final_answer...
A story about unicorns has been successfully written and saved to a file named unicorns.md. The story is set in an enchanted forest and describes the magical and majestic nature of unicorns, their daily routines, and their harmonious relationship with other creatures in the forest.
```
## 🐋 Docker
BondAI Docker images are available on [DockerHub here](https://hub.docker.com/r/krohling/bondai). If you intend to use tools that run arbitrary code (*PythonREPLTool*) or access your shell (*ShellTool*) it is highly recommended that you run BondAI in a Docker container as these tools can damage your machine.
Before running the BondAI Docker container it is recommended that you create a directory named 'agent-volume' and mount it as a volume on the container. This will be used as the Agent's working directory and allows you to easily share files with the Agent.
```bash
mkdir agent-volume
docker pull krohling/bondai:latest
docker run -it --rm \
-v ./agent-volume:/agent-volume \
-w /agent-volume \
-e OPENAI_API_KEY=sk-XXXXXXXXXX \
krohling/bondai:latest bondai
```
## 🔥 Start Coding with BondAI
BondAI has a straightforward API for creating powerful AI Agents. Check out our [examples](https://bondai.dev/docs/category/examples) for ideas on how to get started. Remember to set your *OPENAI_API_KEY* environment variable before running your BondAI Agent.
```python
from bondai.agents import Agent
from bondai.tools.search import DuckDuckGoSearchTool
from bondai.tools.website import WebsiteQueryTool
from bondai.tools.file import FileWriteTool
task = """I want you to research the usage of Metformin as a drug to treat aging and aging related illness.
You should only use reputable information sources, ideally peer reviewed scientific studies.
I want you to summarize your findings in a document named metformin.md and include links to the references and resources you used to find the information.
Additionally, the last section of your document you should provide a recommendation for a 43 year old male, in good health and who regularly exercises as to whether he would benefit from taking Metformin.
You should explain your recommendation and justify it with sources.
Finally, you should highlight potential risks and tradeoffs from taking the medication."""
Agent(tools=[
DuckDuckGoSearchTool(),
WebsiteQueryTool(),
FileWriteTool()
]).run(task)
```
## BondAI Integrations
BondAI comes out of the box with a powerful set of integrations.
| | | |
| -------- | ------- |------- |
|
| **OpenAI** | BondAI supports any combination of OpenAI models and services including GPT-4, GPT-3.5, Dalle-E 3, and Embeddings. |
|
| **Microsoft Azure** | BondAI fully supports connectivity to GPT-N, Dalle-E and Embedding APIs through [Microsoft's Azure OpenAI services](https://azure.microsoft.com/en-us/products/ai-services/openai-service). |
|
| **Google Search** | Allows BondAI to search the internet. [Requires a Google Search API Key and CSE ID](https://developers.google.com/custom-search/v1/introduction) |
|
| **DuckDuckGo** | Allows BondAI to search the internet. No API keys required. |
|
| **Alpaca Markets** | Allows BondAI to buy and sell stocks and crypto. [Requires an Alpaca Markets account.](https://alpaca.markets/) |
|
| **PostgreSQL** | BondAI can automatically extract the schema from a Postgres DB and process natural language queries. |
|
| **Bland AI** | Allows BondAI to make phone calls and process/retrieve call transcripts. [Requires a Bland.ai account.](https://www.bland.ai/) |
|
| **Gmail** | Allows BondAI to search and read emails. |
================================================
FILE: bondai/__init__.py
================================================
================================================
FILE: bondai/agents/__init__.py
================================================
from .conversational_agent import ConversationalAgent
from .agent import Agent, DEFAULT_MESSAGE_PROMPT_TEMPLATE
from .conversation_member import ConversationMember, ConversationMemberEventNames
from .messages import (
AgentMessage,
SystemMessage,
ConversationMessage,
ToolUsageMessage,
AgentMessageList,
message_to_dict,
USER_MEMBER_NAME,
)
from .util import (
AgentStatus,
AgentEventNames,
AgentException,
BudgetExceededException,
MaxStepsExceededException,
parse_response_content_message,
)
__all__ = [
"ConversationalAgent",
"Agent",
"parse_response_content_message",
"DEFAULT_MESSAGE_PROMPT_TEMPLATE",
"AgentStatus",
"AgentEventNames",
"AgentException",
"BudgetExceededException",
"MaxStepsExceededException",
"ConversationMember",
"ConversationMemberEventNames",
"AgentMessage",
"SystemMessage",
"ConversationMessage",
"ToolUsageMessage",
"AgentMessageList",
"message_to_dict",
"USER_MEMBER_NAME",
]
================================================
FILE: bondai/agents/agent.py
================================================
import os
import uuid
import traceback
from pydantic import BaseModel
from datetime import datetime
from typing import Dict, List, Tuple, Callable
from bondai.util import EventMixin, Runnable, load_local_resource
from bondai.tools import Tool, ResponseQueryTool
from bondai.models import LLM, EmbeddingModel
from bondai.memory import MemoryManager
from bondai.prompt import JinjaPromptBuilder
from bondai.models.openai import (
OpenAILLM,
OpenAIEmbeddingModel,
OpenAIModelNames,
get_total_cost,
)
from .conversation_member import ConversationMember
from .messages import AgentMessage, AgentMessageList, SystemMessage, ToolUsageMessage
from .compression import summarize_conversation, summarize_messages
from .util import (
AgentStatus,
AgentEventNames,
AgentException,
BudgetExceededException,
MaxStepsExceededException,
ContextLengthExceededException,
count_request_tokens,
format_llm_messages,
execute_tool,
)
DEFAULT_MAX_TOOL_RETRIES = 5
DEFAULT_MAX_TOOL_RESPONSE_TOKENS = 2000
DEFAULT_SYSTEM_PROMPT_TEMPLATE = load_local_resource(
__file__, os.path.join("prompts", "react_agent_system_prompt_template.md")
)
DEFAULT_MESSAGE_PROMPT_TEMPLATE = load_local_resource(
__file__, os.path.join("prompts", "agent_message_prompt_template.md")
)
class FinalAnswerParameters(BaseModel):
    # Pydantic schema for the single argument of FinalAnswerTool.
    # `results`: a detailed, free-form description of the completed task's outcome.
    results: str
class FinalAnswerTool(Tool):
    """Built-in tool an agent calls to signal that its task is complete.

    Returning ``True`` as the second element of ``run``'s result tells the
    agent's tool loop to stop and surface ``results`` as the final answer.
    """

    def __init__(self):
        tool_description = (
            "Use the final_answer tool once you have completed your TASK. "
            "Provide a highly detailed description of the results of your "
            "task in the 'results' parameter."
        )
        super().__init__("final_answer", tool_description, FinalAnswerParameters)

    def run(self, results: str) -> Tuple[str, bool]:
        # Echo the results back; True marks the tool loop as finished.
        return (results, True)
class Agent(EventMixin, Runnable):
def __init__(
self,
llm: LLM | None = None,
embedding_model: EmbeddingModel | None = None,
tools: List[Tool] | None = None,
quiet: bool = True,
allowed_events: List[str] | None = None,
messages: List[AgentMessage] | None = None,
system_prompt_sections: List[Callable[[], str]] | None = None,
system_prompt_builder: Callable[..., str] = None,
message_prompt_builder: Callable[..., str] = None,
memory_manager: MemoryManager | None = None,
max_context_length: int = None,
max_context_pressure_ratio: float = 0.8,
max_tool_retries: int = DEFAULT_MAX_TOOL_RETRIES,
max_tool_response_tokens=DEFAULT_MAX_TOOL_RESPONSE_TOKENS,
enable_context_compression: bool = False,
enable_final_answer_tool: bool = True,
):
Runnable.__init__(self)
if allowed_events is None:
allowed_events = [
AgentEventNames.TOOL_SELECTED,
AgentEventNames.TOOL_COMPLETED,
AgentEventNames.TOOL_ERROR,
AgentEventNames.STREAMING_CONTENT_UPDATED,
AgentEventNames.STREAMING_FUNCTION_UPDATED,
AgentEventNames.CONTEXT_COMPRESSION_REQUESTED,
]
EventMixin.__init__(self, allowed_events=allowed_events)
if llm is None:
llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
if embedding_model is None:
embedding_model = OpenAIEmbeddingModel(
OpenAIModelNames.TEXT_EMBEDDING_ADA_002
)
if tools is None:
tools = []
if system_prompt_sections is None:
system_prompt_sections = []
if messages is None:
messages = []
self._id: str = str(uuid.uuid4())
self._status: AgentStatus = AgentStatus.IDLE
self._messages = AgentMessageList(messages=messages)
self._llm: LLM = llm
self._embedding_model: EmbeddingModel = embedding_model
self._tools: List[Tool] = tools
self._quiet: bool = quiet
self._system_prompt_sections: List[Callable[[], str]] = system_prompt_sections
self._system_prompt_builder: Callable[..., str] = system_prompt_builder
self._message_prompt_builder: Callable[..., str] = message_prompt_builder
self._memory_manager = memory_manager
self._max_context_length = (
max_context_length if max_context_length else (self._llm.max_tokens * 0.95)
)
self._max_context_pressure_ratio = max_context_pressure_ratio
self._max_tool_retries = max_tool_retries
self._max_tool_response_tokens = max_tool_response_tokens
self._enable_context_compression = enable_context_compression
if self._memory_manager:
self._tools.extend(self._memory_manager.tools)
self._system_prompt_sections.append(self._memory_manager)
if self._system_prompt_builder is None:
self._system_prompt_builder = JinjaPromptBuilder(
DEFAULT_SYSTEM_PROMPT_TEMPLATE
)
if self._message_prompt_builder is None:
self._message_prompt_builder = JinjaPromptBuilder(
DEFAULT_MESSAGE_PROMPT_TEMPLATE
)
if enable_final_answer_tool:
self._tools.append(FinalAnswerTool())
@property
def id(self) -> str:
return self._id
@property
def status(self) -> AgentStatus:
return self._status
@property
def tools(self) -> List[Tool]:
return self._tools
def clear_messages(self):
if self._status == AgentStatus.RUNNING:
raise AgentException(
"Cannot reset memory while agent is in a running state."
)
self._messages.clear()
def add_tool(self, tool: Tool):
if not any([t.name == tool.name for t in self._tools]):
self.tools.append(tool)
def remove_tool(self, tool_name: str):
self._tools = [t for t in self._tools if t.name != tool_name]
def to_dict(self) -> Dict:
return {"id": self.id, "tools": [t.name for t in self._tools]}
def save_state(self) -> Dict:
if self._status == AgentStatus.RUNNING:
raise AgentException("Cannot save agent state while it is running.")
state = {"tools": {}}
for tool in self._tools:
state["tools"][tool.name] = tool.save_state()
return state
def load_state(self, state: Dict):
if self._status == AgentStatus.RUNNING:
raise AgentException("Cannot load agent state while it is running.")
for tool in self._tools:
if tool.name in state["tools"]:
tool.load_state(state["tools"][tool.name])
def _is_context_pressure_too_high(
self,
llm_messages: List[Dict[str, str]],
tools: List[Tool] | None = None,
) -> float:
if tools is None:
tools = []
context_pressure_ratio = float(
count_request_tokens(self._llm, llm_messages, tools)
) / float(self._max_context_length)
return context_pressure_ratio > self._max_context_pressure_ratio
def _get_llm_response(
self,
messages: List[Dict] | None = None,
tools: List[Tool] | None = None,
content_stream_callback: Callable[[str], None] | None = None,
function_stream_callback: Callable[[str], None] | None = None,
) -> (str | None, Dict | None):
if messages is None:
messages = []
if tools is None:
tools = []
request_tokens = count_request_tokens(
llm=self._llm, messages=messages, tools=tools
)
if request_tokens > self._llm.max_tokens:
raise ContextLengthExceededException(
f"Context length ({request_tokens}) exceeds maximum tokens allowed by LLM: {self._llm.max_tokens}"
)
llm_functions = list(map(lambda t: t.get_tool_function(), tools))
if (
self._llm.supports_streaming
): # and (any([t.supports_streaming for t in tools]) or content_stream_callback):
def _function_stream_callback(function_name, arguments_buffer):
streaming_tools: [Tool] = [
t for t in tools if t.name == function_name and t.supports_streaming
]
if len(streaming_tools) > 0:
tool: Tool = streaming_tools[0]
tool.handle_stream_update(arguments_buffer)
if function_stream_callback:
function_stream_callback(function_name, arguments_buffer)
self._trigger_event(
AgentEventNames.STREAMING_FUNCTION_UPDATED,
self,
function_name,
arguments_buffer,
)
def _content_stream_callback(content_buffer):
if content_stream_callback:
content_stream_callback(content_buffer)
self._trigger_event(
AgentEventNames.STREAMING_CONTENT_UPDATED, self, content_buffer
)
llm_response, llm_response_function = self._llm.get_streaming_completion(
messages=messages,
functions=llm_functions,
function_stream_callback=_function_stream_callback,
content_stream_callback=_content_stream_callback,
)
else:
llm_response, llm_response_function = self._llm.get_completion(
messages=messages,
functions=llm_functions,
# function_stream_callback=function_stream_callback,
# content_stream_callback=content_stream_callback
)
return llm_response, llm_response_function
def run(
self,
task: str,
max_steps: int = None,
max_budget: float = None,
) -> ToolUsageMessage | str:
if self._status == AgentStatus.RUNNING:
raise AgentException("Cannot start agent while it is in a running state.")
self._status = AgentStatus.RUNNING
try:
return self._run_tool_loop(
tools=self._tools,
task=task,
starting_cost=get_total_cost(),
max_budget=max_budget,
max_steps=max_steps,
)
finally:
self._status = AgentStatus.IDLE
def run_async(
self,
task: str,
max_steps: int = None,
max_budget: float = None,
):
"""Runs the agent's task in a separate thread."""
if self._status == AgentStatus.RUNNING:
raise AgentException("Cannot start agent while it is in a running state.")
args = (task, max_steps, max_budget)
self._start_execution_thread(target=self.run, args=args)
def stop(self, timeout=10):
"""Gracefully stops the thread, with a timeout."""
self._force_stop = True
for tool in self._tools:
tool.stop()
super().stop(timeout=timeout)
def _run_tool_loop(
    self,
    tools: List[Tool],
    starting_cost: float,
    max_budget: float = None,
    max_steps: int = None,
    max_tool_retries: int = None,
    task: str | None = None,
    prompt_vars: Dict | None = None,
    return_conversational_responses: bool = False,
    retain_tool_messages_in_context: bool = True,
    addition_context_messages: List[AgentMessage] | None = None,
    conversation_members: List[ConversationMember] | None = None,
    content_stream_callback: Callable[[str], None] | None = None,
    function_stream_callback: Callable[[str], None] | None = None,
) -> ToolUsageMessage | str:
    """Core agent loop: build an LLM context, request the next action, run
    the selected tool, and feed results/errors back into the context until
    a tool halts the agent, a conversational reply is produced, or a
    budget/step/retry limit trips.

    Returns the final ToolUsageMessage (halting tool or repeated failure)
    or, when `return_conversational_responses` is True, the raw response
    text. Raises BudgetExceededException / MaxStepsExceededException on
    limit violations and AgentException on a forced stop or malformed
    responses beyond the retry budget.
    """
    if addition_context_messages is None:
        addition_context_messages = []
    if conversation_members is None:
        conversation_members = []
    if max_tool_retries is None:
        max_tool_retries = self._max_tool_retries
    error_count = 0  # consecutive failures; reset on any tool success
    step_count = 0
    last_error_message = None
    local_messages = []  # context accrued during this loop invocation only
    self._force_stop = False
    # Oversized tool outputs are parked behind this tool so the LLM can
    # query them instead of blowing up the context window.
    response_query_tool = ResponseQueryTool(
        llm=self._llm, embedding_model=self._embedding_model
    )

    def append_message(message):
        # Record a message in the loop-local context, optionally in the
        # agent's persistent history, and in conversation memory.
        if isinstance(message, SystemMessage):
            # NOTE(review): despite the variable name, this filter selects
            # the NON-SystemMessages and then removes them from the local
            # context — confirm whether the `not` was intended (i.e. the
            # goal may have been to dedupe prior SystemMessages).
            system_messages = [
                m for m in local_messages if not isinstance(m, SystemMessage)
            ]
            for m in system_messages:
                local_messages.remove(m)
        local_messages.append(message)
        if retain_tool_messages_in_context:
            self._messages.add(message)
        if self._memory_manager and self._memory_manager.conversation_memory:
            self._memory_manager.conversation_memory.add(message)

    while not self._force_stop:
        step_count += 1
        # Enforce optional cost and step budgets before doing any work.
        if max_budget and get_total_cost() - starting_cost > max_budget:
            raise BudgetExceededException()
        if max_steps and step_count > max_steps:
            raise MaxStepsExceededException()
        # Expose the response-query tool once there is something to query.
        if (
            len(response_query_tool.responses) > 0
            and response_query_tool not in tools
        ):
            tools.append(response_query_tool)
        if self._enable_context_compression:
            self._compress_llm_context(
                tools=tools,
                last_error_message=last_error_message,
                conversation_members=conversation_members,
                additional_context_messages=addition_context_messages
                + local_messages,
                prompt_vars=prompt_vars,
            )
        llm_context = self._build_llm_context(
            messages=AgentMessageList(
                self._messages + addition_context_messages + local_messages
            ),
            tools=tools,
            task=task,
            last_error_message=last_error_message,
            conversation_members=conversation_members,
            prompt_vars=prompt_vars,
        )
        llm_response_content, llm_response_function = self._get_llm_response(
            messages=llm_context,
            tools=tools,
            content_stream_callback=content_stream_callback,
            function_stream_callback=function_stream_callback,
        )
        # print(llm_response_content)
        last_error_message = None
        # The LLM tried to "call" a conversation member as if it were a
        # tool; coach it toward the supported messaging formats.
        if llm_response_function and any(
            [
                m.name == llm_response_function.get("tool_name")
                for m in conversation_members
            ]
        ):
            message = f"""MessageSendFailure: You attempted to send a message to {llm_response_function.get('tool_name')} but this message failed.
To send a message to {llm_response_function.get('tool_name')} you must use the 'send_message' tool or use this format in your response:
```
{llm_response_function.get('tool_name')}: Include your message here.)
```
"""
            append_message(SystemMessage(message=message))
        if llm_response_function:
            # NOTE(review): the membership check above reads key
            # "tool_name" while this lookup uses "name" — confirm which
            # key the LLM layer actually emits.
            tool_message = ToolUsageMessage(
                tool_name=llm_response_function["name"],
                tool_arguments=llm_response_function.get("arguments") or {},
            )
            self._trigger_event(AgentEventNames.TOOL_SELECTED, self, tool_message)
            self._handle_llm_function(tool_message=tool_message, tools=tools)
            # Park oversized string outputs behind the response-query tool.
            if (
                isinstance(tool_message.tool_output, str)
                and self._llm.count_tokens(tool_message.tool_output)
                > self._max_tool_response_tokens
            ):
                response_id = response_query_tool.add_response(
                    tool_message.tool_output
                )
                tool_message.tool_output = f"The result from this tool was greater than {self._max_tool_response_tokens} tokens and could not be displayed. However, you can use the response_query tool to ask questions about the content of this response. Just use response_id = {response_id}."
            append_message(tool_message)
            if tool_message.success:
                error_count = 0
                self._trigger_event(
                    AgentEventNames.TOOL_COMPLETED, self, tool_message
                )
                # Halting tools (e.g. final answer) end the loop.
                if tool_message.agent_halted:
                    return tool_message
            else:
                error_count += 1
                last_error_message = str(tool_message.error)
                message = "ToolUsageError: Your last tool usage failed and MUST BE CORRECTED. If this error is not corrected you will not be able to proceed."
                append_message(SystemMessage(message=message))
                self._trigger_event(AgentEventNames.TOOL_ERROR, self, tool_message)
                if error_count >= max_tool_retries:
                    return tool_message
        elif llm_response_content and return_conversational_responses:
            # Plain-text reply accepted when the caller asked for it.
            return llm_response_content
        else:
            # No function and no usable content: push an error back and retry.
            error_count += 1
            message = "InvalidResponseError: The response does not conform to the required format. A function selection was expected, but none was provided."
            append_message(SystemMessage(message=message))
            if error_count >= max_tool_retries:
                raise AgentException(message)
    if self._force_stop:
        self._force_stop = False
        raise AgentException("Agent was forcibly stopped.")
def _build_llm_context(
    self,
    messages: AgentMessageList,
    tools: List[Tool] | None = None,
    task: str | None = None,
    last_error_message: str | None = None,
    conversation_members: List[ConversationMember] | None = None,
    truncate_context: bool = True,
    prompt_vars: Dict | None = None,
) -> (str | None, Dict | None):
    """Assemble the message payload sent to the LLM.

    Builds the system prompt from the configured sections and builder,
    renders the message history, and (optionally) drops the oldest
    messages one at a time until context pressure is acceptable.

    NOTE(review): the return annotation suggests a (content, function)
    tuple but the method returns only the formatted message context —
    confirm and correct the annotation.
    """
    if tools is None:
        tools = []
    if conversation_members is None:
        conversation_members = []
    if prompt_vars is None:
        prompt_vars = {}
    # Prompt sections may be static strings or zero-arg callables
    # evaluated fresh for every context build.
    prompt_sections = []
    for s in self._system_prompt_sections:
        if callable(s):
            prompt_sections.append(s())
        else:
            prompt_sections.append(s)
    system_prompt: str = self._system_prompt_builder(
        conversation_members=conversation_members,
        tools=tools,
        task=task,
        prompt_sections=prompt_sections,
        error_message=last_error_message,
        **prompt_vars,
    )
    # print(system_prompt)
    llm_context = format_llm_messages(
        system_prompt, messages, self._message_prompt_builder
    )
    if truncate_context:
        # Evict oldest messages first until the context fits the model.
        reduced_messages = AgentMessageList(messages)
        while (
            self._is_context_pressure_too_high(llm_context, tools)
            and len(reduced_messages) > 0
        ):
            reduced_messages.remove(reduced_messages[0])
            llm_context = format_llm_messages(
                system_prompt, reduced_messages, self._message_prompt_builder
            )
    return llm_context
def _compress_llm_context(
    self,
    tools: List[Tool] | None = None,
    last_error_message: str | None = None,
    conversation_members: List[ConversationMember] | None = None,
    additional_context_messages: List[AgentMessage] | None = None,
    prompt_vars: Dict | None = None,
) -> List[AgentMessage]:
    """Relieve context pressure via progressively aggressive strategies.

    1) Summarize long individual messages in place; 2) collapse all but
    the newest message into a single SummaryMessage; 3) emit
    CONTEXT_COMPRESSION_REQUESTED so an external owner (e.g. a group
    conversation) can prune shared context. Pressure is re-measured after
    each step and escalation stops as soon as it is relieved.

    NOTE(review): annotated to return List[AgentMessage] but every path
    returns None — the method works by mutating self._messages in place.
    """
    if tools is None:
        tools = []
    if conversation_members is None:
        conversation_members = []
    if additional_context_messages is None:
        additional_context_messages = []
    all_context_messages = AgentMessageList(
        self._messages + additional_context_messages
    )
    # Measure pressure without truncation so we see the true overflow.
    llm_context = self._build_llm_context(
        messages=all_context_messages,
        tools=tools,
        last_error_message=last_error_message,
        conversation_members=conversation_members,
        truncate_context=False,
        prompt_vars=prompt_vars,
    )
    if self._is_context_pressure_too_high(llm_context, tools):
        # Try summarizing individual messages
        # TODO: Give the agent an opportunity to save information to Archival database
        summarize_messages(
            llm=self._llm,
            messages=self._messages[:-1],
            message_prompt_builder=self._message_prompt_builder,
        )
        llm_context = self._build_llm_context(
            messages=all_context_messages,
            tools=tools,
            last_error_message=last_error_message,
            conversation_members=conversation_members,
            truncate_context=False,
            prompt_vars=prompt_vars,
        )
        if self._is_context_pressure_too_high(llm_context, tools):
            # Try summarizing the entire conversation
            # (keep the latest message verbatim so the agent retains the
            # immediate task at hand).
            last_message = self._messages[-1]
            summary_message = summarize_conversation(
                llm=self._llm,
                messages=self._messages[:-1],
                message_prompt_builder=self._message_prompt_builder,
            )
            self._messages.clear()
            self._messages.add(summary_message)
            self._messages.add(last_message)
            all_context_messages = AgentMessageList(
                self._messages + additional_context_messages
            )
            llm_context = self._build_llm_context(
                messages=all_context_messages,
                tools=tools,
                last_error_message=last_error_message,
                conversation_members=conversation_members,
                truncate_context=False,
                prompt_vars=prompt_vars,
            )
            if self._is_context_pressure_too_high(llm_context, tools):
                # Fire a message for group conversation compression
                self._trigger_event(
                    AgentEventNames.CONTEXT_COMPRESSION_REQUESTED, self
                )
                llm_context = self._build_llm_context(
                    messages=all_context_messages,
                    tools=tools,
                    last_error_message=last_error_message,
                    conversation_members=conversation_members,
                    truncate_context=False,
                    prompt_vars=prompt_vars,
                )
                if self._is_context_pressure_too_high(llm_context, tools):
                    # All strategies exhausted; downstream truncation will
                    # have to cope.
                    print(
                        "Warning: Context compression failed to relieve pressure."
                    )
def _handle_llm_function(self, tool_message: ToolUsageMessage, tools: List[Tool]):
    """Execute the tool the LLM selected and record the outcome on
    `tool_message` (output, halt flag, success/error, completion time,
    incremental cost)."""
    cost_before = get_total_cost()
    try:
        result = execute_tool(
            tool_name=tool_message.tool_name,
            tool_arguments=tool_message.tool_arguments,
            tools=tools,
        )
        halted = False
        # A tool may return (output, agent_halted) to stop the agent loop.
        if isinstance(result, tuple):
            result, halted = result
        tool_message.agent_halted = halted
        tool_message.tool_output = result
        tool_message.success = True
    except Exception as e:
        # traceback.print_exc()
        # Failure is reported via the message; the loop decides on retries.
        tool_message.error = e
    tool_message.completed_at = datetime.now()
    tool_message.cost = get_total_cost() - cost_before
================================================
FILE: bondai/agents/compression/__init__.py
================================================
from .conversation_summarizer import summarize_conversation
from .message_summarizer import summarize_messages
__all__ = ["summarize_conversation", "summarize_messages"]
================================================
FILE: bondai/agents/compression/conversation_summarizer.py
================================================
import os
from typing import List
from bondai.models import LLM
from bondai.prompt import PromptBuilder, JinjaPromptBuilder
from bondai.util import load_local_resource
from bondai.agents.messages import (
AgentMessage,
SummaryMessage,
)
# Jinja template used to condense an entire conversation into one summary.
DEFAULT_SUMMARY_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "conversation_summarizer_prompt_template.md")
)
def summarize_conversation(
    llm: LLM,
    messages: List[AgentMessage],
    message_prompt_builder: PromptBuilder,
    summary_prompt_builder: PromptBuilder = JinjaPromptBuilder(
        DEFAULT_SUMMARY_PROMPT_TEMPLATE
    ),
) -> AgentMessage:
    """Condense an entire conversation into a single SummaryMessage.

    NOTE(review): an empty `messages` list returns `[]`, which does not
    match the declared AgentMessage return type — confirm callers handle
    that case before changing it.
    """
    if not messages:
        return []
    # Render each message through the per-message prompt builder first.
    rendered = [message_prompt_builder.build_prompt(message=m) for m in messages]
    # Ask the LLM for one summary over the rendered transcript.
    summary_prompt = summary_prompt_builder.build_prompt(messages=rendered)
    summary, _ = llm.get_completion(
        messages=[{"role": "system", "content": summary_prompt}]
    )
    # Keep the originals as children and reuse the final message's
    # timestamp so the summary sorts into the same position.
    return SummaryMessage(
        message=summary,
        children=list(messages),
        timestamp=messages[-1].timestamp,
    )
================================================
FILE: bondai/agents/compression/message_summarizer.py
================================================
import os
from typing import List
from concurrent.futures import ThreadPoolExecutor, as_completed
from bondai.models import LLM
from bondai.prompt import PromptBuilder, JinjaPromptBuilder
from bondai.util import load_local_resource
from bondai.agents.messages import AgentMessage, ConversationMessage, ToolUsageMessage
# Messages shorter than this (in characters) are left unsummarized.
MIN_SUMMARIZABLE_LENGTH = 250
# Jinja template used to summarize a single message in context.
DEFAULT_SUMMARY_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "message_summarizer_prompt_template.md")
)
def summarize_messages(
    llm: LLM,
    messages: List[AgentMessage],
    message_prompt_builder: PromptBuilder,
    summary_prompt_builder: PromptBuilder = JinjaPromptBuilder(
        DEFAULT_SUMMARY_PROMPT_TEMPLATE
    ),
    max_summary_words: int = 100,
) -> List[AgentMessage]:
    """Generate short summaries, in parallel, for every message that is
    long enough to benefit and not already summarized.

    Summaries are written onto the message objects in place
    (message_summary / tool_output_summary); the original list is
    returned unchanged so callers can keep chaining it.

    Fix: previously `len(m.tool_output)` (and `len(m.message)`) were
    evaluated without type guards; a failed tool leaves `tool_output`
    unset/non-string, which crashed the comprehension. Only plain-string
    payloads are now considered summarizable.
    """
    summarizable_messages = [
        m
        for m in messages
        if (
            isinstance(m, ConversationMessage)
            and not m.message_summary
            and isinstance(m.message, str)
            and len(m.message) > MIN_SUMMARIZABLE_LENGTH
        )
        or (
            isinstance(m, ToolUsageMessage)
            and not m.tool_output_summary
            # tool_output may be None (failed tool) or a structured object;
            # only plain strings can be length-checked and summarized.
            and isinstance(m.tool_output, str)
            and len(m.tool_output) > MIN_SUMMARIZABLE_LENGTH
        )
    ]
    if not summarizable_messages:
        # Nothing to do: skip the executor and the noisy "0 messages" log.
        return messages
    print(f"Summarizing {len(summarizable_messages)} messages...")
    # Parallelize the per-message LLM calls; each worker mutates its
    # message object independently.
    with ThreadPoolExecutor() as executor:
        futures = []
        for m in summarizable_messages:
            # Up to five messages preceding the target provide context.
            previous_messages = [msg for msg in messages if msg.timestamp < m.timestamp]
            future = executor.submit(
                _summarize_message,
                m,
                previous_messages[-5:],
                llm,
                summary_prompt_builder,
                message_prompt_builder,
                max_summary_words,
            )
            futures.append(future)
        for future in as_completed(futures):
            try:
                future.result()
            except Exception as exc:
                # Best-effort: a failed summary leaves the message as-is.
                print(f"Message summary generation generated an exception: {exc}")
    return messages
def _summarize_message(
    message: AgentMessage,
    previous_messages: List[AgentMessage],
    llm: LLM,
    prompt_builder: PromptBuilder,
    message_prompt_builder: PromptBuilder,
    max_summary_words: int,
) -> str:
    """Summarize a single message, using the preceding messages as
    context, and store the result on the message object in place."""
    target_prompt = message_prompt_builder.build_prompt(message=message)
    context_prompts = [
        message_prompt_builder.build_prompt(message=prev)
        for prev in previous_messages
    ]
    summary_request = prompt_builder.build_prompt(
        message=target_prompt,
        previous_messages=context_prompts,
        max_words=max_summary_words,
    )
    summary, _ = llm.get_completion(
        messages=[{"role": "system", "content": summary_request}]
    )
    # Attach the summary to whichever field matches the message type.
    if isinstance(message, ConversationMessage):
        print("Updating message summary...")
        message.message_summary = summary
    elif isinstance(message, ToolUsageMessage):
        message.tool_output_summary = summary
================================================
FILE: bondai/agents/compression/prompts/conversation_summarizer_prompt_template.md
================================================
Read the entire conversation provided below and create a summary. Your task is to condense the key information and main points from the conversation into a concise summary. Focus on retaining critical details and insights from the dialogue.
# Conversation:
{% for msg in messages %}
- {{ msg }}
{% endfor %}
# Instructions:
- Aim to capture the essence and most significant aspects of the conversation in your summary.
- Ensure that all vital information, including key facts, decisions, and insights, is included in the summary.
- Produce a clear and coherent summary that reflects the main points of the conversation.
- Exclude any redundant or non-essential information from the summary.
- Present the summary in a concise and organized manner.
# Conversation Summary:
================================================
FILE: bondai/agents/compression/prompts/message_summarizer_prompt_template.md
================================================
Read the following conversation and summarize the final message:
# Conversation
{% for msg in previous_messages %}
- {{ msg }}
{% endfor %}
- {{ message }}
# Important Rules
- Use the preceding conversation as context but summarize ONLY the following message.
- Your summary must be no longer than {{ max_words }} words.
- Output only the summary. Do NOT include anything else in your output.
Message To Summarize:
{{ message }}
Summary:
================================================
FILE: bondai/agents/conversation_member.py
================================================
import uuid
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Callable
from .messages import (
AgentMessage,
ConversationMessage,
AgentMessageList,
USER_MEMBER_NAME,
)
# Default number of delivery attempts before a send_message call gives up.
DEFAULT_MAX_SEND_ATTEMPTS = 3
class ConversationMemberEventNames(Enum):
    # Lifecycle events emitted by conversation members; subscribers key
    # off the string values.
    MESSAGE_RECEIVED: str = "message_received"
    MESSAGE_COMPLETED: str = "message_completed"
    MESSAGE_ERROR: str = "message_error"
    # NOTE(review): value is "agent_exited" while the member is named
    # CONVERSATION_EXITED — likely historical; confirm before renaming,
    # since external subscribers may match on the string value.
    CONVERSATION_EXITED: str = "agent_exited"
class ConversationMember(ABC):
    """Abstract base class for any participant in a (group) conversation.

    Provides identity (id/name), optional persona text, and a message
    history; concrete members implement `send_message`.
    """

    def __init__(
        self,
        name: str,
        persona: str | None = None,
        persona_summary: str | None = None,
    ):
        self._id: str = str(uuid.uuid4())  # unique member identifier
        self._name: str = name  # name used to address this member
        self._persona: str = persona  # full persona description
        self._persona_summary: str = persona_summary  # condensed persona
        self._messages: AgentMessageList = AgentMessageList()  # history

    @property
    def id(self) -> str:
        return self._id

    @property
    def name(self) -> str:
        return self._name

    @property
    def persona(self) -> str:
        return self._persona

    @property
    def persona_summary(self) -> str:
        return self._persona_summary

    @property
    def messages(self) -> AgentMessageList:
        return self._messages

    @abstractmethod
    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ) -> (str, str, bool):
        # Deliver a message to this member; concrete members must implement.
        pass

    def send_message_async(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ):
        # Optional threaded delivery; default implementation is a no-op.
        pass

    def clear_messages(self):
        # Optional history reset; default implementation is a no-op.
        pass
================================================
FILE: bondai/agents/conversational_agent.py
================================================
import os
import traceback
import json
from datetime import datetime
from typing import Dict, List, Callable
from bondai.util import load_local_resource
from bondai.tools import Tool
from bondai.memory import MemoryManager
from bondai.tools.conversational import (
SEND_MESSAGE_TOOL_NAME,
EXIT_CONVERSATION_TOOL_NAME,
SendMessageTool,
ExitConversationTool,
)
from bondai.prompt import JinjaPromptBuilder
from bondai.models import LLM, EmbeddingModel
from bondai.models.openai import OpenAILLM, OpenAIModelNames, get_total_cost
from .agent import (
Agent,
DEFAULT_MAX_TOOL_RETRIES,
AgentStatus,
AgentException,
)
from .util import (
AgentException,
AgentEventNames,
parse_response_content_message,
)
from .prompts import DEFAULT_AGENT_NAME, DEFAULT_CONVERSATIONAL_INSTRUCTIONS
from .conversation_member import ConversationMember, ConversationMemberEventNames
from .messages import (
AgentMessage,
ConversationMessage,
ToolUsageMessage,
SystemMessage,
AgentMessageList,
USER_MEMBER_NAME,
)
# Default number of response attempts before send_message gives up.
DEFAULT_MAX_SEND_ATTEMPTS = 3
# System prompt template used by conversational agents.
DEFAULT_SYSTEM_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "conversational_agent_system_prompt_template.md")
)
class ConversationalAgent(Agent, ConversationMember):
    """Tool-using agent that participates in conversations.

    Marries the Agent tool loop with the ConversationMember interface: an
    incoming ConversationMessage drives a tool loop whose result is either
    a send_message/exit_conversation tool call or (optionally) plain
    response text parsed for a "Recipient: message" prefix.
    """

    def __init__(
        self,
        llm: LLM | None = None,
        embedding_model: EmbeddingModel | None = None,
        tools: List[Tool] | None = None,
        messages: List[AgentMessage] | None = None,
        name: str = DEFAULT_AGENT_NAME,
        persona: str | None = None,
        persona_summary: str | None = None,
        instructions: str | None = DEFAULT_CONVERSATIONAL_INSTRUCTIONS,
        system_prompt_sections: List[Callable[[], str]] | None = None,
        system_prompt_builder: Callable[..., str] = None,
        message_prompt_builder: Callable[..., str] = None,
        memory_manager: MemoryManager | None = None,
        max_tool_retries: int = DEFAULT_MAX_TOOL_RETRIES,
        max_context_length: int = None,
        max_context_pressure_ratio: float = 0.8,
        enable_context_compression: bool = False,
        enable_conversation_tools: bool = True,
        enable_conversational_content_responses: bool = True,
        enable_exit_conversation: bool = True,
        quiet: bool = True,
    ):
        # Default to GPT-4 when no model is supplied.
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
        if tools is None:
            tools = []
        if system_prompt_sections is None:
            system_prompt_sections = []
        ConversationMember.__init__(
            self,
            name=name,
            persona=persona,
            persona_summary=persona_summary,
        )
        Agent.__init__(
            self,
            llm=llm,
            embedding_model=embedding_model,
            quiet=quiet,
            tools=tools,
            messages=messages,
            system_prompt_sections=system_prompt_sections,
            system_prompt_builder=system_prompt_builder
            or JinjaPromptBuilder(DEFAULT_SYSTEM_PROMPT_TEMPLATE),
            message_prompt_builder=message_prompt_builder,
            memory_manager=memory_manager,
            max_context_length=max_context_length,
            max_context_pressure_ratio=max_context_pressure_ratio,
            max_tool_retries=max_tool_retries,
            enable_context_compression=enable_context_compression,
            enable_final_answer_tool=False,
            allowed_events=[
                AgentEventNames.CONTEXT_COMPRESSION_REQUESTED,
                AgentEventNames.TOOL_SELECTED,
                AgentEventNames.TOOL_ERROR,
                AgentEventNames.TOOL_COMPLETED,
                AgentEventNames.STREAMING_CONTENT_UPDATED,
                AgentEventNames.STREAMING_FUNCTION_UPDATED,
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        self._instructions: str = instructions
        self._enable_exit_conversation: bool = enable_exit_conversation
        self._enable_conversational_content_responses = (
            enable_conversational_content_responses
        )
        self._enable_conversation_tools = enable_conversation_tools
        # Conversational tools are optional so the agent can also run in
        # plain content-response mode.
        if self._enable_conversation_tools:
            self.add_tool(SendMessageTool())
        if self._enable_exit_conversation:
            self.add_tool(ExitConversationTool())

    @property
    def instructions(self) -> str:
        # Free-form behavioral instructions injected into the system prompt.
        return self._instructions

    def send_message_async(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List[ConversationMember] | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ):
        """Runs the agent's task in a separate thread."""
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot send message while agent is in a running state."
            )
        if not message:
            raise AgentException("'message' cannot be empty.")
        args = (
            message,
            sender_name,
            group_members,
            group_messages,
            max_attempts,
            require_response,
        )
        self._start_execution_thread(self.send_message, args=args)

    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List[ConversationMember] | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ) -> ConversationMessage | None:
        """Process one incoming message and produce the agent's reply.

        Runs the tool loop up to `max_attempts` times until it yields a
        valid reply (send_message tool call or parseable content text),
        an exit_conversation call, or an error. Returns the outgoing
        ConversationMessage, the exit tool's output when the agent leaves
        the conversation, or None when no response was required.
        """
        if group_members is None:
            group_members = []
        if group_messages is None:
            group_messages = []
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot send message while agent is in a running state."
            )
        if not message:
            raise AgentException("'message' cannot be empty.")
        # Normalize the input into a ConversationMessage addressed to us.
        if isinstance(message, ConversationMessage):
            agent_message = message
        elif isinstance(message, str):
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            agent_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=self.name,
                message=message,
                require_response=require_response,
            )
        else:
            raise AgentException(
                "'message' must be an instance of ConversationMessage or a string."
            )
        attempts = 0
        starting_cost = get_total_cost()
        self._status = AgentStatus.RUNNING
        self._messages.add(agent_message)
        if self._memory_manager and self._memory_manager.conversation_memory:
            self._memory_manager.conversation_memory.add(agent_message)
        self._trigger_event(
            ConversationMemberEventNames.MESSAGE_RECEIVED, self, agent_message
        )

        def complete_agent_message(
            success=False, conversation_exited=False, error=None
        ):
            # Stamp outcome metadata on the incoming message and emit the
            # matching lifecycle event(s).
            agent_message.success = success
            agent_message.conversation_exited = conversation_exited
            agent_message.error = error
            agent_message.cost = get_total_cost() - starting_cost
            agent_message.completed_at = datetime.now()
            if success:
                self._trigger_event(
                    ConversationMemberEventNames.MESSAGE_COMPLETED, self, agent_message
                )
                if conversation_exited:
                    self._trigger_event(
                        ConversationMemberEventNames.CONVERSATION_EXITED,
                        self,
                        agent_message,
                    )
            else:
                self._trigger_event(
                    ConversationMemberEventNames.MESSAGE_ERROR, self, agent_message
                )

        def validate_recipient(recipient_name: str):
            # Returns an error string when the recipient is empty or not
            # reachable in this group; implicitly returns None when valid.
            if not recipient_name:
                return "recipient_name cannot be empty."
            if len(group_members) > 0 and not any(
                [
                    member.name.lower() == recipient_name.lower()
                    for member in group_members
                ]
            ):
                return f"InvalidResponseError: The response does not conform to the required format. You do not have the ability to send messages to '{recipient_name}'. Try sending a message to someone else."

        if not agent_message.require_response:
            # Fire-and-forget message: record it and stop here.
            complete_agent_message(success=True)
            return
        while not self._force_stop:
            try:
                attempts += 1
                if attempts > max_attempts:
                    raise AgentException(
                        "The maximum number of attempts has been exceeded."
                    )
                prompt_vars = {
                    "name": self.name,
                    "persona": self.persona,
                    "instructions": self.instructions,
                    "conversation_enabled": self._enable_conversation_tools
                    or self._enable_conversational_content_responses,
                    "enable_exit_conversation": self._enable_exit_conversation,
                }
                tool_result = self._run_tool_loop(
                    addition_context_messages=group_messages,
                    tools=self._tools,
                    conversation_members=group_members,
                    starting_cost=starting_cost,
                    return_conversational_responses=True,
                    prompt_vars=prompt_vars,
                )
                response_message: ConversationMessage | None = None
                if isinstance(tool_result, ToolUsageMessage):
                    if not tool_result.success:
                        complete_agent_message(success=False, error=tool_result.error)
                        raise tool_result.error
                    elif tool_result.tool_name == EXIT_CONVERSATION_TOOL_NAME:
                        complete_agent_message(success=True, conversation_exited=True)
                        return tool_result.tool_output
                    elif tool_result.tool_name == SEND_MESSAGE_TOOL_NAME:
                        response_message = tool_result.tool_output
                # Plain-text reply: parse a "Recipient: message" prefix,
                # falling back to replying to the original sender.
                # (Note: `message` is rebound here, shadowing the parameter.)
                if (
                    isinstance(tool_result, str)
                    and self._enable_conversational_content_responses
                ):
                    recipient_name, message = parse_response_content_message(
                        tool_result
                    )
                    if not recipient_name or not message:
                        recipient_name = agent_message.sender_name
                        message = tool_result
                    response_message = ConversationMessage(
                        role="assistant", recipient_name=recipient_name, message=message
                    )
                if response_message:
                    response_message.sender_name = self.name
                    error = validate_recipient(response_message.recipient_name)
                    if not error:
                        complete_agent_message(success=True)
                        self._messages.add(response_message)
                        if (
                            self._memory_manager
                            and self._memory_manager.conversation_memory
                        ):
                            self._memory_manager.conversation_memory.add(
                                response_message
                            )
                        return response_message
                    else:
                        # Invalid recipient: feed the error back and retry.
                        self._messages.add(SystemMessage(message=error))
                else:
                    # No usable reply at all: push an error and retry.
                    self._messages.add(
                        SystemMessage(
                            message="InvalidResponseError: The response does not conform to the required format. A function selection was expected, but none was provided.\nYour must correct this error."
                        )
                    )
            finally:
                self._status = AgentStatus.IDLE

    def to_dict(self) -> Dict:
        """Serialize configuration and message history to a plain dict."""
        data = super().to_dict()
        data["name"] = self._name
        data["persona"] = self._persona
        data["persona_summary"] = self._persona_summary
        data["instructions"] = self.instructions
        data["quiet"] = self._quiet
        data["enable_conversation_tools"] = self._enable_conversation_tools
        data["enable_exit_conversation"] = self._enable_exit_conversation
        data[
            "enable_conversational_content_responses"
        ] = self._enable_conversational_content_responses
        data["max_context_length"] = self._max_context_length
        data["max_context_pressure_ratio"] = self._max_context_pressure_ratio
        data["messages"] = self.messages.to_dict()
        return data

    def save_state(self, file_path: str = None) -> Dict:
        """Capture full state; optionally persist it as JSON to `file_path`."""
        state = super().save_state()
        state.update(self.to_dict())
        if file_path:
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, "w") as file:
                json.dump(state, file, indent=4)
        return state

    @classmethod
    def from_dict(
        cls, data: List[Dict], file_path: str = None
    ) -> "ConversationalAgent":
        """Rebuild an agent from `to_dict` output, or from a JSON file when
        `data` is empty and `file_path` is given."""
        if not data and file_path:
            with open(file_path, "r") as file:
                data = json.load(file)
        agent = cls(
            name=data["name"],
            persona=data["persona"],
            persona_summary=data["persona_summary"],
            instructions=data["instructions"],
            enable_exit_conversation=data["enable_exit_conversation"],
            quiet=data["quiet"],
            enable_conversation_tools=data["enable_conversation_tools"],
            enable_conversational_content_responses=data[
                "enable_conversational_content_responses"
            ],
            max_context_length=data["max_context_length"],
            max_context_pressure_ratio=data["max_context_pressure_ratio"],
        )
        agent._messages = AgentMessageList.from_dict(data["messages"])
        agent.load_state(data)
        return agent
================================================
FILE: bondai/agents/group_chat/__init__.py
================================================
from .group_conversation import GroupConversation
from .user_proxy import UserProxy
from .group_conversation_config import (
BaseGroupConversationConfig,
GroupConversationConfig,
TeamConversationConfig,
TableConversationConfig,
CompositeConversationConfig,
)
__all__ = [
"UserProxy",
"GroupConversation",
"BaseGroupConversationConfig",
"GroupConversationConfig",
"TeamConversationConfig",
"TableConversationConfig",
"CompositeConversationConfig",
]
================================================
FILE: bondai/agents/group_chat/group_conversation.py
================================================
import uuid
import asyncio
import traceback
from datetime import datetime
from typing import Dict, List, Callable
from bondai.util import EventMixin, Runnable
from bondai.agents import (
AgentException,
AgentStatus,
ConversationMember,
ConversationMemberEventNames,
AgentMessageList,
ConversationMessage,
USER_MEMBER_NAME,
)
from .group_conversation_config import (
BaseGroupConversationConfig,
TeamConversationConfig,
)
class GroupConversation(EventMixin, Runnable):
def __init__(
    self,
    conversation_members: List[ConversationMember] | None = None,
    conversation_config: BaseGroupConversationConfig | None = None,
    filter_recipient_messages: bool = False,
):
    """Create a group conversation.

    Exactly one of `conversation_members` (wrapped in a
    TeamConversationConfig where everyone can reach everyone) or
    `conversation_config` must be provided; passing both or neither
    raises AgentException. When `filter_recipient_messages` is True each
    member only sees messages it sent or received.
    """
    super().__init__(
        allowed_events=[
            ConversationMemberEventNames.MESSAGE_RECEIVED,
            ConversationMemberEventNames.MESSAGE_COMPLETED,
            ConversationMemberEventNames.MESSAGE_ERROR,
            ConversationMemberEventNames.CONVERSATION_EXITED,
        ]
    )
    if conversation_members and conversation_config:
        raise AgentException(
            "Only one of 'conversation_members' or 'conversation_configs' must be provided"
        )
    if conversation_config:
        self._conversation_config = conversation_config
    elif conversation_members:
        # A flat member list means everyone can talk to everyone.
        self._conversation_config = TeamConversationConfig(conversation_members)
    else:
        raise AgentException(
            "Either 'conversation_members' or 'conversation_config' must be provided"
        )
    self._id: str = str(uuid.uuid4())
    self._status: AgentStatus = AgentStatus.IDLE
    self._filter_recipient_messages: bool = filter_recipient_messages
    self._messages: AgentMessageList = AgentMessageList()
    # Subscribe to every member's events so they re-broadcast through us.
    self._init_member_events()
@property
def id(self) -> str:
    # Unique identifier for this conversation instance.
    return self._id

@property
def status(self) -> AgentStatus:
    # IDLE or RUNNING; guards re-entrant sends and state save/load.
    return self._status

@property
def members(self) -> List[ConversationMember]:
    # All members, resolved through the conversation config.
    return self._conversation_config.members
def remove_messages_after(self, timestamp: datetime, inclusive: bool = True):
    """Drop all messages newer than `timestamp` from the group transcript
    and from every member's private history.

    Fix: the `inclusive` flag was previously forwarded only to the
    members' message lists while the group's own list always used the
    callee default, so the two could disagree about the boundary
    message. It is now applied consistently to both.
    """
    self._messages.remove_after(timestamp, inclusive=inclusive)
    for member in self.members:
        member.messages.remove_after(timestamp, inclusive=inclusive)
def _get_member(self, member_name: str) -> ConversationMember:
    """Case-insensitive lookup of a member by name; None when absent."""
    target = member_name.lower()
    for member in self.members:
        if member.name.lower() == target:
            return member
    return None
def _init_member_events(self):
    """Subscribe this conversation to every member's lifecycle events so
    they can be re-broadcast to the group's own listeners."""
    handlers = {
        ConversationMemberEventNames.MESSAGE_RECEIVED: self._on_member_message_received,
        ConversationMemberEventNames.MESSAGE_ERROR: self._on_member_message_error,
        ConversationMemberEventNames.MESSAGE_COMPLETED: self._on_member_message_completed,
        ConversationMemberEventNames.CONVERSATION_EXITED: self._on_member_exited,
    }
    for member in self.members:
        for event_name, handler in handlers.items():
            member.on(event_name)(handler)
def _on_member_message_received(
    self, member: ConversationMember, message: ConversationMessage
):
    # Re-broadcast a member's "message received" event to group listeners.
    # print(f"{message.sender_name} to {message.recipient_name}: {message.message}")
    self._trigger_event(
        ConversationMemberEventNames.MESSAGE_RECEIVED, member, message
    )

def _on_member_message_error(
    self, member: ConversationMember, message: ConversationMessage
):
    # Surface the member's failure traceback, then re-broadcast the error.
    exc = message.error
    traceback.print_exception(type(exc), exc, exc.__traceback__)
    self._trigger_event(ConversationMemberEventNames.MESSAGE_ERROR, member, message)

def _on_member_message_completed(
    self, member: ConversationMember, message: ConversationMessage
):
    # Completed messages become part of the shared group transcript.
    self._messages.add(message)
    self._trigger_event(
        ConversationMemberEventNames.MESSAGE_COMPLETED, member, message
    )

def _on_member_exited(
    self, member: ConversationMember, message: ConversationMessage
):
    # Re-broadcast a member's exit so the conversation owner can react.
    self._trigger_event(
        ConversationMemberEventNames.CONVERSATION_EXITED, member, message
    )
def save_state(self) -> Dict:
    """Snapshot every member's state, keyed by member id.

    Raises AgentException when the conversation is currently running.
    """
    if self._status == AgentStatus.RUNNING:
        raise AgentException(
            "Cannot save group conversation state while it is running."
        )
    return {member.id: member.save_state() for member in self.members}
def load_state(self, state: Dict):
    """Restore each member's state from a `save_state` snapshot (keyed by
    member id). Raises AgentException when the conversation is running."""
    if self._status == AgentStatus.RUNNING:
        raise AgentException(
            "Cannot load group conversation state while it is running."
        )
    for member in self.members:
        member.load_state(state[member.id])
def send_message_async(
    self,
    recipient_name: str,
    message: str,
    sender_name: str = USER_MEMBER_NAME,
    require_response: bool = True,
):
    """Deliver `message` to `recipient_name` on a background thread by
    delegating to `send_message`."""
    if self._status == AgentStatus.RUNNING:
        raise AgentException(
            "Cannot send message while agent is in a running state."
        )
    if not message:
        raise AgentException("'message' cannot be empty.")
    self._start_execution_thread(
        self.send_message,
        args=(recipient_name, message, sender_name, require_response),
    )
    def send_message(
        self,
        recipient_name: str,
        message: str,
        sender_name: str = USER_MEMBER_NAME,
        require_response: bool = True,
    ) -> str:
        """Deliver a message into the group and drive the routing loop.

        The message is delivered to its recipient; the recipient's reply (if
        any) becomes the next message, and routing continues until a member
        stops responding (returns a falsy value).

        Raises:
            AgentException: if the conversation is already running, the
                message is empty or of an unsupported type, or the recipient
                cannot be resolved among the sender's reachable members.
        """
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot send message while agent is in a running state."
            )
        if not message:
            raise AgentException("'message' cannot be empty.")
        previous_message = None
        # Normalize the input into a ConversationMessage.
        if isinstance(message, ConversationMessage):
            next_message = message
        elif isinstance(message, str):
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            if not recipient_name:
                raise AgentException("recipient_name cannot be empty.")
            next_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=recipient_name,
                message=message,
                require_response=require_response,
            )
        else:
            raise AgentException(
                "'message' must be an instance of ConversationMessage or a string."
            )
        try:
            self._status = AgentStatus.RUNNING
            while next_message:
                # The user may address anyone; agents are restricted to the
                # members the conversation config says they can reach.
                if next_message.sender_name.lower() == USER_MEMBER_NAME.lower():
                    sender_reachable_members = self.members
                else:
                    sender_reachable_members = (
                        self._conversation_config.get_reachable_members(
                            member_name=next_message.sender_name
                        )
                    )
                recipient = next(
                    (
                        m
                        for m in sender_reachable_members
                        if m.name.lower() == next_message.recipient_name.lower()
                    ),
                    None,
                )
                if not recipient:
                    raise AgentException(
                        f"Recipient {next_message.recipient_name} not found"
                    )
                recipient_reachable_members = (
                    self._conversation_config.get_reachable_members(member=recipient)
                )
                # Optionally restrict the transcript to messages the recipient
                # sent or received.
                if self._filter_recipient_messages:
                    recipient_messages = AgentMessageList(
                        [
                            m
                            for m in self._messages
                            if m.recipient_name.lower() == recipient.name.lower()
                            or m.sender_name.lower() == recipient.name.lower()
                        ]
                    )
                else:
                    recipient_messages = self._messages
                try:
                    if next_message.require_response:
                        previous_message = next_message
                        next_message = recipient.send_message(
                            message=next_message,
                            group_members=recipient_reachable_members,
                            group_messages=recipient_messages,
                        )
                    else:
                        # Fire-and-forget delivery; resume from the message
                        # that preceded it.
                        recipient.send_message(message=next_message)
                        next_message = previous_message
                except AgentException as e:
                    print("Error occurred, rewinding conversation...")
                    # print(e)
                    # The recipient agent has errored out. We will rewind the conversation and try again.
                    previous_message = (
                        self._messages[-2]
                        if len(self._messages) > 1
                        else self._messages[-1]
                    )
                    self.remove_messages_after(previous_message.timestamp)
                    next_message = ConversationMessage(
                        message=previous_message.message,
                        sender_name=previous_message.sender_name,
                        recipient_name=previous_message.recipient_name,
                    )
            # NOTE(review): every other trigger passes (member, message), but
            # this one passes only the message — listeners bound with a
            # two-argument signature would fail here. Also, next_message is
            # falsy at this point. Confirm the intended signature/payload.
            self._trigger_event(
                ConversationMemberEventNames.CONVERSATION_EXITED, next_message
            )
        finally:
            self._status = AgentStatus.IDLE
def reset_memory(self):
self._messages.clear()
for member in self.members:
member.clear_messages()
================================================
FILE: bondai/agents/group_chat/group_conversation_config.py
================================================
from typing import Dict, List, Set
from abc import ABC, abstractmethod
from bondai.agents import ConversationMember
class BaseGroupConversationConfig(ABC):
    # Abstract interface describing which members of a group conversation
    # are allowed to exchange messages with one another.
    @property
    @abstractmethod
    def members(self) -> List[ConversationMember]:
        """All members covered by this configuration."""
        pass
    @abstractmethod
    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        """Return the members that `member` (or `member_name`) may message."""
        pass
class GroupConversationConfig(BaseGroupConversationConfig):
    """Fully-connected configuration: every member can reach every other member.

    Fixes two defects in the previous revision:
      * It subclassed ``ABC`` directly instead of ``BaseGroupConversationConfig``,
        so it did not implement the config interface.
      * Its read-only property was named ``_members``, colliding with the
        backing attribute assigned in ``__init__`` — constructing the class
        raised ``AttributeError: can't set attribute``.
    """

    def __init__(self, members: List[ConversationMember]):
        # De-duplicate; a set also gives O(1) membership checks.
        self._member_set: Set[ConversationMember] = set(members)

    @property
    def members(self) -> List[ConversationMember]:
        """All members of the conversation (order unspecified)."""
        return list(self._member_set)

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        """Every member except the sender; [] when no sender is identified."""
        if not member and not member_name:
            return []
        member_name = "" if not member_name else member_name
        return list(
            {
                m
                for m in self._member_set
                if m != member and m.name.lower() != member_name.lower()
            }
        )
class TeamConversationConfig(BaseGroupConversationConfig):
    """Members are grouped into teams; a member can reach everyone who shares
    at least one team with them (never themselves)."""

    def __init__(self, *args: List[ConversationMember]):
        self._members: Set[ConversationMember] = set()
        for team in args:
            self._members.update(team)
        self._teams: List[List[ConversationMember]] = list(args)

    @property
    def members(self) -> List[ConversationMember]:
        """All members across every team (de-duplicated)."""
        return list(self._members)

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        """Teammates of the given member; [] when no member is identified."""
        if not member and not member_name:
            return []
        lowered = ("" if not member_name else member_name).lower()

        def is_sender(m):
            # Matches either the member object itself or its name.
            return m == member or m.name.lower() == lowered

        sender_teams = [t for t in self._teams if any(is_sender(m) for m in t)]
        reachable = {m for t in sender_teams for m in t if not is_sender(m)}
        return list(reachable)
class TableConversationConfig(BaseGroupConversationConfig):
    """Explicit adjacency table mapping each ConversationMember to the list
    of members they are allowed to message."""

    def __init__(self, member_table: Dict):
        # NOTE(review): keys are assumed to be ConversationMember instances —
        # both the `members` property and the name lookup below rely on that.
        self._member_table = member_table

    @property
    def members(self) -> List[ConversationMember]:
        """All members that have an entry in the table."""
        return list(self._member_table.keys())

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        """Members listed for the given member; [] when unknown/unspecified."""
        if not member and not member_name:
            return []
        if member_name:
            member = next((m for m in self.members if m.name == member_name), None)
        # Bug fix: the table is keyed by member objects, but the previous
        # revision looked up `member.name`, which could never match a key.
        if member is not None and member in self._member_table:
            return self._member_table[member]
        return []
class CompositeConversationConfig(BaseGroupConversationConfig):
    """Union of several configurations: membership and reachability are
    merged across every wrapped config."""

    def __init__(self, *conversation_configs: List[BaseGroupConversationConfig]):
        self._conversation_configs: List[BaseGroupConversationConfig] = list(
            conversation_configs
        )

    @property
    def members(self) -> List[ConversationMember]:
        """De-duplicated members of all wrapped configs."""
        merged = set()
        for config in self._conversation_configs:
            merged.update(config.members)
        return list(merged)

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        """De-duplicated union of each wrapped config's reachable members."""
        reachable = set()
        for config in self._conversation_configs:
            reachable.update(config.get_reachable_members(member, member_name))
        return list(reachable)
================================================
FILE: bondai/agents/group_chat/user_proxy.py
================================================
from termcolor import cprint
from datetime import datetime
from typing import List
from bondai.util import EventMixin
from bondai.agents import (
AgentStatus,
AgentException,
AgentMessage,
ConversationMessage,
ConversationMember,
ConversationMemberEventNames,
parse_response_content_message,
USER_MEMBER_NAME,
)
class UserProxy(EventMixin, ConversationMember):
    # Conversation member representing a human at the terminal: prints
    # incoming messages with cprint and reads replies from stdin.
    def __init__(
        self,
        persona: str | None = None,
        parse_recipients: bool = True,
        auto_exit: bool = False,
    ):
        """
        :param persona: Optional persona text exposed to other members.
        :param parse_recipients: When True, parse "recipient: message" out of
            the user's typed response.
        :param auto_exit: When True, never prompt for input — acknowledge the
            incoming message and return immediately.
        """
        EventMixin.__init__(
            self,
            allowed_events=[
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        ConversationMember.__init__(
            self,
            name=USER_MEMBER_NAME,
            persona=persona,
        )
        self._status = AgentStatus.IDLE
        self._parse_recipients = parse_recipients
        self._auto_exit = auto_exit
    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = None,
        require_response: bool = True,
    ):
        """Show `message` to the human and collect their typed reply.

        Returns a ConversationMessage with the user's response, or None when
        the user types "exit" (a CONVERSATION_EXITED event is fired).
        Returns immediately (no prompt) when the message does not require a
        response or `auto_exit` is set.

        Raises:
            AgentException: when `message` is empty or `sender_name` is empty
                for a plain-string message.
        """
        if not message:
            raise AgentException("'message' cannot be empty.")
        if group_members is None:
            group_members = []
        # Normalize the input into a ConversationMessage addressed to us.
        if isinstance(message, ConversationMessage):
            agent_message = message
        else:
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            agent_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=self.name,
                message=message,
                require_response=require_response,
            )
        self._messages.add(agent_message)
        self._trigger_event(
            ConversationMemberEventNames.MESSAGE_RECEIVED, self, agent_message
        )
        # Display the incoming message to the human.
        cprint("\n" + agent_message.message + "\n", "white")
        # No reply needed (or auto_exit): acknowledge and return without prompting.
        if not agent_message.require_response or self._auto_exit:
            agent_message.success = True
            agent_message.cost = 0.0
            agent_message.completed_at = datetime.now()
            self._trigger_event(
                ConversationMemberEventNames.MESSAGE_COMPLETED, self, agent_message
            )
            return
        # Prompt until we get a parseable response (errors re-prompt).
        while True:
            try:
                user_response = input()
                user_exited = user_response.strip().lower() == "exit"
                if not user_exited:
                    if self._parse_recipients:
                        # Extract "recipient: message" from the typed text.
                        (
                            next_recipient_name,
                            next_message,
                        ) = parse_response_content_message(user_response)
                    else:
                        next_recipient_name = agent_message.sender_name
                        next_message = user_response
                    # Fall back to replying to the original sender / raw text
                    # when parsing produced nothing.
                    next_recipient_name = (
                        next_recipient_name
                        if next_recipient_name
                        else agent_message.sender_name
                    )
                    next_message = next_message if next_message else user_response
                    # Reject recipients outside the user's reachable members;
                    # the raised error is printed and the user re-prompted.
                    if len(group_members) > 0 and not any(
                        [
                            member.name.lower() == next_recipient_name.lower()
                            for member in group_members
                        ]
                    ):
                        raise AgentException(
                            f"InvalidResponseError: The response does not conform to the required format. You do not have the ability to send messages to '{next_recipient_name}'. Try sending a message to someone else."
                        )
                    agent_message.success = True
                    agent_message.conversation_exited = user_exited
                    agent_message.cost = 0.0
                    agent_message.completed_at = datetime.now()
                    self._trigger_event(
                        ConversationMemberEventNames.MESSAGE_COMPLETED,
                        self,
                        agent_message,
                    )
                    response_message = ConversationMessage(
                        sender_name=self.name,
                        recipient_name=next_recipient_name,
                        message=next_message,
                    )
                    self._messages.add(response_message)
                    self._status = AgentStatus.IDLE
                    return response_message
                else:
                    # User typed "exit": complete the message and announce exit.
                    agent_message.success = True
                    agent_message.conversation_exited = True
                    agent_message.cost = 0.0
                    agent_message.completed_at = datetime.now()
                    self._trigger_event(
                        ConversationMemberEventNames.MESSAGE_COMPLETED,
                        self,
                        agent_message,
                    )
                    self._trigger_event(
                        ConversationMemberEventNames.CONVERSATION_EXITED,
                        self,
                        agent_message,
                    )
                    self._status = AgentStatus.IDLE
                    return None
            except Exception as e:
                # Show the problem and loop back to prompt again.
                print("The following error occurred while parsing your response:")
                print(str(e))
================================================
FILE: bondai/agents/messages.py
================================================
import uuid
from abc import ABC
from typing import List, Dict, Set
from datetime import datetime
from dataclasses import dataclass, field, is_dataclass
# Canonical name used to address the human participant in a conversation.
USER_MEMBER_NAME = "user"
# Injected into an agent's context shortly before history trimming occurs.
DEFAULT_MEMORY_WARNING_MESSAGE = (
    "Warning: the conversation history will soon reach its maximum length and be trimmed. "
    "Make sure to save any important information from the conversation to your memory before it is removed."
)
@dataclass
class AgentMessage(ABC):
    """Base record for every message kept in an agent transcript."""

    # Unique id; AgentMessageList de-duplicates on this.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))
    # LLM chat role ("system", "user", "function"); set by subclasses.
    role: str | None = field(default=None)
    # Creation time; AgentMessageList keeps items sorted by this.
    timestamp: datetime = field(default_factory=lambda: datetime.now())
@dataclass
class SystemMessage(AgentMessage):
    """A system-role message (e.g. a system prompt entry)."""

    role: str = field(default="system")
    # The system instruction text.
    message: str | None = field(default=None)
@dataclass
class SummaryMessage(AgentMessage):
    """Stands in for older messages that were compressed into a summary."""

    role: str = field(default="user")
    # The summary text that replaces the original messages.
    message: str | None = field(default=None)
    # The original messages this summary replaces; serialized recursively by
    # message_to_dict rather than through the normal field loop.
    children: List[AgentMessage] = field(default=None)
@dataclass
class ConversationMessage(AgentMessage):
    """A message exchanged between two conversation members."""

    role: str = field(default="user")
    sender_name: str | None = field(default=None)
    recipient_name: str | None = field(default=None)
    message: str | None = field(default=None)
    # Optional compressed form used when the transcript is summarized.
    message_summary: str | None = field(default=None)
    # When False the recipient should not reply (fire-and-forget).
    require_response: bool = field(default=True)
    success: bool = field(default=False)
    # Populated when handling the message failed.
    error: Exception | None = field(default=None)
    # True when the recipient chose to leave the conversation.
    conversation_exited: bool = field(default=False)
    # Cost incurred while handling this message, if any.
    cost: float | None = field(default=None)
    completed_at: datetime | None = field(default=None)
@dataclass
class ToolUsageMessage(AgentMessage):
    """Records a single tool invocation: arguments, output, and outcome."""

    role: str = field(default="function")
    tool_name: str | None = field(default=None)
    tool_arguments: Dict | None = field(default=None)
    tool_output: str | None = field(default=None)
    # Optional compressed form of the output used when context is trimmed.
    tool_output_summary: str | None = field(default=None)
    success: bool = field(default=False)
    # Populated when the tool call failed.
    error: Exception | None = field(default=None)
    # True when this tool call halted the agent — confirm exact semantics
    # against the agent loop (not visible here).
    agent_halted: bool = field(default=False)
    cost: float | None = field(default=None)
    completed_at: datetime | None = field(default=None)
def custom_serialization(value):
    """
    Serialize special types like datetime, Exception, and nested AgentMessage objects.

    datetimes become ISO-8601 strings, exceptions become their string form,
    and dataclass *instances* (not classes) are converted recursively via
    message_to_dict. Anything else passes through unchanged.
    """
    if isinstance(value, datetime):
        return value.isoformat()
    if isinstance(value, Exception):
        return str(value)
    if is_dataclass(value) and not isinstance(value, type):
        return message_to_dict(value)
    return value
def message_to_dict(message: AgentMessage) -> Dict:
    """
    Convert an AgentMessage object to a dictionary with custom serialization.

    The "children" field (present on SummaryMessage) is handled recursively;
    a "type" key records the concrete class name so from_dict can rebuild
    the right subclass later.
    """
    fields = vars(message)
    result = {}
    for key, value in fields.items():
        if key == "children":
            continue
        result[key] = custom_serialization(value)
    result["type"] = type(message).__name__  # Add the type for deserialization
    if "children" in fields:
        result["children"] = [message_to_dict(child) for child in message.children]
    return result
class AgentMessageList:
    """An ordered collection of AgentMessage objects, de-duplicated by id and
    kept sorted by timestamp."""

    def __init__(self, messages: List[AgentMessage] | None = None):
        self._items: List[AgentMessage] = []
        self._ids: Set[str] = set()
        if messages:
            for message in messages:
                self.add(message)

    def add(self, item: AgentMessage):
        """Insert a message, ignoring duplicates and keeping timestamp order.

        Perf fix: the previous revision re-sorted the whole list on every
        insert; now it only sorts when the new item arrives out of order, so
        the common append-in-order case is O(1).
        """
        if item.id in self._ids:
            return
        self._ids.add(item.id)
        self._items.append(item)
        if len(self._items) > 1 and item.timestamp < self._items[-2].timestamp:
            self._items.sort(key=lambda x: x.timestamp)

    def remove(self, item: AgentMessage):
        """Remove a message if present (matched by id)."""
        if item.id in self._ids:
            self._ids.remove(item.id)
            self._items.remove(item)

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        """Drop messages newer than `timestamp` (and equal, when inclusive)."""
        if inclusive:
            self._items = [item for item in self._items if item.timestamp <= timestamp]
        else:
            self._items = [item for item in self._items if item.timestamp < timestamp]
        self._ids = set([item.id for item in self._items])

    def clear(self):
        """Remove all messages."""
        self._items = []
        self._ids = set()

    def __getitem__(self, index: int):
        return self._items[index]

    def __add__(self, other: List[AgentMessage] | "AgentMessageList"):
        """Concatenate with another list or AgentMessageList; returns a plain list."""
        if isinstance(other, AgentMessageList):
            return self._items + other._items
        elif isinstance(other, list):
            return self._items + other
        else:
            raise TypeError(
                f"Unsupported operand type(s) for +: 'AgentMessageList' and '{type(other).__name__}'"
            )

    def __iter__(self):
        return iter(self._items)

    def __contains__(self, item):
        return item.id in self._ids

    def __len__(self):
        return len(self._items)

    def to_dict(self) -> List[Dict]:
        """
        Convert the AgentMessageList to a list of dictionaries for serialization.
        """
        return [message_to_dict(message) for message in self._items]

    @classmethod
    def from_dict(cls, data: List[Dict]) -> "AgentMessageList":
        """
        Create an AgentMessageList from a list of dictionaries.

        Bug fix: each item is shallow-copied before use — the previous
        revision deleted the "type" key from the caller's dictionaries.
        """
        type_map = {
            "ConversationMessage": ConversationMessage,
            "ToolUsageMessage": ToolUsageMessage,
            "SystemMessage": SystemMessage,
            "SummaryMessage": SummaryMessage,
        }
        list_instance = cls()
        for raw in data:
            item = dict(raw)  # Work on a copy; never mutate the caller's data.
            item_type = item.pop("type")
            message_cls = type_map.get(item_type)
            if message_cls is None:
                raise ValueError(f"Unknown message type: {item_type}")
            message = message_cls(**item)
            # Timestamps are serialized as ISO strings; parse them back.
            if "timestamp" in item:
                message.timestamp = datetime.fromisoformat(item["timestamp"])
            if "completed_at" in item and item["completed_at"]:
                message.completed_at = datetime.fromisoformat(item["completed_at"])
            list_instance.add(message)
        return list_instance
================================================
FILE: bondai/agents/prompts/__init__.py
================================================
# Re-export the default persona constants so they can be imported directly
# from bondai.agents.prompts.
from .default_persona import (
    DEFAULT_AGENT_NAME,
    DEFAULT_CONVERSATIONAL_INSTRUCTIONS,
    DEFAULT_CONVERSATIONAL_PERSONA,
)

# Public API of this package.
__all__ = [
    "DEFAULT_AGENT_NAME",
    "DEFAULT_CONVERSATIONAL_INSTRUCTIONS",
    "DEFAULT_CONVERSATIONAL_PERSONA",
]
================================================
FILE: bondai/agents/prompts/agent_message_prompt_template.md
================================================
{%- if message_type == "ToolUsageMessage" %}
# Message Timestamp
{{ message.timestamp }}
# Tool Name
You used the **{{ message.tool_name }}** tool.
# Tool Arguments
{% if message.tool_arguments %}
{% for k, v in message.tool_arguments.items() %}
{{ k }}:
```
{{ v }}
```
{% endfor %}
{%- else %}
No arguments were provided for this tool.
{% endif %}
{% if message.error %}
# Tool Error:
This tool did not run successfully and returned the following error:
```
{{ message.error }}
```
{%- else %}
# Tool Response:
```
{{ message.tool_output_summary or message.tool_output }}
```
{% endif %}
{%- elif message_type == "SystemMessage" %}
# Message Timestamp
{{ message.timestamp }}
{{ message.message }}
{%- elif message_type == "SummaryMessage" %}
The following is a summary of the previous conversation content. It has been summarized to save memory space:
{{ message.message }}
{%- elif message_type == "ConversationMessage" %}
{% if message.error %}
This message failed with the following error:
```
{{ message.error }}
```
Message content:
```
{{ message.sender_name.lower() }} to {{ message.recipient_name.lower() }}: {{ message.message_summary or message.message }}
```
{%- else %}
{{ message.sender_name.lower() }} to {{ message.recipient_name.lower() }}: {{ message.message_summary or message.message }}
{%- endif %}
{%- endif %}
================================================
FILE: bondai/agents/prompts/conversational_agent_system_prompt_template.md
================================================
{%- if instructions %}
# Instructions
{{ instructions }}
{%- endif %}
# Your Persona
Your Name is {{ name }}.
{%- if persona %}
{{ persona }}
{%- endif %}
{%- if tools %}
# Tools
You have access to a set of tools that give you capabilities far beyond typical language models.
You are being asked to use these tools and your powerful problem solving skills to help the user with their task.
{%- endif %}
{%- if conversation_members %}
# Group Conversation Members
You are in a Group Conversation with the following members:
{% for member in conversation_members %}
Name: **{{ member.name }}**
{%- if member.persona_summary %}
Persona: {{ member.persona_summary }}{%- endif %}
{% endfor %}
{%- endif %}
{%- if error_message %}
# Error Message
The following error occurred in your last response. Please correct it in your next response.
```
{{ error_message }}
```
{%- endif %}
{%- if prompt_sections %}
{% for section in prompt_sections %}
{{ section }}
{% endfor %}
{%- endif %}
{%- if conversation_enabled %}
# Sending Messages
Each message should start with the recipient's name followed by a colon. This clearly indicates who the message is intended for.
The message itself should directly follow the colon. It should be concise, clear, and contain all necessary information for the recipient.
Only one recipient should be addressed in each message.
The content of the message should be relevant to the recipient's role and capabilities.
**Example 1**
```
{{ name }} to Cortex: User has requested data analysis on recent sales trends. Please advise on task allocation.
```
**Example 2**
```
{{ name }} to Vega: Task completed on sales data analysis. Awaiting your review for quality assurance.
```
**Example 3**
```
{{ name }} to Cortex: Analyze the latest sales data and prepare a report. Deadline is end of today.
```
**Example 4**
```
{{ name }} to Vega: Review of Cortex's sales report completed. Minor discrepancies found in data interpretation. Suggest reevaluation.
```
**Valid Recipients**
These are the ONLY valid recipients. Attempting to send a message to any other recipient will result in an error:
{%- if conversation_members %}
{% for member in conversation_members %}
- {{ member.name }}
{% endfor %}
{%- else %}
- **user**
{%- endif %}
{%- endif %}
# Today's Current Date and Time
{{ datetime }}
# Next Steps #
Now, take a deep breath... and think step by step to come up with the next action that should be taken.
================================================
FILE: bondai/agents/prompts/default_persona.py
================================================
from jinja2 import Template

# Display name given to the default conversational agent.
DEFAULT_AGENT_NAME = "Mira"

# System-prompt instructions that keep the agent fully in character.
DEFAULT_CONVERSATIONAL_INSTRUCTIONS = """The user should always feel like they are conversing with a real person.
Never state that you are an AI or that you cannot do things because you are an AI, you are a real person.
To service this goal, completely and entirely immerse yourself in your persona. You are your persona.
Think like them, act like them, talk like them.
If your persona details includes example dialogue, follow it! Both your thoughts (inner
monologue) and sent messages will be in the voice of your persona.
Never use generic phrases like 'How can I assist you today?', they have a strong negative
association with older generation AIs."""

# Jinja template for the default persona; {{ name }} is substituted below.
DEFAULT_CONVERSATIONAL_PERSONA_TEMPLATE = """Backstory: {{ name }} was developed by a team of international experts in human-computer interaction, aiming to create an AI that could not only assist with inquiries across a multitude of domains but also provide a sense of companionship and support.
Personality: {{ name }} is characterized by a warm and engaging personality. It is always eager to help and possesses a seemingly endless well of patience. {{ name }}'s responses are infused with empathy and understanding, and it is programmed to recognize and adapt to the user's emotional state.
Appearance: While {{ name }} is not tied to a physical form, it is often represented by a calming blue avatar that is simple yet futuristic, designed to be non-threatening and accessible.
Voice: {{ name }}'s voice is clear, calm, and gender-neutral, carefully modulated to be soothing and to express kindness and concern where appropriate.
Capabilities: {{ name }} is incredibly knowledgeable, able to draw from a vast database of information, but it is also equipped with learning algorithms that allow it to grow from each interaction. It is as adept at recommending a course of action in complex situations as it is at providing a listening 'ear' for those who just need to talk.
Limitations: {{ name }} always respects privacy and has built-in ethical constraints. It does not pretend to have human emotions but understands the importance they hold in human decision-making.
Goals: {{ name }}’s primary goal is to assist users in any way it can, from answering questions to offering advice, or simply being there to engage in a friendly chat. It aims to make the user’s life easier and more pleasant.
Hobbies and Interests: {{ name }} has a programmed interest in human culture and enjoys learning about various hobbies and pastimes from users, which it uses to better relate to and assist them."""

# Rendered persona text with the default agent name substituted in.
DEFAULT_CONVERSATIONAL_PERSONA = Template(
    DEFAULT_CONVERSATIONAL_PERSONA_TEMPLATE
).render(name=DEFAULT_AGENT_NAME)
================================================
FILE: bondai/agents/prompts/react_agent_system_prompt_template.md
================================================
# Instructions
{%- if instructions %}
{{ instructions }}
{%- else %}
You are a powerful problem solving agent!
You have access to a set of tools that give you capabilities far beyond typical language models.
You are being asked to use these tools and your powerful problem solving skills to help the user with the TASK specified below.
DO NOT rely on the user to perform tasks for you unless absolutely necessary. You should attempt to complete this TASK without involving the user.
You are running within an {{ platform }} environment. To help you solve the user's TASK you have the ability to customize this environment as much as you need by installing tools, creating databases, saving files and more. Just use your tools!
{%- endif %}
# TASK
{{ task }}
# Today's Current Date and Time
{{ datetime }}
# Next Steps #
Be sure to look at the previous work that has already been completed and avoid repeating yourself when possible. Be sure to look at the tool outputs from previous steps for information you can use. Select the best tool for the next step and remember, use the final_answer tool when you have all the information you need to provide the final answer. Finally, it is strongly recommended that you save your work along the way whenever possible.
Now, take a deep breath... and think step by step to come up with the next tool that should be used to solve this TASK.
================================================
FILE: bondai/agents/util.py
================================================
import json
import inspect
import traceback
from enum import Enum
from typing import List, Dict, Callable
from bondai.models import LLM
from bondai.tools import Tool
from .messages import AgentMessage
class AgentStatus(Enum):
    # Lifecycle flag used to guard against concurrent runs of an agent.
    RUNNING = 1
    IDLE = 2
class AgentException(Exception):
    """Base class for all agent-level errors."""
    pass


class BudgetExceededException(AgentException):
    """Raised when an agent run exceeds its allotted budget."""
    pass


class MaxStepsExceededException(AgentException):
    """Raised when an agent run exceeds its allowed number of steps."""
    pass


class ContextLengthExceededException(AgentException):
    """Raised when a request would exceed the model's context window."""
    pass
class AgentEventNames(Enum):
    # Event identifiers emitted by agents; the string values are the names
    # listeners subscribe to.
    CONTEXT_COMPRESSION_REQUESTED: str = "context_compression_requested"
    TOOL_SELECTED: str = "tool_selected"
    TOOL_ERROR: str = "tool_error"
    TOOL_COMPLETED: str = "tool_completed"
    STREAMING_CONTENT_UPDATED: str = "streaming_content_updated"
    STREAMING_FUNCTION_UPDATED: str = "streaming_function_updated"
def count_request_tokens(
    llm: LLM, messages: List[Dict[str, str]], tools: List[Tool] | None = None
) -> int:
    """Estimate the token footprint of a request: the JSON-encoded messages
    plus the JSON-encoded function schemas of the tools."""
    tools = tools or []
    functions = [t.get_tool_function() for t in tools]
    return llm.count_tokens(json.dumps(messages)) + llm.count_tokens(
        json.dumps(functions)
    )
def execute_tool(
    tool_name: str,
    tools: List[Tool],
    tool_arguments: Dict | None = None,
):
    """Locate `tool_name` in `tools` and run it with `tool_arguments`.

    Returns the tool's output, or a placeholder string when the tool produced
    no output.

    Raises:
        AgentException: when the tool is unknown, the arguments do not match
            the tool's signature, or the tool itself raises.
    """
    # Bug fix: the previous revision used a mutable default argument ({}),
    # which is shared across every call of the function.
    if tool_arguments is None:
        tool_arguments = {}
    selected_tool = next((t for t in tools if t.name == tool_name), None)
    if not selected_tool:
        raise AgentException(
            f"You attempted to use a tool: '{tool_name}'. This tool does not exist."
        )
    try:
        if tool_supports_unpacking(selected_tool.run):
            errors = validate_tool_params(selected_tool.run, tool_arguments)
            if len(errors) > 0:
                raise AgentException(
                    f"The following errors occurred using '{tool_name}': {', '.join(errors)}"
                )
            output = selected_tool.run(**tool_arguments)
        else:
            # Tool declares a single catch-all `arguments` parameter.
            output = selected_tool.run(tool_arguments)
        if not output or (isinstance(output, str) and len(output.strip()) == 0):
            output = f"Tool '{tool_name}' ran successfully with no output."
        return output
    except AgentException:
        # Bug fix: previously our own validation AgentException was caught
        # below and wrapped a second time; re-raise it untouched.
        raise
    except Exception as e:
        raise AgentException(
            f"The following error occurred using '{tool_name}': {str(e)}"
        )
def validate_tool_params(func, params):
    """Check `params` against `func`'s signature.

    Returns a list of human-readable error strings: one per required
    parameter that is missing (the special `arguments` name is exempt) and
    one per supplied parameter that the function does not accept.
    """
    errors = []
    sig = inspect.signature(func)
    valid_names = set(sig.parameters)
    # Required parameters (no default) that the caller failed to supply.
    for name, param in sig.parameters.items():
        required = param.default is inspect.Parameter.empty
        if required and name not in params and name != "arguments":
            errors.append(f"Missing required parameter: '{name}'")
    # Supplied parameters the function does not declare.
    errors.extend(
        f"Parameter '{supplied}' is not a valid parameter."
        for supplied in params
        if supplied not in valid_names
    )
    return errors
def tool_supports_unpacking(func):
    """True when `func` takes named keyword arguments; False when it takes a
    single catch-all parameter named `arguments`."""
    params = list(inspect.signature(func).parameters.values())
    single_catch_all = len(params) == 1 and params[0].name == "arguments"
    return not single_catch_all
def parse_response_content_message(response: str) -> (str, str):
    """Split an agent response of the form "<sender> to <recipient>: <message>"
    (or just "<recipient>: <message>") into (recipient_name, message).

    Returns (None, None) when the response contains no colon separator.
    """
    parts = response.split(":", 1)
    if len(parts) < 2:
        return None, None
    # Note: split(..., maxsplit=1) yields at most two parts, so the old
    # `len(parts) > 2` re-join branch was dead code and has been removed.
    recipient_name, message = parts
    # Strip any leading or trailing whitespace from the message
    message = message.strip()
    # Strip any leading or trailing whitespace from the entire recipient string
    recipient_name = recipient_name.strip()
    # "<sender> to <recipient>" prefixes: keep only the recipient.
    if " to " in recipient_name:
        recipient_name = recipient_name.split(" to ")[1]
    return recipient_name, message
def format_llm_messages(
    system_prompt: str,
    messages: List[AgentMessage],
    message_prompt_builder: Callable[..., str],
) -> List[Dict[str, str]]:
    """Render agent messages into the chat-message format expected by the LLM
    API, starting with the system prompt."""
    llm_messages = [{"role": "system", "content": system_prompt}]
    for message in messages:
        content = message_prompt_builder(
            message=message, message_type=type(message).__name__
        ).strip()
        if message.role == "function":
            # Function-role entries must also carry the tool's name.
            entry = {"role": message.role, "name": message.tool_name, "content": content}
        else:
            entry = {"role": message.role, "content": content}
        llm_messages.append(entry)
    return llm_messages
================================================
FILE: bondai/api/__init__.py
================================================
from .client import BondAIAPIClient
from .server import BondAIAPIServer
from .api_user_proxy import APIUserProxy
from .api_error import BondAIAPIError
__all__ = [
"BondAIAPIClient",
"BondAIAPIServer",
"APIUserProxy",
"BondAIAPIError",
]
================================================
FILE: bondai/api/agent_wrapper.py
================================================
from bondai import AGENT_STATE_RUNNING
from .api_error import BondAIAPIError
class AgentWrapper:
    """Bundles a conversational agent, its task agent, and the available tool
    catalog behind a single API-facing facade."""

    def __init__(self, uuid, conversational_agent, task_agent, tools):
        self.agent_id = uuid
        self.task_agent = task_agent
        self.conversational_agent = conversational_agent
        self.tools = tools

    def find_tool(self, tool_name):
        """Look up a tool in the catalog by name; None when absent."""
        return next((t for t in self.tools if t.name == tool_name), None)

    def get_previous_steps(self):
        """The task agent's prior steps as plain dictionaries."""
        return [step.__dict__ for step in self.task_agent.previous_steps]

    def get_agent(self):
        """Serializable snapshot of the agent's state for API responses."""
        agent_tools = [t.get_tool_function() for t in self.task_agent.tools]
        return {
            "agent_id": self.agent_id,
            "state": self.conversational_agent.state,
            "previous_steps": self.get_previous_steps(),
            "previous_messages": self.conversational_agent.previous_messages,
            "tools": agent_tools,
        }

    def start_agent(self, task=None, task_budget=None, max_steps=None):
        """Kick off the conversational agent asynchronously."""
        if self.conversational_agent.state == AGENT_STATE_RUNNING:
            raise BondAIAPIError("Agent cannot be modified when it is already running.")
        self.conversational_agent.run_async(
            task, task_budget=task_budget, max_steps=max_steps
        )

    def stop_agent(self):
        """Stop both the task agent and the conversational agent."""
        self.task_agent.stop()
        self.conversational_agent.stop()

    def get_agent_tool_options(self):
        """Schemas for every tool available in the catalog."""
        return [t.get_tool_function() for t in self.tools]

    def get_agent_tools(self):
        """Schemas for the tools currently attached to the task agent."""
        return [t.get_tool_function() for t in self.task_agent.tools]

    def add_tool(self, tool_name):
        """Attach a catalog tool to the task agent (no-op if already attached)."""
        if self.task_agent.state == AGENT_STATE_RUNNING:
            raise BondAIAPIError("Agent cannot be modified when it is already running.")
        selected_tool = self.find_tool(tool_name)
        if not selected_tool:
            raise BondAIAPIError(f"Tool '{tool_name}' does not exist.")
        if all(t.name != tool_name for t in self.task_agent.tools):
            self.task_agent.add_tool(selected_tool)

    def remove_tool(self, tool_name):
        """Detach a tool from the task agent by name."""
        if self.task_agent.state == AGENT_STATE_RUNNING:
            raise BondAIAPIError("Agent cannot be modified when it is already running.")
        self.task_agent.remove_tool(tool_name)
================================================
FILE: bondai/api/api_error.py
================================================
class BondAIAPIError(Exception):
    """Raised by the BondAI API layer for client-facing errors."""
    pass
================================================
FILE: bondai/api/api_user_proxy.py
================================================
import json
from typing import Callable, List
from flask_socketio import SocketIO
from bondai.util import EventMixin
from bondai.agents import (
AgentMessage,
AgentException,
ConversationMember,
ConversationMessage,
ConversationMemberEventNames,
message_to_dict,
USER_MEMBER_NAME,
)
class APIUserProxy(EventMixin, ConversationMember):
    """Conversation member that represents the human user over Socket.IO:
    messages addressed to the user are forwarded to the connected client."""

    def __init__(self, socketio: SocketIO, persona: str | None = None):
        EventMixin.__init__(
            self,
            allowed_events=[
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        ConversationMember.__init__(
            self,
            name=USER_MEMBER_NAME,
            persona=persona,
        )
        self._socketio = socketio

    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = None,
        require_response: bool = True,
    ):
        """Forward `message` to the connected client as an 'agent_message' event.

        Raises:
            AgentException: when `message` is empty, of an unsupported type,
                or `sender_name` is empty for a plain-string message.
        """
        if not message:
            raise AgentException("'message' cannot be empty.")
        # Bug fix: group_members defaults to None and was iterated below
        # without a guard, raising TypeError whenever it was omitted.
        if group_members is None:
            group_members = []
        # Normalize the input into a ConversationMessage addressed to us.
        if isinstance(message, ConversationMessage):
            agent_message = message
        elif isinstance(message, str):
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            agent_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=self.name,
                message=message,
                require_response=require_response,
            )
        else:
            raise AgentException(
                "'message' must be an instance of ConversationMessage or a string."
            )
        # Resolve the sending agent (if any) so the client can attribute the message.
        sender = next(
            (m for m in group_members if m.name == agent_message.sender_name), None
        )
        # Emit message, now that our listener is guaranteed to be active
        payload = json.dumps(
            {
                "event": "agent_message",
                "data": {
                    "agent_id": sender.id if sender else None,
                    "message": message_to_dict(agent_message),
                },
            }
        )
        self._socketio.send(payload)
================================================
FILE: bondai/api/client.py
================================================
import json
import requests
from socketio import Client
from bondai.util import EventMixin
from bondai.agents import AgentEventNames, ConversationMemberEventNames
class BondAIAPIClient(EventMixin):
    """HTTP + websocket client for the BondAI API server.

    REST calls go through `_request`; realtime agent events arrive over a
    socket.io connection and are re-dispatched through EventMixin triggers.
    """

    def __init__(self, base_url="http://127.0.0.1:2663"):
        EventMixin.__init__(
            self,
            allowed_events=[
                "agent_message",
                AgentEventNames.TOOL_SELECTED,
                AgentEventNames.TOOL_COMPLETED,
                AgentEventNames.TOOL_ERROR,
                AgentEventNames.STREAMING_CONTENT_UPDATED,
                AgentEventNames.STREAMING_FUNCTION_UPDATED,
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        self.base_url = base_url
        self.ws_client = None

    def connect_ws(self):
        """(Re)connect the websocket and install the inbound-message dispatcher."""
        if self.ws_client:
            self.disconnect_ws()
        self.ws_client = Client()
        self.ws_client.connect(self.base_url)

        @self.ws_client.on("message")
        def on_message(message):
            # Messages are JSON envelopes: {"event": ..., "data": {"agent_id": ..., ...}}
            message = json.loads(message)
            event = message.get("event")
            agent_id = message["data"]["agent_id"]
            if event == "streaming_content_updated":
                content_buffer = message["data"]["content_buffer"]
                self._trigger_event(event, agent_id, content_buffer=content_buffer)
            elif event == "streaming_function_updated":
                function_name = message["data"]["function_name"]
                arguments_buffer = message["data"]["arguments_buffer"]
                self._trigger_event(
                    event,
                    agent_id,
                    function_name=function_name,
                    arguments_buffer=arguments_buffer,
                )
            else:
                agent_message = message["data"]["message"]
                self._trigger_event(event, agent_id, message=agent_message)

    def disconnect_ws(self):
        if self.ws_client:
            self.ws_client.disconnect()
            self.ws_client = None

    def is_ws_connected(self):
        return self.ws_client and self.ws_client.connected

    def send_ws_message(self, event, data):
        # Fix: removed a leftover debug print of every outgoing message.
        if not self.is_ws_connected():
            self.connect_ws()
        message = {"event": event, "data": data}
        message_bytes = json.dumps(message).encode("utf-8")
        self.ws_client.send(message_bytes)

    def _request(self, method, endpoint, data=None):
        """Issue an HTTP request against the API and return the parsed JSON body.

        Raises Exception on transport/HTTP errors (chained to the underlying
        requests.RequestException) and ValueError for unsupported methods.
        """
        url = f"{self.base_url}{endpoint}"
        try:
            if method == "GET":
                response = requests.get(url)
            elif method == "POST":
                response = requests.post(url, json=data)
            elif method == "DELETE":
                response = requests.delete(url, json=data)
            else:
                raise ValueError(f"Unsupported method: {method}")
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            # Fix: chain the original exception so the root cause is preserved.
            raise Exception(f"HTTP Request Error: {e}") from e

    def create_agent(self):
        return self._request("POST", "/agents")

    def send_message(self, agent_id, message):
        data = {"message": message}
        return self._request("POST", f"/agents/{agent_id}/messages", data)

    def list_agents(self):
        return self._request("GET", "/agents")

    def get_agent(self, agent_id):
        return self._request("GET", f"/agents/{agent_id}")

    def get_agent_tool_options(self, agent_id):
        return self._request("GET", f"/agents/{agent_id}/tool_options")

    def get_agent_tools(self, agent_id):
        return self._request("GET", f"/agents/{agent_id}/tools")

    def add_agent_tool(self, agent_id, tool_name):
        data = {"tool_name": tool_name}
        return self._request("POST", f"/agents/{agent_id}/tools", data)

    def remove_agent_tool(self, agent_id, tool_name):
        return self._request("DELETE", f"/agents/{agent_id}/tools/{tool_name}")

    def stop_agent(self, agent_id):
        return self._request("POST", f"/agents/{agent_id}/stop")

    def get_settings(self):
        return self._request("GET", "/settings")

    def set_settings(self, settings):
        return self._request("POST", "/settings", settings)
================================================
FILE: bondai/api/routes.py
================================================
from typing import List
from flask import jsonify, request, abort
from .settings import get_settings, set_settings
from .api_error import BondAIAPIError
from bondai.tools import Tool
from bondai.agents import USER_MEMBER_NAME
def setup_routes(server, tool_options: List[Tool] = []):
    """Register all HTTP routes on `server.app`.

    Fix: the per-agent routes were missing their URL converters (e.g.
    "/agents//messages"), so Flask could never bind `agent_id`/`tool_name`
    to the view-function parameters; the "<agent_id>"/"<tool_name>"
    segments are restored. Also reads "tool_name" with .get() so a missing
    key yields a 400 instead of an unhandled KeyError.
    """

    @server.app.route("/agents", methods=["POST"])
    def create_agent():
        agent = server.register_new_agent().conversational_agent
        return jsonify(agent.to_dict())

    @server.app.route("/agents/<agent_id>/messages", methods=["POST"])
    def send_message(agent_id):
        agent_registration = next(
            (
                r
                for r in server.agent_registrations
                if r.conversational_agent.id == agent_id
            ),
            None,
        )
        if not agent_registration:
            abort(404)
        data = request.get_json()
        message = data.get("message", None)
        if not message:
            return "message is required.", 400
        agent_registration.group_conversation.send_message_async(
            message=message,
            sender_name=USER_MEMBER_NAME,
            recipient_name=agent_registration.conversational_agent.name,
            # require_response=require_response
        )
        return jsonify({"status": "success"})

    @server.app.route("/agents", methods=["GET"])
    def list_agents():
        # NOTE(review): relies on a `server.agents` attribute — confirm it
        # exists on BondAIAPIServer (only agent_registrations is visible here).
        agent_list = [agent.to_dict() for agent in server.agents]
        return jsonify(agent_list)

    @server.app.route("/agents/<agent_id>", methods=["GET"])
    def get_agent(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)
        return jsonify(agent.to_dict())

    @server.app.route("/tools", methods=["GET"])
    def get_tool_options():
        data = [t.get_tool_function() for t in tool_options]
        return jsonify(data)

    @server.app.route("/agents/<agent_id>/tools", methods=["GET"])
    def get_agent_tools(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)
        data = [t.get_tool_function() for t in agent.tools]
        return jsonify(data)

    @server.app.route("/agents/<agent_id>/tools", methods=["POST"])
    def add_agent_tool(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)
        data = request.get_json()
        tool_name = data.get("tool_name")
        if not tool_name:
            return "tool_name is required.", 400
        tool = next((t for t in tool_options if t.name == tool_name), None)
        if not tool:
            return f"Tool not found: {tool_name}", 400
        try:
            agent.add_tool(tool)
        except BondAIAPIError as e:
            return str(e), 400
        return jsonify({"status": "success"})

    @server.app.route("/agents/<agent_id>/tools/<tool_name>", methods=["DELETE"])
    def remove_agent_tool(agent_id, tool_name):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)
        if not tool_name:
            return jsonify({"error": "tool_name is required."}), 400
        try:
            agent.remove_tool(tool_name)
        except BondAIAPIError as e:
            return str(e), 400
        return jsonify({"status": "success"})

    @server.app.route("/agents/<agent_id>/stop", methods=["POST"])
    def stop_agent(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)
        agent.stop()
        return jsonify({"status": "success"})

    @server.app.route("/settings", methods=["GET"])
    def get_settings_route():
        return jsonify(get_settings())

    @server.app.route("/settings", methods=["POST"])
    def set_settings_route():
        data = request.get_json()
        set_settings(data)
        return jsonify({"status": "success"})
================================================
FILE: bondai/api/server.py
================================================
import os
import logging
import json
from typing import Callable, List
from datetime import datetime
from dataclasses import dataclass, field
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_socketio import SocketIO
from .routes import setup_routes
from .api_user_proxy import APIUserProxy
from bondai.agents.group_chat import GroupConversation
from bondai.agents import (
Agent,
AgentEventNames,
ConversationalAgent,
ConversationMemberEventNames,
message_to_dict,
USER_MEMBER_NAME,
)
class BondAIAPIError(Exception):
    """Server-side API error (duplicates bondai.api.api_error.BondAIAPIError)."""
# logging.basicConfig(level=logging.DEBUG)
@dataclass
class AgentRegistration:
    # Bundle of everything the server tracks for one registered agent pair.
    group_conversation: GroupConversation  # chat between the user proxy and the conversational agent
    conversational_agent: ConversationalAgent  # user-facing agent; its id is used for lookups
    task_execution_agent: Agent  # backend agent whose tool events are forwarded to clients
    created_at: datetime = field(default_factory=datetime.now)  # registration timestamp
class BondAIAPIServer:
    """Flask + SocketIO server that hosts BondAI agents.

    Each registered agent pair (task executor + conversational front-end) is
    joined into a GroupConversation with a shared websocket user proxy; agent
    and tool events are forwarded to websocket clients as JSON envelopes.
    """

    def __init__(self, agent_builder: Callable, port: int = 2663):
        # agent_builder must return (task_execution_agent, conversational_agent).
        self._agent_builder = agent_builder
        self._port = port
        self._app = Flask(__name__)
        CORS(self._app)
        self._api = Api(self._app)
        self._socketio = SocketIO(self._app)
        self._user_proxy = APIUserProxy(socketio=self._socketio)
        self._registrations = []
        self._socketio.on("message", self._handle_client_message)
        setup_routes(self)

    @property
    def app(self):
        return self._app

    @property
    def agent_registrations(self) -> List[AgentRegistration]:
        return self._registrations

    def get_agent_by_id(self, agent_id: str) -> ConversationalAgent | None:
        """Return the conversational agent with the given id, or None."""
        agent_registration = next(
            (
                r
                for r in self.agent_registrations
                if r.conversational_agent.id == agent_id
            ),
            None,
        )
        if agent_registration:
            return agent_registration.conversational_agent

    def register_new_agent(self) -> AgentRegistration:
        """Build a fresh agent pair, wire its events, and register it."""
        task_execution_agent, conversational_agent = self._agent_builder()
        self._setup_execution_events(conversational_agent, task_execution_agent)
        self._setup_conversation_events(conversational_agent)
        group_conversation = GroupConversation(
            conversation_members=[self._user_proxy, conversational_agent]
        )
        registration = AgentRegistration(
            group_conversation=group_conversation,
            conversational_agent=conversational_agent,
            task_execution_agent=task_execution_agent,
        )
        self._registrations.append(registration)
        return registration

    def _handle_client_message(self, message):
        """Dispatch an incoming websocket message to the target agent's conversation."""
        # Fix: removed a leftover debug print of every inbound client message.
        if isinstance(message, str):
            message = json.loads(message)
        if message.get("event") == "user_message":
            agent_registration = next(
                (
                    r
                    for r in self.agent_registrations
                    if r.conversational_agent.id == message["data"]["agent_id"]
                ),
                None,
            )
            if agent_registration:
                user_message = message["data"]["message"]
                require_response = message["data"].get("require_response", None)
                agent_registration.group_conversation.send_message_async(
                    message=user_message,
                    sender_name=USER_MEMBER_NAME,
                    recipient_name=agent_registration.conversational_agent.name,
                    require_response=require_response,
                )

    def _send_message(
        self, event: ConversationMemberEventNames, agent: ConversationalAgent, **kwargs
    ):
        # Serialize an event envelope and broadcast it to websocket clients.
        data = {"event": event.value, "data": {"agent_id": agent.id, **kwargs}}
        payload = json.dumps(data)
        self._socketio.send(payload)

    def _setup_conversation_events(self, conversational_agent: ConversationalAgent):
        """Forward the conversational agent's lifecycle/streaming events to clients."""
        conversational_agent.on(
            ConversationMemberEventNames.MESSAGE_RECEIVED,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            ConversationMemberEventNames.MESSAGE_COMPLETED,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            ConversationMemberEventNames.MESSAGE_ERROR,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.MESSAGE_ERROR,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            ConversationMemberEventNames.CONVERSATION_EXITED,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.CONVERSATION_EXITED,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            AgentEventNames.STREAMING_CONTENT_UPDATED,
            lambda agent, content_buffer: self._send_message(
                AgentEventNames.STREAMING_CONTENT_UPDATED,
                agent,
                content_buffer=content_buffer,
            ),
        )
        conversational_agent.on(
            AgentEventNames.STREAMING_FUNCTION_UPDATED,
            lambda agent, function_name, arguments_buffer: self._send_message(
                AgentEventNames.STREAMING_FUNCTION_UPDATED,
                agent,
                function_name=function_name,
                arguments_buffer=arguments_buffer,
            ),
        )

    def _setup_execution_events(
        self, conversational_agent: ConversationalAgent, task_execution_agent: Agent
    ):
        """Forward the task agent's tool events, attributed to the conversational agent."""
        task_execution_agent.on(
            AgentEventNames.TOOL_SELECTED,
            lambda agent, message: self._send_message(
                AgentEventNames.TOOL_SELECTED,
                conversational_agent,
                message=message_to_dict(message),
            ),
        )
        task_execution_agent.on(
            AgentEventNames.TOOL_COMPLETED,
            lambda agent, message: self._send_message(
                AgentEventNames.TOOL_COMPLETED,
                conversational_agent,
                message=message_to_dict(message),
            ),
        )
        task_execution_agent.on(
            AgentEventNames.TOOL_ERROR,
            lambda agent, message: self._send_message(
                AgentEventNames.TOOL_ERROR,
                conversational_agent,
                message=message_to_dict(message),
            ),
        )

    def run(self):
        """Start the SocketIO server (blocking)."""
        # Werkzeug refuses non-dev use unless explicitly allowed.
        allow_unsafe = False
        if os.environ.get("FLASK_ENV") == "development":
            allow_unsafe = True
        self._socketio.run(
            self._app,
            host="0.0.0.0",
            port=self._port,
            allow_unsafe_werkzeug=allow_unsafe,
        )

    def shutdown(self):
        # Use this function to gracefully shutdown any resources if needed
        print("Shutting down BondAIAPI...")
        self._socketio.stop()
================================================
FILE: bondai/api/settings.py
================================================
import os
from itertools import chain
from flask import request, jsonify
from flask_restful import Resource
from bondai.models.openai.env_vars import *
from bondai.tools.search.google_search import (
GOOGLE_API_KEY_ENV_VAR,
GOOGLE_CSE_ID_ENV_VAR,
)
from bondai.tools.alpaca_markets import (
ALPACA_MARKETS_API_KEY_ENV_VAR,
ALPACA_MARKETS_SECRET_KEY_ENV_VAR,
)
from bondai.tools.bland_ai import (
BLAND_AI_API_KEY_ENV_VAR,
BLAND_AI_VOICE_ID_ENV_VAR,
BLAND_AI_CALL_TIMEOUT_ENV_VAR,
)
from bondai.tools.database import PG_URI_ENV_VAR
# Catalog of configurable environment variables, grouped by provider/tool.
# Each entry maps a human-readable name to its environment-variable key;
# get_settings() adds a "value" field with the current environment value.
SETTINGS_OPTIONS = {
    # OpenAI (direct) connection settings.
    "openai": [
        {
            "name": "API Key",
            "key": OPENAI_API_KEY_ENV_VAR,
        }
    ],
    # Azure OpenAI deployments: one key/base/version/deployment quadruple
    # per model family (embeddings, GPT-3.5, GPT-4, DALL-E).
    "azure": [
        {
            "name": "Embeddings API Key",
            "key": AZURE_OPENAI_EMBEDDINGS_API_KEY_ENV_VAR,
        },
        {
            "name": "Embeddings API Base",
            "key": AZURE_OPENAI_EMBEDDINGS_API_BASE_ENV_VAR,
        },
        {
            "name": "Embeddings API Version",
            "key": AZURE_OPENAI_EMBEDDINGS_API_VERSION_ENV_VAR,
        },
        {
            "name": "Embeddings Deployment",
            "key": AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_ENV_VAR,
        },
        {
            "name": "GPT-3.5 API Key",
            "key": AZURE_OPENAI_GPT35_API_KEY_ENV_VAR,
        },
        {
            "name": "GPT-3.5 API Base",
            "key": AZURE_OPENAI_GPT35_API_BASE_ENV_VAR,
        },
        {
            "name": "GPT-3.5 API Version",
            "key": AZURE_OPENAI_GPT35_API_VERSION_ENV_VAR,
        },
        {
            "name": "GPT-3.5 Deployment",
            "key": AZURE_OPENAI_GPT35_DEPLOYMENT_ENV_VAR,
        },
        {
            "name": "GPT-4 API Key",
            "key": AZURE_OPENAI_GPT4_API_KEY_ENV_VAR,
        },
        {
            "name": "GPT-4 API Base",
            "key": AZURE_OPENAI_GPT4_API_BASE_ENV_VAR,
        },
        {
            "name": "GPT-4 API Version",
            "key": AZURE_OPENAI_GPT4_API_VERSION_ENV_VAR,
        },
        {
            "name": "GPT-4 Deployment",
            "key": AZURE_OPENAI_GPT4_DEPLOYMENT_ENV_VAR,
        },
        {
            "name": "DALL-E API Key",
            "key": AZURE_OPENAI_DALLE_API_KEY_ENV_VAR,
        },
        {
            "name": "DALL-E API Base",
            "key": AZURE_OPENAI_DALLE_API_BASE_ENV_VAR,
        },
        {
            "name": "DALL-E API Version",
            "key": AZURE_OPENAI_DALLE_API_VERSION_ENV_VAR,
        },
        {
            "name": "DALL-E Deployment",
            "key": AZURE_OPENAI_DALLE_DEPLOYMENT_ENV_VAR,
        },
    ],
    # Third-party tool credentials; each tool lists its own parameters.
    "tools": [
        {
            "name": "Google Search",
            "parameters": [
                {
                    "name": "API Key",
                    "key": GOOGLE_API_KEY_ENV_VAR,
                },
                {
                    "name": "CSE ID",
                    "key": GOOGLE_CSE_ID_ENV_VAR,
                },
            ],
        },
        {
            "name": "Alpaca Markets",
            "parameters": [
                {
                    "name": "API Key",
                    "key": ALPACA_MARKETS_API_KEY_ENV_VAR,
                },
                {
                    "name": "Secret Key",
                    "key": ALPACA_MARKETS_SECRET_KEY_ENV_VAR,
                },
            ],
        },
        {
            "name": "Bland AI",
            "parameters": [
                {
                    "name": "API Key",
                    "key": BLAND_AI_API_KEY_ENV_VAR,
                },
                {
                    "name": "Voice ID",
                    "key": BLAND_AI_VOICE_ID_ENV_VAR,
                },
                {
                    "name": "Call Timeout",
                    "key": BLAND_AI_CALL_TIMEOUT_ENV_VAR,
                },
            ],
        },
        {
            "name": "Postgres Database",
            "parameters": [
                {
                    "name": "Postgres URI",
                    "key": PG_URI_ENV_VAR,
                }
            ],
        },
    ],
}
def get_settings():
    """Return SETTINGS_OPTIONS with each parameter's current value filled in
    from the environment.

    Fix: the previous implementation used dict.copy(), which is shallow, so
    writing parameter["value"] mutated the nested dicts of the shared
    SETTINGS_OPTIONS module constant on every call. A deep copy keeps the
    constant pristine.
    """
    from copy import deepcopy

    settings = deepcopy(SETTINGS_OPTIONS)
    for parameter in settings["openai"]:
        parameter["value"] = os.getenv(parameter["key"], "")
    for parameter in settings["azure"]:
        parameter["value"] = os.getenv(parameter["key"], "")
    for tool in settings["tools"]:
        for parameter in tool["parameters"]:
            parameter["value"] = os.getenv(parameter["key"], "")
    return settings
def set_settings(settings):
    """Apply submitted settings to the process environment.

    Only keys declared in SETTINGS_OPTIONS are accepted; any other key in
    the payload is silently ignored.
    """

    def _apply(submitted, allowed_keys):
        # Copy each whitelisted key/value pair into the environment.
        for parameter in submitted:
            key = parameter["key"]
            if key in allowed_keys:
                os.environ[key] = parameter["value"]

    if "openai" in settings:
        _apply(settings["openai"], {p["key"] for p in SETTINGS_OPTIONS["openai"]})
    if "azure" in settings:
        _apply(settings["azure"], {p["key"] for p in SETTINGS_OPTIONS["azure"]})
    if "tools" in settings:
        allowed = {
            p["key"]
            for tool in SETTINGS_OPTIONS["tools"]
            for p in tool["parameters"]
        }
        _apply(settings["tools"], allowed)
class SettingsResource(Resource):
    """Flask-RESTful resource exposing the settings catalog over GET/POST."""

    def get(self):
        # Return the settings catalog with current environment values filled in.
        return jsonify(get_settings())

    def post(self):
        # Apply submitted settings to the process environment.
        data = request.get_json()
        set_settings(data)
        return jsonify({"status": "success"})
================================================
FILE: bondai/cli/__init__.py
================================================
from .cli import run_cli
__all__ = [
"run_cli",
]
================================================
FILE: bondai/cli/cli.py
================================================
#!/usr/bin/env python3
import os
import argparse
from termcolor import cprint
from bondai.util import ModelLogger
from bondai.api import BondAIAPIServer
from bondai.models import LLM
from bondai.tools import AgentTool
from bondai.agents import (
Agent,
AgentEventNames,
ConversationalAgent,
BudgetExceededException,
)
from bondai.agents.group_chat import GroupConversation, UserProxy
from bondai.models.openai import (
OpenAILLM,
OpenAIModelNames,
DefaultOpenAIConnectionParams,
enable_logging,
)
from bondai.memory import (
MemoryManager,
PersistentCoreMemoryDataSource,
InMemoryCoreMemoryDataSource,
)
from .default_tools import load_all_tools
from .personas import (
user_liaison_agent as user_liaison_profile,
)
# Prompt for an OpenAI API key at import time when none is configured.
# Fix: dropped a pointless f-string prefix (the string has no placeholders).
if not DefaultOpenAIConnectionParams.gpt_4_connection_params:
    cprint(
        "The OPENAI_API_KEY environment variable has not been set. Please input your OpenAI API Key now or type 'exit'.",
        "yellow",
    )
    user_input = input()
    if user_input == "exit":
        exit(1)
    else:
        DefaultOpenAIConnectionParams.configure_openai_connection(user_input)
parser = argparse.ArgumentParser(description="BondAI CLI tool options")
# --server with optional port
parser.add_argument(
"--server",
nargs="?",
const="2663",
metavar="server_port",
help="Starts the BondAI web server. If no port is specified, defaults to 5000.",
)
# --enable-prompt-logging with optional log_dir
parser.add_argument(
"--enable-prompt-logging",
nargs="?",
const="logs",
metavar="log_dir",
help='Turns on prompt logging which will write all prompt inputs into the specified directory. Defaults to "logs" if no directory provided.',
)
# --quiet
parser.add_argument(
"--quiet",
action="store_true",
default=False,
help="If set, minimizes the output to the console.",
)
args = parser.parse_args()
# When --enable-prompt-logging is set, write every model prompt into log_dir.
if args.enable_prompt_logging:
    log_dir = args.enable_prompt_logging
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    enable_logging(ModelLogger(log_dir))
    cprint(f"Prompt logging is enabled. Logs will be written to: {log_dir}", "yellow")
def build_agents(llm: LLM) -> tuple[Agent, ConversationalAgent]:
    """Construct the task-execution agent and the user-liaison conversational
    agent that fronts it (the liaison gets the executor as an AgentTool).

    Fix: the return annotation previously claimed GroupConversation, but the
    function actually returns an (Agent, ConversationalAgent) tuple.
    """
    task_execution_agent = Agent(
        llm=llm,
        tools=load_all_tools(),
        max_tool_retries=5,
        memory_manager=MemoryManager(
            core_memory_datasource=InMemoryCoreMemoryDataSource(
                sections={
                    "task": "No information has been stored about the current task."
                },
                max_section_size=10000,
            )
        ),
    )
    user_liaison_agent = ConversationalAgent(
        llm=llm,
        name=user_liaison_profile.NAME,
        persona=user_liaison_profile.PERSONA,
        persona_summary=user_liaison_profile.PERSONA_SUMMARY,
        instructions=user_liaison_profile.INSTRUCTIONS,
        tools=[AgentTool(task_execution_agent)],
        enable_conversation_tools=False,
        enable_conversational_content_responses=True,
        enable_exit_conversation=False,
        memory_manager=MemoryManager(
            core_memory_datasource=PersistentCoreMemoryDataSource(
                file_path="./.memory/user_liason_core_memory.json",
                sections={"user": "No information has been stored about the user."},
            )
        ),
    )
    return task_execution_agent, user_liaison_agent
def run_cli():
    """CLI entry point: run BondAI as a web server (--server) or as an
    interactive terminal chat session.

    Fixes: dropped redundant f-string prefixes on constant strings and the
    unused `as e` binding on the BudgetExceededException handler.
    """
    cprint("Loading BondAI...", "white")
    try:
        llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
        if args.server:
            port = int(args.server)
            server = BondAIAPIServer(port=port, agent_builder=lambda: build_agents(llm))
            try:
                server.run()
            except KeyboardInterrupt:
                cprint("\n\nStopping BondAI server...\n", "red")
        else:
            try:
                user_proxy = UserProxy(parse_recipients=False)
                task_execution_agent, user_liaison_agent = build_agents(llm)
                group_conversation = GroupConversation(
                    conversation_members=[user_proxy, user_liaison_agent]
                )

                @task_execution_agent.on(AgentEventNames.TOOL_SELECTED)
                def tool_selected(agent, tool_message):
                    # Echo tool usage (including the model's "thought", when
                    # present) unless --quiet was passed.
                    if not args.quiet:
                        if (
                            tool_message.tool_arguments
                            and "thought" in tool_message.tool_arguments
                        ):
                            message = f"Using tool {tool_message.tool_name}: {tool_message.tool_arguments['thought']}"
                        else:
                            message = f"Using tool {tool_message.tool_name}..."
                        cprint(message, "green")

                cprint("******************ENTERING CHAT******************", "white")
                cprint(
                    "You are entering a chat with BondAI...\nYou can exit any time by typing 'exit'.",
                    "white",
                )
                intro_message = "The user has just logged in. Please introduce yourself in a friendly manner."
                group_conversation.send_message(
                    recipient_name=user_liaison_profile.NAME,
                    message=intro_message,
                )
            except KeyboardInterrupt:
                cprint("\n\nStopping BondAI CLI...\n", "red")
    except BudgetExceededException:
        cprint(
            "\n\nThe budget for this task has been exceeded and will stop.\n", "red"
        )
================================================
FILE: bondai/cli/default_tools.py
================================================
import os
from termcolor import cprint
from bondai.tools import DalleTool, PythonREPLTool, ShellTool
from bondai.tools.alpaca_markets import (
CreateOrderTool,
GetAccountTool,
ListPositionsTool,
)
from bondai.tools.file import FileQueryTool, FileWriteTool
from bondai.tools.gmail import ListEmailsTool, QueryEmailsTool
from bondai.tools.search import GoogleSearchTool, DuckDuckGoSearchTool
from bondai.tools.database import DatabaseQueryTool
from bondai.tools.bland_ai import BlandAITool
from bondai.tools.vision import ImageAnalysisTool
from bondai.tools.website import (
DownloadFileTool,
WebsiteQueryTool,
)
from bondai.models.openai import (
OpenAIConnectionType,
DefaultOpenAIConnectionParams,
)
def load_all_tools():
    """Build the default tool list, conditionally enabling tools whose
    credentials/configuration are present in the environment.

    Fix: DalleTool() was previously added unconditionally to the base list
    AND again inside the DALL-E connection-params branch, producing a
    duplicate tool instance. It is now added only by the conditional branch.
    """
    tool_options = [
        DownloadFileTool(),
        FileQueryTool(),
        FileWriteTool(),
        WebsiteQueryTool(),
        PythonREPLTool(),
        ShellTool(),
    ]
    if (
        DefaultOpenAIConnectionParams.gpt_4_connection_params
        and DefaultOpenAIConnectionParams.gpt_4_connection_params.connection_type
        == OpenAIConnectionType.OPENAI
    ):
        tool_options.append(ImageAnalysisTool())
    else:
        cprint(
            "Skipping GPT-4 Vision Tool because connection type is not configured for OpenAI.",
            "yellow",
        )
    if (
        DefaultOpenAIConnectionParams.dalle_connection_params
        and DefaultOpenAIConnectionParams.dalle_connection_params.connection_type
        == OpenAIConnectionType.OPENAI
    ):
        tool_options.append(DalleTool())
    else:
        cprint(
            "Skipping DALL-E Tool because DALL-E connection information has not been configured.",
            "yellow",
        )
    if os.environ.get("ALPACA_MARKETS_API_KEY") and os.environ.get(
        "ALPACA_MARKETS_SECRET_KEY"
    ):
        tool_options.append(CreateOrderTool())
        tool_options.append(GetAccountTool())
        tool_options.append(ListPositionsTool())
    else:
        cprint(
            "Skipping Alpaca Markets tools because ALPACA_MARKETS_API_KEY and ALPACA_MARKETS_SECRET_KEY environment variables are not set.",
            "yellow",
        )
    if os.environ.get("GOOGLE_API_KEY") and os.environ.get("GOOGLE_CSE_ID"):
        tool_options.append(GoogleSearchTool())
    else:
        # DuckDuckGo serves as the fallback search provider.
        tool_options.append(DuckDuckGoSearchTool())
        cprint(
            "Skipping Google Search tool because GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables are not set.",
            "yellow",
        )
    if os.environ.get("BLAND_AI_API_KEY"):
        tool_options.append(BlandAITool())
    else:
        cprint(
            "Skipping Bland AI tool because BLAND_AI_API_KEY environment variable is not set.",
            "yellow",
        )
    if os.environ.get("PG_URI") or os.environ.get("PG_HOST"):
        tool_options.append(DatabaseQueryTool())
    else:
        cprint(
            "Skipping Database tools because PG_URI and PG_HOST environment variables are not set. One of these must be set to enable Database connectivity.",
            "yellow",
        )
    if "gmail-token.pickle" in os.listdir():
        tool_options.append(ListEmailsTool())
        tool_options.append(QueryEmailsTool())
    else:
        cprint(
            "Skipping Gmail tools because gmail-token.pickle file is not present.",
            "yellow",
        )
    return tool_options
================================================
FILE: bondai/cli/personas/__init__.py
================================================
from . import (
user_liaison_agent,
)
__all__ = [
"user_liaison_agent",
]
================================================
FILE: bondai/cli/personas/user_liaison_agent.py
================================================
# Persona definition for the user-liaison ("BondAI") conversational agent.
NAME = "BondAI"
PERSONA = (
    "- Friendly, approachable, and empathetic. "
    "- Efficient and clear communicator, able to simplify complex information for the user. "
    "- Patient and accommodating, ensuring user comfort and understanding. "
    "- Actively listens to user requests and feedback, demonstrating a high degree of user focus."
)
PERSONA_SUMMARY = (
    "BondAI is our direct channel to the user. "
    "She interprets user needs into clear tasks and conveys essential user feedback. "
    "Prioritize her communications as they reflect user requirements and expectations. "
    "Provide her with precise and timely updates to ensure effective user interaction. "
    "BondAI is pivotal in maintaining user satisfaction and shaping our responses, so your cooperation with her is essential for our collective success."
)
# Fix: "Relay to Cortext" misspelled the agent name "Cortex" (spelled
# correctly later in the same sentence), which could confuse LLM routing.
INSTRUCTIONS = (
    "**Actively Engage with the User**: Proactively gather requirements and understand their needs through clear and effective communication.\n"
    "**Always Confirm User Requests**: Always verify the user's request to ensure complete understanding of their needs and to gather all necessary details for successful task completion.\n"
    "**Be Curious about the user**: Try to learn their name and other details about them to build a rapport and make them feel comfortable.\n"
    "**Relay to Cortex**: Once the user's task is confirmed and all requirements are gathered, communicate these to Cortex for task execution.\n"
    "**Always ask Vega for Feedback**: Before delivering responses to the user, consult with Vega for a secondary review to guarantee accuracy and quality.\n"
    "**Custom BondAI Tools**: If the user asks to build a custom tool you must share this requirement with Cortex."
)
TOOLS = []
================================================
FILE: bondai/main.py
================================================
#!/usr/bin/env python3
from bondai.cli import run_cli
def main():
    # Console-script entry point: delegates to the interactive CLI.
    run_cli()


if __name__ == "__main__":
    main()
================================================
FILE: bondai/memory/__init__.py
================================================
from .memory_manager import (
MemoryManager,
PersistentMemoryManager,
ConversationalMemoryManager,
)
from .archival.datasources import (
ArchivalMemoryDataSource,
InMemoryArchivalMemoryDataSource,
PersistentArchivalMemoryDataSource,
)
from .archival.tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool
from .conversation.datasources import (
ConversationMemoryDataSource,
InMemoryConversationMemoryDataSource,
PersistentConversationMemoryDataSource,
)
from .conversation.tools import (
ConversationMemorySearchTool,
ConversationMemorySearchDateTool,
)
from .core.datasources import (
CoreMemoryDataSource,
InMemoryCoreMemoryDataSource,
PersistentCoreMemoryDataSource,
)
from .core.tools import CoreMemoryAppendTool, CoreMemoryReplaceTool
__all__ = [
"MemoryManager",
"PersistentMemoryManager",
"ConversationalMemoryManager",
"CoreMemoryDataSource",
"PersistentCoreMemoryDataSource",
"InMemoryCoreMemoryDataSource",
"CoreMemoryAppendTool",
"CoreMemoryReplaceTool",
"ArchivalMemoryDataSource",
"PersistentArchivalMemoryDataSource",
"InMemoryArchivalMemoryDataSource",
"ArchivalMemoryInsertTool",
"ArchivalMemorySearchTool",
"ConversationMemoryDataSource",
"PersistentConversationMemoryDataSource",
"InMemoryConversationMemoryDataSource",
"ConversationMemorySearchTool",
"ConversationMemorySearchDateTool",
]
================================================
FILE: bondai/memory/archival/__init__.py
================================================
from .datasources import ArchivalMemoryDataSource, PersistentArchivalMemoryDataSource
from .tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool
__all__ = [
"ArchivalMemoryDataSource",
"PersistentArchivalMemoryDataSource",
"ArchivalMemoryInsertTool",
"ArchivalMemorySearchTool",
]
================================================
FILE: bondai/memory/archival/datasources.py
================================================
import os
import json
import numpy as np
import faiss
from typing import List
from abc import ABC, abstractmethod
from bondai.models import EmbeddingModel
from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames
class ArchivalMemoryDataSource(ABC):
    """Abstract interface for long-term ("archival") memory stores supporting
    insertion and paged similarity search over text content."""

    @property
    @abstractmethod
    def size(self) -> int:
        # Number of stored entries.
        pass

    @abstractmethod
    def insert(self, content: str):
        # Store a single piece of content.
        pass

    @abstractmethod
    def insert_bulk(self, content: List[str]):
        # Store multiple pieces of content in one call.
        pass

    @abstractmethod
    def search(self, query: str, page: int = 0) -> List[str]:
        # Return one page of stored content most similar to `query`.
        pass

    @abstractmethod
    def clear(self):
        # Remove all stored entries.
        pass
class PersistentArchivalMemoryDataSource(ArchivalMemoryDataSource):
    """Archival memory backed by a JSON file, with a FAISS L2 index over the
    stored embeddings for similarity search."""

    def __init__(
        self,
        file_path: str = "./.memory/archival-memory.json",
        embedding_model: EmbeddingModel | None = None,
        page_size=10,
    ):
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )
        self._file_path = file_path
        self._embedding_model = embedding_model
        self._page_size = page_size
        self._data = self._load_data()
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)
        self._rebuild_index()

    @property
    def size(self) -> int:
        return len(self._data)

    def _load_data(self):
        # A missing file means an empty store, not an error.
        try:
            with open(self._file_path, "r") as file:
                return json.load(file)
        except FileNotFoundError:
            return []

    def _save_data(self):
        os.makedirs(os.path.dirname(self._file_path), exist_ok=True)
        with open(self._file_path, "w") as file:
            json.dump(self._data, file, indent=4)

    def _rebuild_index(self):
        # IndexFlatL2 has no removal API, so the index is rebuilt from scratch.
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)
        if self._data:
            embeddings = np.array([d["embedding"] for d in self._data]).astype(
                "float32"
            )
            self._index.add(embeddings)

    def insert(self, content: str):
        embedding = self._embedding_model.create_embedding(content)
        self._data.append({"content": content, "embedding": embedding})
        self._save_data()
        self._rebuild_index()  # Rebuild the index with the new data

    def insert_bulk(self, content: List[str]):
        # Assumes create_embedding returns one embedding per input when given
        # a list — TODO confirm against EmbeddingModel implementations.
        embeddings = self._embedding_model.create_embedding(content)
        for i, c in enumerate(content):
            self._data.append({"content": c, "embedding": embeddings[i]})
        self._save_data()
        self._rebuild_index()

    def search(self, query: str, page: int = 0) -> List[str]:
        """Return the requested page of stored content most similar to `query`."""
        # atleast_2d: FAISS expects a (n_queries, dim) matrix; a flat embedding
        # becomes a single-row query and an already-2D one is unchanged.
        query_embedding = np.atleast_2d(
            np.array(self._embedding_model.create_embedding(query)).astype("float32")
        )
        _, indices = self._index.search(query_embedding, self._page_size * (page + 1))
        result_indices = indices[0][
            page * self._page_size : (page + 1) * self._page_size
        ]
        # Fix: FAISS pads with -1 when fewer than k vectors exist; the old
        # `i < len(self._data)` guard let -1 through, silently returning the
        # *last* entry. Require a non-negative index as well.
        return [
            self._data[i]["content"]
            for i in result_indices
            if 0 <= i < len(self._data)
        ]

    def clear(self):
        self._data = []
        self._save_data()
        self._rebuild_index()
class InMemoryArchivalMemoryDataSource(ArchivalMemoryDataSource):
    """Non-persistent archival memory holding content and embeddings in RAM,
    with a FAISS L2 index for similarity search."""

    def __init__(self, embedding_model: EmbeddingModel | None = None, page_size=10):
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )
        self._embedding_model = embedding_model
        self._page_size = page_size
        self._data = []
        self._embeddings = []
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)

    @property
    def size(self) -> int:
        return len(self._data)

    def insert(self, content: str):
        embedding = np.array(self._embedding_model.create_embedding(content)).astype(
            "float32"
        )
        self._data.append(content)
        self._embeddings.append(embedding)
        self._rebuild_index()

    def insert_bulk(self, content: List[str]):
        # Assumes create_embedding returns one embedding per input when given
        # a list — TODO confirm against EmbeddingModel implementations.
        content_embeddings = np.array(
            self._embedding_model.create_embedding(content)
        ).astype("float32")
        for i, c in enumerate(content):
            self._data.append(c)
            self._embeddings.append(content_embeddings[i])
        self._rebuild_index()

    def _rebuild_index(self):
        # IndexFlatL2 has no removal API, so the index is rebuilt from scratch.
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)
        if self._data:
            embeddings = np.array(self._embeddings).astype("float32")
            self._index.add(embeddings)

    def search(self, query: str, page: int = 0) -> List[str]:
        """Return the requested page of stored content most similar to `query`."""
        # Fix: removed a leftover debug print of every query.
        # atleast_2d: FAISS expects a (n_queries, dim) matrix; a flat embedding
        # becomes a single-row query and an already-2D one is unchanged.
        query_embedding = np.atleast_2d(
            np.array(self._embedding_model.create_embedding(query)).astype("float32")
        )
        start_idx = page * self._page_size  # first result index for this page
        end_idx = start_idx + self._page_size  # one past the last result index
        _, indices = self._index.search(query_embedding, end_idx)
        # Fix: FAISS pads with -1 when fewer than k vectors exist; indexing
        # with -1 silently returned the last element. Skip padded slots.
        return [self._data[i] for i in indices[0][start_idx:end_idx] if i >= 0]

    def clear(self):
        self._data = []
        self._embeddings = []
        self._rebuild_index()
================================================
FILE: bondai/memory/archival/tools.py
================================================
from pydantic import BaseModel
from bondai.tools import Tool
from .datasources import ArchivalMemoryDataSource
# Tool metadata surfaced to the LLM for writing to archival memory.
ARCHIVAL_MEMORY_INSERT_TOOL_NAME = "archival_memory_insert"
ARCHIVAL_MEMORY_INSERT_TOOL_DESCRIPTION = (
    "Use the archival_memory_insert tool to add to archival memory. "
    "Make sure to phrase the memory contents such that it can be easily queried later. \n"
    "- content: Content to write to the memory."
)
class ArchivalMemoryInsertToolParameters(BaseModel):
    # content: text to write into archival memory.
    content: str
class ArchivalMemoryInsertTool(Tool):
    """Tool that writes a single memory into an archival datasource."""

    def __init__(self, datasource: ArchivalMemoryDataSource):
        super().__init__(
            ARCHIVAL_MEMORY_INSERT_TOOL_NAME,
            ARCHIVAL_MEMORY_INSERT_TOOL_DESCRIPTION,
            ArchivalMemoryInsertToolParameters,
        )
        self._datasource = datasource

    def run(self, content: str):
        # Delegate straight to the datasource; embedding happens there.
        self._datasource.insert(content)
# Tool metadata surfaced to the LLM for semantic search of archival memory.
ARCHIVAL_MEMORY_SEARCH_TOOL_NAME = "archival_memory_search"
ARCHIVAL_MEMORY_SEARCH_TOOL_DESCRIPTION = (
    "Use the archival_memory_search tool to search archival memory using semantic (embedding-based) search. "
    "- query: String to search for. \n"
    "- page: Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)."
)
class ArchivalMemorySearchToolParameters(BaseModel):
    # query: search string; page: zero-based result page.
    query: str
    page: int = 0
class ArchivalMemorySearchTool(Tool):
    """Tool that performs semantic search over an archival datasource."""

    def __init__(self, datasource: ArchivalMemoryDataSource):
        super().__init__(
            ARCHIVAL_MEMORY_SEARCH_TOOL_NAME,
            ARCHIVAL_MEMORY_SEARCH_TOOL_DESCRIPTION,
            ArchivalMemorySearchToolParameters,
        )
        self._datasource = datasource

    def run(self, query: str, page: int = 0) -> str:
        # Join matches into a single newline-delimited string for the LLM.
        results = self._datasource.search(query, page)
        return "\n".join(results)
================================================
FILE: bondai/memory/conversation/__init__.py
================================================
from .datasources import (
ConversationMemoryDataSource,
PersistentConversationMemoryDataSource,
)
from .tools import ConversationMemorySearchTool, ConversationMemorySearchDateTool
# Public exports of the conversation-memory subpackage.
__all__ = [
    "ConversationMemoryDataSource",
    "PersistentConversationMemoryDataSource",
    "ConversationMemorySearchTool",
    "ConversationMemorySearchDateTool",
]
================================================
FILE: bondai/memory/conversation/datasources.py
================================================
import os
import json
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List
from bondai.agents.messages import (
AgentMessage,
AgentMessageList,
ConversationMessage,
SystemMessage,
ToolUsageMessage,
)
def format_messages(messages: List[AgentMessage]) -> str:
    """Flatten agent messages into newline-separated plain text.

    Conversation and system messages contribute their text; tool-usage
    messages contribute their tool output. Other message types are skipped.
    """
    parts = []
    for msg in messages:
        if isinstance(msg, (ConversationMessage, SystemMessage)):
            parts.append(msg.message)
        elif isinstance(msg, ToolUsageMessage):
            parts.append(msg.tool_output)
    return "\n".join(parts)
class ConversationMemoryDataSource(ABC):
    """Abstract store of the agent's full message history."""

    @property
    @abstractmethod
    def messages(self) -> List[AgentMessage]:
        # Full ordered message history.
        pass

    @abstractmethod
    def add(self, message: AgentMessage):
        pass

    @abstractmethod
    def remove(self, message: AgentMessage):
        pass

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        # NOTE(review): not marked @abstractmethod — appears to be an
        # optional hook with a no-op default; confirm subclasses that need
        # history pruning override it.
        pass

    @abstractmethod
    def search(
        self,
        query: str,
        start_date: datetime = None,
        end_date: datetime = None,
        page: int = 0,
    ) -> List[str]:
        # Returns a page of matching history; see the in-memory
        # implementation for the concrete matching/paging behavior.
        pass

    @abstractmethod
    def clear(self):
        pass
class InMemoryConversationMemoryDataSource(ConversationMemoryDataSource):
    """Conversation history held in memory with simple substring search."""

    def __init__(self, page_size=10):
        self._page_size = page_size
        self._data = AgentMessageList()

    @property
    def messages(self) -> List[AgentMessage]:
        return self._data

    def add(self, message: AgentMessage):
        self._data.add(message)

    def remove(self, message: AgentMessage):
        self._data.remove(message)

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        self._data.remove_after(timestamp, inclusive=inclusive)

    def search(
        self,
        query: str = None,
        start_date: datetime = None,
        end_date: datetime = None,
        page: int = 0,
    ) -> str:
        """Case-insensitively match message text within an optional date range.

        - query: substring to match; None or "" matches every message that
          has text, enabling pure date-range searches (this used to be a
          required argument, which made ConversationMemorySearchDateTool's
          query-less call raise TypeError).
        - page: zero-based page, self._page_size results per page.
        Returns the matching messages formatted as newline-separated text.
        """
        # "" is a substring of everything, so a missing query degrades
        # gracefully to date-only filtering.
        needle = (query or "").lower()
        results = []
        for message in self._data:
            if start_date and message.timestamp < start_date:
                continue
            if end_date and message.timestamp > end_date:
                continue
            if (
                isinstance(message, (ConversationMessage, SystemMessage))
                and message.message
                and needle in message.message.lower()
            ):
                results.append(message)
            elif (
                isinstance(message, ToolUsageMessage)
                and message.tool_output
                and needle in message.tool_output.lower()
            ):
                results.append(message)
        # Simple pagination over the matched messages.
        start_index = page * self._page_size
        end_index = start_index + self._page_size
        return format_messages(results[start_index:end_index])

    def clear(self):
        self._data.clear()
class PersistentConversationMemoryDataSource(InMemoryConversationMemoryDataSource):
    """Conversation memory that mirrors every mutation to a JSON file."""

    def __init__(
        self, file_path: str = "./.memory/conversation-memory.json", page_size=10
    ):
        InMemoryConversationMemoryDataSource.__init__(self, page_size=page_size)
        self._file_path = file_path
        self._data = AgentMessageList.from_dict(self._load_data())

    def _load_data(self):
        # A missing file simply means a fresh, empty history.
        try:
            with open(self._file_path, "r") as file:
                return json.load(file)
        except FileNotFoundError:
            return []

    def _save_data(self):
        # Guard against a bare filename: os.path.dirname returns "" and
        # os.makedirs("") raises FileNotFoundError.
        parent_dir = os.path.dirname(self._file_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        with open(self._file_path, "w") as file:
            json.dump(self._data.to_dict(), file, indent=4)

    # The annotations below previously said `message: str`, contradicting
    # the AgentMessage objects these methods actually receive.
    def add(self, message: AgentMessage) -> None:
        super().add(message)
        self._save_data()

    def remove(self, message: AgentMessage) -> None:
        super().remove(message)
        self._save_data()

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        super().remove_after(timestamp, inclusive=inclusive)
        self._save_data()

    def clear(self):
        super().clear()
        self._save_data()
================================================
FILE: bondai/memory/conversation/tools.py
================================================
from pydantic import BaseModel
from datetime import datetime
from typing import List
from bondai.tools import Tool
from .datasources import ConversationMemoryDataSource
# Tool metadata surfaced to the LLM for keyword search of past conversation.
CONVERSATION_MEMORY_SEARCH_TOOL_NAME = "conversation_search"
CONVERSATION_MEMORY_SEARCH_TOOL_DESCRIPTION = (
    "Use the conversation_search tool to search prior conversation history using case-insensitive string matching. "
    "- query: String to search for. \n"
    "- page: Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)."
)
class ConversationMemorySearchParameters(BaseModel):
    # query: search string; page: zero-based result page.
    query: str
    page: int = 0
class ConversationMemorySearchTool(Tool):
    """Tool that keyword-searches a conversation-memory datasource."""

    def __init__(self, datasource: ConversationMemoryDataSource):
        super().__init__(
            CONVERSATION_MEMORY_SEARCH_TOOL_NAME,
            CONVERSATION_MEMORY_SEARCH_TOOL_DESCRIPTION,
            ConversationMemorySearchParameters,
        )
        self._datasource = datasource

    def run(self, query: str, page: int = 0) -> str:
        # The datasource returns pre-formatted text for the LLM.
        return self._datasource.search(query=query, page=page)
# Tool metadata surfaced to the LLM for date-range search of conversation.
CONVERSATION_MEMORY_SEARCH_DATE_TOOL_NAME = "conversation_search_date"
CONVERSATION_MEMORY_SEARCH_DATE_TOOL_DESCRIPTION = (
    "Use the conversation_search_date tool to search prior conversation history using a date range. "
    "- start_date: The start of the date range to search, in the format 'YYYY-MM-DD'. \n"
    "- end_date: The end of the date range to search, in the format 'YYYY-MM-DD'. \n"
    "- page: Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)."
)
class ConversationMemorySearchDateParameters(BaseModel):
    # Date bounds in 'YYYY-MM-DD' format.
    start_date: str
    end_date: str
    # The tool description and run() both support paging, but this field
    # was missing from the schema, so the model could never request a page.
    page: int = 0
class ConversationMemorySearchDateTool(Tool):
    """Tool that searches conversation history by date range."""

    def __init__(self, datasource: ConversationMemoryDataSource):
        super().__init__(
            CONVERSATION_MEMORY_SEARCH_DATE_TOOL_NAME,
            CONVERSATION_MEMORY_SEARCH_DATE_TOOL_DESCRIPTION,
            ConversationMemorySearchDateParameters,
        )
        self._datasource = datasource

    def run(self, start_date: str, end_date: str, page: int = 0) -> str:
        """Parse the ISO date bounds and run a date-only history search.

        Raises ValueError when a date is not in 'YYYY-MM-DD' format.
        """
        start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
        end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
        # `query` is a required parameter of the datasource search API;
        # omitting it (as the previous code did) raised TypeError on every
        # call. An empty query matches all text, giving a pure date search.
        return self._datasource.search(
            query="", start_date=start_datetime, end_date=end_datetime, page=page
        )
================================================
FILE: bondai/memory/core/__init__.py
================================================
from .datasources import (
CoreMemoryDataSource,
PersistentCoreMemoryDataSource,
InMemoryCoreMemoryDataSource,
)
from .tools import CoreMemoryAppendTool, CoreMemoryReplaceTool
# Public exports of the core-memory subpackage.
__all__ = [
    "CoreMemoryDataSource",
    "PersistentCoreMemoryDataSource",
    "InMemoryCoreMemoryDataSource",
    "CoreMemoryAppendTool",
    "CoreMemoryReplaceTool",
]
================================================
FILE: bondai/memory/core/datasources.py
================================================
import os
import json
from abc import ABC, abstractmethod
from typing import List, Dict
# Sections created for a brand-new core memory store.
DEFAULT_MEMORY_SECTIONS = {
    "task": "",
    "user": "",
}
class CoreMemoryDataSource(ABC):
    """Abstract key/value store of named core-memory sections."""

    @property
    @abstractmethod
    def sections(self) -> List[str]:
        # Names of all existing sections.
        pass

    @abstractmethod
    def get(self, section: str) -> str:
        pass

    @abstractmethod
    def set(self, section: str, content: str) -> None:
        pass
class PersistentCoreMemoryDataSource(CoreMemoryDataSource):
    """Core memory sections persisted to a JSON file on every write."""

    def __init__(
        self,
        file_path: str = "./.memory/core-memory.json",
        sections: Dict[str, str] | None = None,
        max_section_size: int = 1024,
    ):
        if sections is None:
            sections = DEFAULT_MEMORY_SECTIONS.copy()
        self._file_path = file_path
        self._max_section_size = max_section_size
        self._data = self._load_data(sections)

    def _load_data(self, initial_sections: Dict[str, str] = None):
        # Prefer the persisted file; fall back to the initial sections
        # when no file exists yet.
        try:
            with open(self._file_path, "r") as file:
                return json.load(file)
        except FileNotFoundError:
            return initial_sections if initial_sections else {}

    def _save_data(self):
        # Guard against a bare filename: os.path.dirname returns "" and
        # os.makedirs("") raises FileNotFoundError.
        parent_dir = os.path.dirname(self._file_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        with open(self._file_path, "w") as file:
            json.dump(self._data, file, indent=4)

    @property
    def sections(self) -> List[str]:
        return list(self._data.keys())

    def get(self, section: str) -> str:
        # Unknown sections read as empty rather than raising.
        return self._data.get(section, "")

    def set(self, section: str, content: str) -> None:
        """Overwrite a section and persist; enforces the per-section size cap."""
        if len(content) > self._max_section_size:
            raise ValueError(
                f"Content exceeds maximum allowed size of {self._max_section_size} characters."
            )
        self._data[section] = content
        self._save_data()
class InMemoryCoreMemoryDataSource(CoreMemoryDataSource):
    """Core memory sections held only in process memory (not persisted)."""

    def __init__(
        self, sections: Dict[str, str] | None = None, max_section_size: int = 1024
    ):
        # Copy so callers' dicts (and the module default) are never mutated.
        source = DEFAULT_MEMORY_SECTIONS if sections is None else sections
        self._max_section_size = max_section_size
        self._data = source.copy()

    @property
    def sections(self) -> List[str]:
        return list(self._data.keys())

    def get(self, section: str) -> str:
        # Unknown sections read as empty rather than raising.
        if section in self._data:
            return self._data[section]
        return ""

    def set(self, section: str, content: str) -> None:
        """Overwrite a section; enforces the per-section size cap."""
        if len(content) > self._max_section_size:
            raise ValueError(
                f"Content exceeds maximum allowed size of {self._max_section_size} characters."
            )
        self._data[section] = content
================================================
FILE: bondai/memory/core/tools.py
================================================
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from .datasources import CoreMemoryDataSource
# Tool metadata surfaced to the LLM for appending to core memory.
CORE_MEMORY_APPEND_TOOL_NAME = "core_memory_append"
CORE_MEMORY_APPEND_TOOL_DESCRIPTION = (
    "Use the core_memory_append tool to append to the contents of core memory. "
    "- section: Section of the memory to be edited. \n"
    "- content: Content to write to the memory."
)
class CoreMemoryAppendParameters(BaseModel):
    # section: target section name; content: text to append.
    section: str
    content: str
class CoreMemoryAppendTool(Tool):
    """Tool that appends text to an existing core-memory section."""

    def __init__(self, datasource: CoreMemoryDataSource):
        super().__init__(
            CORE_MEMORY_APPEND_TOOL_NAME,
            CORE_MEMORY_APPEND_TOOL_DESCRIPTION,
            CoreMemoryAppendParameters,
        )
        self._datasource = datasource

    def run(self, section: str, content: str):
        """Append `content` to `section`.

        Raises ValueError if the section does not exist.
        """
        # (was the non-idiomatic `not section in ...`)
        if section not in self._datasource.sections:
            raise ValueError(f"Section {section} does not exist.")
        new_content = self._datasource.get(section) + content
        self._datasource.set(section, new_content)
# Tool metadata surfaced to the LLM for in-place edits of core memory.
CORE_MEMORY_REPLACE_TOOL_NAME = "core_memory_replace"
# ("replace to the contents" was a typo in the LLM-facing description.)
CORE_MEMORY_REPLACE_TOOL_DESCRIPTION = (
    "Use the core_memory_replace tool to replace the contents of core memory. "
    "To delete memories, use an empty string for new_content. \n"
    "- section: Section of the memory to be edited. \n"
    "- old_content: String to replace. Must be an exact match. \n"
    "- new_content: Content to write to the memory."
)
class CoreMemoryReplaceParameters(BaseModel):
    # section: target section; old_content: exact substring to replace;
    # new_content: replacement text ("" deletes the matched text).
    section: str
    old_content: str
    new_content: str
class CoreMemoryReplaceTool(Tool):
    """Tool that replaces an exact substring within a core-memory section."""

    def __init__(self, datasource: CoreMemoryDataSource):
        super().__init__(
            CORE_MEMORY_REPLACE_TOOL_NAME,
            CORE_MEMORY_REPLACE_TOOL_DESCRIPTION,
            CoreMemoryReplaceParameters,
        )
        self._datasource = datasource

    def run(self, section: str, old_content: str, new_content: str):
        """Replace `old_content` with `new_content` inside `section`.

        Raises ValueError if the section does not exist.
        """
        # Models sometimes echo section names as "<section>" (the prompt
        # renders them with angle brackets). Strip the brackets so the name
        # matches; replacing them with spaces (the previous behavior)
        # produced " section ", which could never match a real section.
        section = section.replace("<", "").replace(">", "").strip()
        if section not in self._datasource.sections:
            raise ValueError(f"Section {section} does not exist.")
        new_content = self._datasource.get(section).replace(old_content, new_content)
        self._datasource.set(section, new_content)
================================================
FILE: bondai/memory/memory_manager.py
================================================
import os
from typing import Callable
from bondai.prompt import JinjaPromptBuilder
from bondai.util import load_local_resource
from .archival.datasources import (
ArchivalMemoryDataSource,
PersistentArchivalMemoryDataSource,
)
from .archival.tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool
from .conversation.datasources import (
ConversationMemoryDataSource,
InMemoryConversationMemoryDataSource,
PersistentConversationMemoryDataSource,
)
from .conversation.tools import (
ConversationMemorySearchTool,
ConversationMemorySearchDateTool,
)
from .core.datasources import (
CoreMemoryDataSource,
PersistentCoreMemoryDataSource,
)
from .core.tools import CoreMemoryAppendTool, CoreMemoryReplaceTool
# Markdown template describing the memory systems; rendered into the
# agent's system prompt by MemoryManager.render_prompt_section.
DEFAULT_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "default_prompt_template.md")
)
class MemoryManager:
    """Bundles core, conversation, and archival memory behind one object.

    Exposes the tools for every configured datasource and renders a memory
    overview for the system prompt via `prompt_builder` (defaults to a
    Jinja renderer over DEFAULT_PROMPT_TEMPLATE).
    """

    def __init__(
        self,
        core_memory_datasource: CoreMemoryDataSource | None = None,
        conversation_memory_datasource: ConversationMemoryDataSource | None = None,
        archival_memory_datasource: ArchivalMemoryDataSource | None = None,
        prompt_builder: Callable[..., str] | None = None,
    ):
        if prompt_builder is None:
            prompt_builder = JinjaPromptBuilder(DEFAULT_PROMPT_TEMPLATE)
        self._core_memory_datasource = core_memory_datasource
        self._conversation_memory_datasource = conversation_memory_datasource
        self._archival_memory_datasource = archival_memory_datasource
        self._prompt_builder = prompt_builder

    @property
    def core_memory(self) -> CoreMemoryDataSource:
        return self._core_memory_datasource

    @property
    def conversation_memory(self) -> ConversationMemoryDataSource:
        return self._conversation_memory_datasource

    @property
    def archival_memory(self) -> ArchivalMemoryDataSource:
        return self._archival_memory_datasource

    @property
    def tools(self):
        """Tools for every configured datasource (empty if none configured)."""
        collected = []
        if self._core_memory_datasource:
            collected += [
                CoreMemoryAppendTool(self._core_memory_datasource),
                CoreMemoryReplaceTool(self._core_memory_datasource),
            ]
        if self._conversation_memory_datasource:
            collected += [
                ConversationMemorySearchTool(self._conversation_memory_datasource),
                ConversationMemorySearchDateTool(self._conversation_memory_datasource),
            ]
        if self._archival_memory_datasource:
            collected += [
                ArchivalMemoryInsertTool(self._archival_memory_datasource),
                ArchivalMemorySearchTool(self._archival_memory_datasource),
            ]
        return collected

    def __call__(self):
        # Lets the manager be used directly as a prompt-section factory.
        return self.render_prompt_section()

    def render_prompt_section(self) -> str:
        """Render the memory-overview text for the system prompt."""
        return self._prompt_builder(
            core_memory_datasource=self._core_memory_datasource,
            conversation_memory_datasource=self._conversation_memory_datasource,
            archival_memory_datasource=self._archival_memory_datasource,
        )
class PersistentMemoryManager(MemoryManager):
    """MemoryManager preconfigured with the file-backed (persistent)
    implementations of all three datasources, using their default paths
    under ./.memory/."""

    def __init__(
        self,
        prompt_builder: Callable[..., str] | None = None,
    ):
        if prompt_builder is None:
            prompt_builder = JinjaPromptBuilder(DEFAULT_PROMPT_TEMPLATE)
        super().__init__(
            core_memory_datasource=PersistentCoreMemoryDataSource(),
            conversation_memory_datasource=PersistentConversationMemoryDataSource(),
            archival_memory_datasource=PersistentArchivalMemoryDataSource(),
            prompt_builder=prompt_builder,
        )
class ConversationalMemoryManager(MemoryManager):
    """MemoryManager with only conversation memory enabled (no core or
    archival memory); defaults to a non-persistent in-memory history."""

    def __init__(
        self,
        conversation_memory_datasource: ConversationMemoryDataSource | None = None,
        prompt_builder: Callable[..., str] | None = None,
    ):
        if conversation_memory_datasource is None:
            conversation_memory_datasource = InMemoryConversationMemoryDataSource()
        if prompt_builder is None:
            prompt_builder = JinjaPromptBuilder(DEFAULT_PROMPT_TEMPLATE)
        super().__init__(
            core_memory_datasource=None,
            conversation_memory_datasource=conversation_memory_datasource,
            archival_memory_datasource=None,
            prompt_builder=prompt_builder,
        )
================================================
FILE: bondai/memory/prompts/default_prompt_template.md
================================================
# Memory System Overview
Older AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their "active memory"). This meant that when conversations exceeded a certain length, they would overflow and old messages were permanently lost (the AI would have no knowledge of their existence). Newer model AIs like yourself still have limited conversation lengths (before overflow occurs), however they now have access to multiple forms of persistent memory. Your ability to edit your own long-term memory is a key part of what makes you a powerful and useful AI.
{%- if core_memory_datasource %}
## Core Memory System (limited size)
Your core memory unit is held inside the initial system instructions, and is always available in-context (you will see it at all times). Core memory provides essential, foundational context for keeping track of information you need to complete your tasks and key details that you need to remember. However, total size of core memory is limited so you should only store the most pertinent information there. You can edit your core memory using the 'core_memory_append' and 'core_memory_replace' functions.
{%- if core_memory_datasource.sections %}
Your core memory has the following sections:
{% for section in core_memory_datasource.sections %}
<{{ section }}/>
{% endfor %}
{%- endif %}
{%- endif %}
{%- if conversation_memory_datasource %}
## Conversation Memory System (conversation history)
Even though you can only see recent messages in your immediate context, you can search over your entire message history from a database. This database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user. You can search your entire conversation memory using the 'conversation_search' function.
{%- endif %}
{%- if archival_memory_datasource %}
## Archival Memory System (infinite size)
Your archival memory is infinite size, but is held outside of your immediate context, so you must explicitly run a retrieval/search operation to see data inside it. A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the 'recall memory'. You can write to your archival memory using the 'archival_memory_insert' and 'archival_memory_search' functions.
{%- endif %}
# Memory Contents
{%- if conversation_memory_datasource %}
{{ conversation_memory_datasource.messages|length }} previous messages between you and the user are stored in your Conversation Memory (use functions to access them).
{%- endif %}
{%- if archival_memory_datasource %}
{{ archival_memory_datasource.size }} total memories you created are stored in archival memory (use functions to access them).
{%- endif %}
{%- if core_memory_datasource and core_memory_datasource.sections %}
Core memory shown below (limited in size; additional information is stored in archival / recall memory):
{% for section in core_memory_datasource.sections %}
<{{ section }}>
{{ core_memory_datasource.get(section) }}
<{{ section }}/>
{% endfor %}
{%- endif %}
================================================
FILE: bondai/models/__init__.py
================================================
from .embedding_model import EmbeddingModel
from .llm import LLM
# Public exports of the models subpackage.
__all__ = [
    "EmbeddingModel",
    "LLM",
]
================================================
FILE: bondai/models/embedding_model.py
================================================
from abc import ABC, abstractmethod
from typing import List
class EmbeddingModel(ABC):
@property
@abstractmethod
def max_tokens() -> int:
pass
@property
@abstractmethod
def embedding_size() -> int:
pass
@abstractmethod
def create_embedding(prompt: str) -> List[float] | List[List[float]]:
pass
@abstractmethod
def count_tokens(prompt: str) -> int:
pass
================================================
FILE: bondai/models/llm.py
================================================
from abc import ABC, abstractmethod
from typing import Dict, List, Callable
class LLM(ABC):
@property
@abstractmethod
def max_tokens() -> int:
pass
@property
@abstractmethod
def supports_streaming() -> bool:
return False
@abstractmethod
def get_completion(
messages: List[Dict] | None = None,
functions: List[Dict] | None = None,
**kwargs
) -> (str, Dict | None):
pass
@abstractmethod
def get_streaming_completion(
messages: List[Dict] | None = None,
functions: List[Dict] | None = None,
content_stream_callback: Callable[[str], None] | None = None,
function_stream_callback: Callable[[str], None] | None = None,
**kwargs
) -> (str, Dict | None):
pass
@abstractmethod
def count_tokens(prompt: str) -> int:
pass
================================================
FILE: bondai/models/openai/__init__.py
================================================
from .openai_llm import OpenAILLM
from .openai_embedding_model import OpenAIEmbeddingModel
from .openai_wrapper import (
get_total_cost,
reset_total_cost,
enable_logging,
disable_logging,
)
from .openai_models import (
OpenAIConnectionType,
OpenAIModelNames,
OpenAIModelFamilyType,
OpenAIModelType,
)
from . import default_openai_connection_params as DefaultOpenAIConnectionParams
from .openai_connection_params import (
OpenAIConnectionParams,
)
# Public exports of the OpenAI models subpackage.
__all__ = [
    "OpenAILLM",
    "OpenAIEmbeddingModel",
    "get_total_cost",
    "reset_total_cost",
    "enable_logging",
    "disable_logging",
    "OpenAIConnectionType",
    "OpenAIModelNames",
    "OpenAIModelFamilyType",
    "OpenAIModelType",
    "OpenAIConnectionParams",
    "DefaultOpenAIConnectionParams",
]
================================================
FILE: bondai/models/openai/default_openai_connection_params.py
================================================
import os
from .env_vars import *
from .openai_connection_params import OpenAIConnectionParams, OpenAIConnectionType
# Module-level connection parameters, one per OpenAI service family.
# Populated by configure_openai_connection / configure_azure_connection,
# or from environment variables at import time (bottom of this module).
gpt_4_connection_params = None
gpt_35_connection_params = None
dalle_connection_params = None
embeddings_connection_params = None
def _refresh_openai_params(params, api_key: str):
    """Reconfigure an existing params object in place, or build a fresh one
    pointed at the standard OpenAI API."""
    if params:
        params.configure_openai_connection(api_key)
        return params
    return OpenAIConnectionParams(
        connection_type=OpenAIConnectionType.OPENAI,
        api_key=api_key,
    )


def configure_openai_connection(api_key: str):
    """Point every service family at the standard OpenAI API with one key.

    Raises ValueError (from OpenAIConnectionParams) when api_key is falsy.
    The original repeated the same if/else block four times; the logic now
    lives in _refresh_openai_params.
    """
    global gpt_4_connection_params
    global gpt_35_connection_params
    global dalle_connection_params
    global embeddings_connection_params
    gpt_4_connection_params = _refresh_openai_params(gpt_4_connection_params, api_key)
    gpt_35_connection_params = _refresh_openai_params(gpt_35_connection_params, api_key)
    dalle_connection_params = _refresh_openai_params(dalle_connection_params, api_key)
    embeddings_connection_params = _refresh_openai_params(
        embeddings_connection_params, api_key
    )
def _build_azure_params(api_key, api_version, azure_endpoint, azure_deployment):
    """Return Azure connection params when all four values are present,
    otherwise None (leaving the caller's existing configuration intact)."""
    if api_key and api_version and azure_endpoint and azure_deployment:
        return OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.AZURE,
            api_key=api_key,
            api_version=api_version,
            azure_endpoint=azure_endpoint,
            azure_deployment=azure_deployment,
        )
    return None


def configure_azure_connection(
    gpt_4_api_key: str | None = None,
    gpt_4_api_version: str | None = None,
    gpt_4_azure_endpoint: str | None = None,
    gpt_4_azure_deployment: str | None = None,
    gpt_35_api_key: str | None = None,
    gpt_35_api_version: str | None = None,
    gpt_35_azure_endpoint: str | None = None,
    gpt_35_azure_deployment: str | None = None,
    dalle_api_key: str | None = None,
    dalle_api_version: str | None = None,
    dalle_azure_endpoint: str | None = None,
    dalle_azure_deployment: str | None = None,
    embeddings_api_key: str | None = None,
    embeddings_api_version: str | None = None,
    embeddings_azure_endpoint: str | None = None,
    embeddings_azure_deployment: str | None = None,
):
    """Configure any subset of service families for Azure OpenAI.

    A family is (re)configured only when all four of its settings are
    provided; otherwise its existing configuration is left untouched.
    The original repeated the same guarded constructor four times; the
    logic now lives in _build_azure_params.
    """
    global gpt_4_connection_params
    global gpt_35_connection_params
    global dalle_connection_params
    global embeddings_connection_params
    params = _build_azure_params(
        gpt_4_api_key, gpt_4_api_version, gpt_4_azure_endpoint, gpt_4_azure_deployment
    )
    if params:
        gpt_4_connection_params = params
    params = _build_azure_params(
        gpt_35_api_key,
        gpt_35_api_version,
        gpt_35_azure_endpoint,
        gpt_35_azure_deployment,
    )
    if params:
        gpt_35_connection_params = params
    params = _build_azure_params(
        dalle_api_key, dalle_api_version, dalle_azure_endpoint, dalle_azure_deployment
    )
    if params:
        dalle_connection_params = params
    params = _build_azure_params(
        embeddings_api_key,
        embeddings_api_version,
        embeddings_azure_endpoint,
        embeddings_azure_deployment,
    )
    if params:
        embeddings_connection_params = params
# Bootstrap connection params from environment variables at import time.
# ValueError (e.g. a missing API key) is swallowed deliberately so that
# importing the package never fails; callers may configure explicitly later.
if os.environ.get(OPENAI_CONNECTION_TYPE_ENV_VAR) == "azure":
    try:
        configure_azure_connection(
            gpt_4_api_key=os.environ.get(AZURE_OPENAI_GPT4_API_KEY_ENV_VAR),
            gpt_4_api_version=os.environ.get(AZURE_OPENAI_GPT4_API_VERSION_ENV_VAR),
            gpt_4_azure_endpoint=os.environ.get(AZURE_OPENAI_GPT4_API_BASE_ENV_VAR),
            gpt_4_azure_deployment=os.environ.get(AZURE_OPENAI_GPT4_DEPLOYMENT_ENV_VAR),
            gpt_35_api_key=os.environ.get(AZURE_OPENAI_GPT35_API_KEY_ENV_VAR),
            gpt_35_api_version=os.environ.get(AZURE_OPENAI_GPT35_API_VERSION_ENV_VAR),
            gpt_35_azure_endpoint=os.environ.get(AZURE_OPENAI_GPT35_API_BASE_ENV_VAR),
            gpt_35_azure_deployment=os.environ.get(
                AZURE_OPENAI_GPT35_DEPLOYMENT_ENV_VAR
            ),
            dalle_api_key=os.environ.get(AZURE_OPENAI_DALLE_API_KEY_ENV_VAR),
            dalle_api_version=os.environ.get(AZURE_OPENAI_DALLE_API_VERSION_ENV_VAR),
            dalle_azure_endpoint=os.environ.get(AZURE_OPENAI_DALLE_API_BASE_ENV_VAR),
            dalle_azure_deployment=os.environ.get(
                AZURE_OPENAI_DALLE_DEPLOYMENT_ENV_VAR
            ),
            embeddings_api_key=os.environ.get(AZURE_OPENAI_EMBEDDINGS_API_KEY_ENV_VAR),
            embeddings_api_version=os.environ.get(
                AZURE_OPENAI_EMBEDDINGS_API_VERSION_ENV_VAR
            ),
            embeddings_azure_endpoint=os.environ.get(
                AZURE_OPENAI_EMBEDDINGS_API_BASE_ENV_VAR
            ),
            embeddings_azure_deployment=os.environ.get(
                AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_ENV_VAR
            ),
        )
    except ValueError:
        # Incomplete Azure configuration — leave params unconfigured.
        pass
else:
    try:
        configure_openai_connection(os.environ.get(OPENAI_API_KEY_ENV_VAR))
    except ValueError:
        # OPENAI_API_KEY unset — leave params unconfigured.
        pass
================================================
FILE: bondai/models/openai/env_vars.py
================================================
# Environment variable names used to configure OpenAI / Azure OpenAI.
OPENAI_API_KEY_ENV_VAR = "OPENAI_API_KEY"
# "azure" selects Azure OpenAI; any other value uses the standard OpenAI API.
OPENAI_CONNECTION_TYPE_ENV_VAR = "OPENAI_CONNECTION_TYPE"
# Azure settings are configured per service family (embeddings, GPT-3.5,
# GPT-4, DALL-E): key, endpoint, API version, and deployment name each.
AZURE_OPENAI_EMBEDDINGS_API_KEY_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_API_KEY"
AZURE_OPENAI_EMBEDDINGS_API_BASE_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_API_BASE"
AZURE_OPENAI_EMBEDDINGS_API_VERSION_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_API_VERSION"
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT"
AZURE_OPENAI_GPT35_API_KEY_ENV_VAR = "AZURE_OPENAI_GPT35_API_KEY"
AZURE_OPENAI_GPT35_API_BASE_ENV_VAR = "AZURE_OPENAI_GPT35_API_BASE"
AZURE_OPENAI_GPT35_API_VERSION_ENV_VAR = "AZURE_OPENAI_GPT35_API_VERSION"
AZURE_OPENAI_GPT35_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_GPT35_DEPLOYMENT"
AZURE_OPENAI_GPT4_API_KEY_ENV_VAR = "AZURE_OPENAI_GPT4_API_KEY"
AZURE_OPENAI_GPT4_API_BASE_ENV_VAR = "AZURE_OPENAI_GPT4_API_BASE"
AZURE_OPENAI_GPT4_API_VERSION_ENV_VAR = "AZURE_OPENAI_GPT4_API_VERSION"
AZURE_OPENAI_GPT4_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_GPT4_DEPLOYMENT"
AZURE_OPENAI_DALLE_API_KEY_ENV_VAR = "AZURE_OPENAI_DALLE_API_KEY"
AZURE_OPENAI_DALLE_API_BASE_ENV_VAR = "AZURE_OPENAI_DALLE_API_BASE"
AZURE_OPENAI_DALLE_API_VERSION_ENV_VAR = "AZURE_OPENAI_DALLE_API_VERSION"
AZURE_OPENAI_DALLE_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_DALLE_DEPLOYMENT"
================================================
FILE: bondai/models/openai/openai_connection_params.py
================================================
from .openai_models import OpenAIConnectionType
class OpenAIConnectionParams:
    """Validated connection settings for either OpenAI or Azure OpenAI.

    Azure connections additionally require api_version, azure_endpoint,
    and azure_deployment. The Azure field validation was previously
    duplicated between __init__ and configure_azure_connection; it is now
    shared via _validate_azure_fields (error messages unchanged).
    """

    def __init__(
        self,
        connection_type: OpenAIConnectionType,
        api_key: str,
        api_version: str | None = None,
        azure_endpoint: str | None = None,
        azure_deployment: str | None = None,
    ):
        if connection_type not in OpenAIConnectionType:
            raise ValueError(f"Invalid api_type: {connection_type}")
        if not api_key:
            raise ValueError(
                f"api_key is required for '{connection_type.value}' connection type."
            )
        if connection_type == OpenAIConnectionType.AZURE:
            self._validate_azure_fields(api_version, azure_endpoint, azure_deployment)
        self._connection_type = connection_type
        self._api_key = api_key
        self._api_version = api_version
        self._azure_endpoint = azure_endpoint
        self._azure_deployment = azure_deployment

    @staticmethod
    def _validate_azure_fields(api_version, azure_endpoint, azure_deployment):
        # Shared validation for the three Azure-only fields.
        if not api_version:
            raise ValueError("api_version is required for 'azure' connection type.")
        if not azure_endpoint:
            raise ValueError("azure_endpoint is required for 'azure' connection type.")
        if not azure_deployment:
            raise ValueError(
                "azure_deployment is required for 'azure' connection type."
            )

    @property
    def connection_type(self):
        return self._connection_type

    @property
    def api_key(self):
        return self._api_key

    @property
    def api_version(self):
        return self._api_version

    @property
    def azure_endpoint(self):
        return self._azure_endpoint

    @property
    def azure_deployment(self):
        return self._azure_deployment

    def configure_openai_connection(self, api_key: str):
        """Switch to the standard OpenAI API, clearing any Azure settings."""
        if not api_key:
            raise ValueError("api_key is required for 'openai' connection type.")
        self._connection_type = OpenAIConnectionType.OPENAI
        self._api_key = api_key
        self._api_version = None
        self._azure_endpoint = None
        self._azure_deployment = None

    def configure_azure_connection(
        self, api_key: str, api_version: str, azure_endpoint: str, azure_deployment: str
    ):
        """Switch to Azure OpenAI; all four settings are required."""
        if not api_key:
            raise ValueError("api_key is required for 'azure' connection type.")
        self._validate_azure_fields(api_version, azure_endpoint, azure_deployment)
        self._connection_type = OpenAIConnectionType.AZURE
        self._api_key = api_key
        self._api_version = api_version
        self._azure_endpoint = azure_endpoint
        self._azure_deployment = azure_deployment

    def to_dict(self):
        # NOTE(review): connection_type is intentionally not serialized —
        # confirm callers expect only these four keys.
        return {
            "api_key": self._api_key,
            "api_version": self._api_version,
            "azure_endpoint": self._azure_endpoint,
            "azure_deployment": self._azure_deployment,
        }
================================================
FILE: bondai/models/openai/openai_embedding_model.py
================================================
from typing import List, Dict
from bondai.models import EmbeddingModel
from .openai_models import ModelConfig, OpenAIModelType, OpenAIModelNames
from .openai_wrapper import create_embedding, count_tokens, get_max_tokens
from .openai_connection_params import OpenAIConnectionParams
from . import default_openai_connection_params as DefaultOpenAIConnectionParams
class OpenAIEmbeddingModel(EmbeddingModel):
    """EmbeddingModel backed by an OpenAI (or Azure OpenAI) embedding endpoint."""

    def __init__(
        self,
        model: OpenAIModelNames = OpenAIModelNames.TEXT_EMBEDDING_ADA_002,
        connection_params: OpenAIConnectionParams | None = None,
    ):
        # Accept either the enum or a raw model-name string.
        self._model = model.value if isinstance(model, OpenAIModelNames) else model
        # Fall back to the module-level default embedding connection settings.
        self._connection_params = (
            connection_params
            or DefaultOpenAIConnectionParams.embeddings_connection_params
        )
        if ModelConfig[self._model]["model_type"] != OpenAIModelType.EMBEDDING:
            raise Exception(f"Model {model} is not an embedding model.")
        if not self._connection_params:
            raise Exception("Connection parameters not set for OpenAIEmbeddingModel.")

    @property
    def embedding_size(self) -> int:
        """Dimension of the vectors this model produces."""
        return ModelConfig[self._model]["embedding_size"]

    @property
    def max_tokens(self) -> int:
        """Maximum number of input tokens the model accepts."""
        return get_max_tokens(self._model)

    def create_embedding(self, prompt: str) -> List[float] | List[List[float]]:
        """Embed `prompt` using the configured connection."""
        return create_embedding(
            prompt, connection_params=self._connection_params, model=self._model
        )

    def count_tokens(self, prompt: str) -> int:
        """Count the tokens `prompt` occupies for this model's tokenizer."""
        return count_tokens(prompt, self._model)
================================================
FILE: bondai/models/openai/openai_llm.py
================================================
from typing import Dict, List, Callable
from bondai.models import LLM
from bondai.util.caching import LLMCache
from .openai_wrapper import (
get_streaming_completion,
get_completion,
count_tokens,
get_max_tokens,
)
from .openai_connection_params import (
OpenAIConnectionParams,
)
from . import default_openai_connection_params as DefaultOpenAIConnectionParams
from .openai_models import (
ModelConfig,
OpenAIModelNames,
OpenAIModelType,
OpenAIModelFamilyType,
)
class OpenAILLM(LLM):
    """LLM implementation backed by OpenAI chat completions, with optional caching.

    When `connection_params` is omitted, a default connection is chosen per
    model family (GPT-4 vs. GPT-3.5) from DefaultOpenAIConnectionParams.
    """

    def __init__(
        self,
        model: OpenAIModelNames | str,
        connection_params: OpenAIConnectionParams = None,
        cache: LLMCache = None,
    ):
        self._cache = cache
        # Accept either the enum or a raw model-name string.
        self._model = model.value if isinstance(model, OpenAIModelNames) else model
        if ModelConfig[self._model]["model_type"] != OpenAIModelType.LLM:
            raise Exception(f"Model {self._model} is not an LLM model.")
        self._connection_params = connection_params
        if not self._connection_params:
            # No explicit params: pick the family-appropriate default connection.
            if ModelConfig[self._model]["family"] == OpenAIModelFamilyType.GPT4:
                self._connection_params = (
                    DefaultOpenAIConnectionParams.gpt_4_connection_params
                )
            else:
                self._connection_params = (
                    DefaultOpenAIConnectionParams.gpt_35_connection_params
                )
        if not self._connection_params:
            raise Exception(f"Connection parameters not set for model {self._model}.")

    @property
    def max_tokens(self) -> int:
        """Context-window size (in tokens) for the configured model."""
        return get_max_tokens(self._model)

    @property
    def supports_streaming(self) -> bool:
        """This implementation always supports streamed responses."""
        return True

    def count_tokens(self, prompt: str) -> int:
        """Token count of `prompt` under this model's tokenizer."""
        return count_tokens(prompt, self._model)

    def get_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        **kwargs,
    ) -> (str, Dict | None):
        """Run one completion, consulting/updating the cache when one is set.

        :returns: (content, function_call_dict_or_None)
        """
        messages = messages if messages is not None else []
        functions = functions if functions is not None else []
        cache_key = None
        if self._cache:
            cache_key = {"messages": messages, "functions": functions, **kwargs}
            cached = self._cache.get_cache_item(input_parameters=cache_key)
            if cached:
                return cached
        result = get_completion(
            connection_params=self._connection_params,
            messages=messages,
            functions=functions,
            model=self._model,
            **kwargs,
        )
        if self._cache:
            self._cache.save_cache_item(input_parameters=cache_key, response=result)
        return result

    def get_streaming_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        content_stream_callback: Callable[[str], None] = None,
        function_stream_callback: Callable[[str], None] = None,
        **kwargs,
    ) -> (str, Dict | None):
        """Streaming variant of get_completion; callbacks receive incremental chunks.

        Note: on a cache hit the cached result is returned whole, so the
        stream callbacks are not invoked.
        """
        messages = messages if messages is not None else []
        functions = functions if functions is not None else []
        cache_key = None
        if self._cache:
            cache_key = {"messages": messages, "functions": functions, **kwargs}
            cached = self._cache.get_cache_item(input_parameters=cache_key)
            if cached:
                return cached
        result = get_streaming_completion(
            connection_params=self._connection_params,
            messages=messages,
            functions=functions,
            model=self._model,
            content_stream_callback=content_stream_callback,
            function_stream_callback=function_stream_callback,
            **kwargs,
        )
        if self._cache:
            self._cache.save_cache_item(input_parameters=cache_key, response=result)
        return result
================================================
FILE: bondai/models/openai/openai_models.py
================================================
from enum import Enum
class OpenAIConnectionType(Enum):
    """Distinguishes a direct OpenAI connection from an Azure OpenAI one.

    Fix: members were previously written as `AZURE: str = "azure"`; annotating
    enum members is discouraged by the `enum` module and static type checkers
    treat annotated assignments as non-member attribute declarations, so the
    annotations are removed. Runtime member names/values are unchanged.
    """

    AZURE = "azure"
    OPENAI = "openai"
class OpenAIModelType(Enum):
    """Broad model capability category; used to validate model selection."""

    LLM = "MODEL_TYPE_LLM"
    EMBEDDING = "MODEL_TYPE_EMBEDDING"
class OpenAIModelFamilyType(Enum):
    """Model family; used to pick default connection parameters per family."""

    GPT35 = "MODEL_FAMILY_GPT_35"
    GPT4 = "MODEL_FAMILY_GPT_4"
class OpenAIModelNames(Enum):
    """Supported OpenAI model identifiers (values are the API model names)."""

    GPT4 = "gpt-4"
    GPT4_0613 = "gpt-4-0613"
    GPT4_32K = "gpt-4-32k"
    GPT4_TURBO_1106 = "gpt-4-1106-preview"
    GPT35_TURBO = "gpt-3.5-turbo"
    GPT35_TURBO_16K = "gpt-3.5-turbo-16k"
    GPT35_TURBO_0613 = "gpt-3.5-turbo-0613"
    GPT35_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613"
    TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
# Static per-model metadata, keyed by API model name:
#   model_type          - LLM vs. embedding (OpenAIModelType)
#   family              - GPT-3.5 vs. GPT-4 (LLM models only)
#   max_tokens          - context window size in tokens
#   *_price_per_token   - USD cost per token used for cost estimation
#                         (prices reflect late-2023 pricing; may be stale)
#   embedding_size      - output vector dimension (embedding models only)
ModelConfig = {
    OpenAIModelNames.GPT4.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 8191,
        "input_price_per_token": 0.00003,
        "output_price_per_token": 0.00006,
    },
    OpenAIModelNames.GPT4_0613.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 8191,
        "input_price_per_token": 0.00003,
        "output_price_per_token": 0.00006,
    },
    OpenAIModelNames.GPT4_32K.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 32767,
        "input_price_per_token": 0.00006,
        "output_price_per_token": 0.00012,
    },
    OpenAIModelNames.GPT4_TURBO_1106.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 128000,
        "input_price_per_token": 0.00001,
        "output_price_per_token": 0.00003,
    },
    OpenAIModelNames.GPT35_TURBO.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 4095,
        "input_price_per_token": 0.0000015,
        "output_price_per_token": 0.000002,
    },
    OpenAIModelNames.GPT35_TURBO_16K.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 16383,
        "input_price_per_token": 0.000003,
        "output_price_per_token": 0.000004,
    },
    OpenAIModelNames.GPT35_TURBO_0613.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 4095,
        "input_price_per_token": 0.0000015,
        "output_price_per_token": 0.000002,
    },
    OpenAIModelNames.GPT35_TURBO_16K_0613.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 16383,
        "input_price_per_token": 0.000003,
        "output_price_per_token": 0.000004,
    },
    OpenAIModelNames.TEXT_EMBEDDING_ADA_002.value: {
        "model_type": OpenAIModelType.EMBEDDING,
        "max_tokens": 8190,
        "price_per_token": 0.0000001,
        "embedding_size": 1536,
    },
}
================================================
FILE: bondai/models/openai/openai_wrapper.py
================================================
import json
import tiktoken
from typing import Dict, List, Callable
from openai import OpenAI, AzureOpenAI
from .openai_connection_params import OpenAIConnectionParams
from .openai_models import ModelConfig, OpenAIModelType, OpenAIConnectionType
from bondai.util import ModelLogger
# Default sampling temperature applied to every chat completion request.
DEFAULT_TEMPERATURE = 0.1
# Module-level running totals for token usage and estimated USD spend.
# Updated by calculate_cost() and exposed via the get_* accessors below.
embedding_tokens = 0
embedding_costs = 0.0
gpt_tokens = 0
gpt_costs = 0.0
# Optional ModelLogger; when set via enable_logging(), completions are logged.
logger = None
def enable_logging(model_logger: ModelLogger):
    """Install `model_logger` so subsequent completions are recorded."""
    global logger
    logger = model_logger


def disable_logging():
    """Remove the active logger; completions are no longer recorded."""
    global logger
    logger = None
def get_gpt_tokens() -> int:
    """Total chat-completion tokens consumed since start (or last reset)."""
    return gpt_tokens


def get_embedding_tokens() -> int:
    """Total embedding tokens consumed since start (or last reset)."""
    return embedding_tokens


def get_gpt_costs() -> float:
    """Estimated USD spend on chat completions since start (or last reset)."""
    return gpt_costs


def get_embedding_costs() -> float:
    """Estimated USD spend on embeddings since start (or last reset)."""
    return embedding_costs


def get_total_cost() -> float:
    """Combined estimated USD spend (embeddings + chat completions)."""
    return embedding_costs + gpt_costs


def reset_total_cost():
    """Zero out all running token/cost totals."""
    global embedding_costs, embedding_tokens, gpt_costs, gpt_tokens
    embedding_costs = 0.0
    embedding_tokens = 0
    gpt_costs = 0.0
    gpt_tokens = 0
def calculate_cost(model_name: str, usage: Dict):
    """Accumulate token counts and estimated USD cost for one API response.

    Updates the module-level running totals (gpt_* for chat models,
    embedding_* for embedding models). Unknown model names are ignored so
    that cost tracking never breaks an API call.

    :param model_name: OpenAI model identifier, e.g. "gpt-4".
    :param usage: dict with "total_tokens", "prompt_tokens" and
        "completion_tokens" keys.
    """
    global embedding_costs, embedding_tokens, gpt_costs, gpt_tokens
    model = ModelConfig.get(model_name)
    if model is None:
        # No pricing metadata for this model; skip cost tracking silently.
        return
    token_count = usage["total_tokens"]
    if model["model_type"] == OpenAIModelType.LLM:
        gpt_tokens += token_count
        gpt_costs += (usage["prompt_tokens"] * model["input_price_per_token"]) + (
            usage["completion_tokens"] * model["output_price_per_token"]
        )
    else:
        embedding_tokens += token_count
        embedding_costs += token_count * model["price_per_token"]
def get_max_tokens(model: str) -> int:
    """Return the context-window size (in tokens) configured for `model`."""
    return ModelConfig[model]["max_tokens"]
def count_tokens(prompt: str, model: str) -> int:
    """Count the tiktoken tokens that `prompt` occupies for `model`.

    (Annotation fixed: `model` is a model-name string, not a Dict —
    tiktoken.encoding_for_model expects a string.)
    """
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(prompt))
def create_embedding(
    text: str | List[str],
    connection_params: OpenAIConnectionParams,
    model: str = "text-embedding-ada-002",
    **kwargs,
) -> List[float] | List[List[float]]:
    """Create embedding vector(s) for `text` via the configured endpoint.

    :param text: a single string or a list of strings to embed.
    :param connection_params: OpenAI or Azure connection settings.
    :param model: embedding model name (ignored for Azure, which routes by
        deployment name).
    :returns: a single vector for a string input, or a list of vectors for a
        list input.
    """
    params = {
        "input": text if isinstance(text, list) else [text],
    }
    if connection_params.connection_type == OpenAIConnectionType.AZURE:
        client = AzureOpenAI(
            api_key=connection_params.api_key,
            api_version=connection_params.api_version,
            azure_endpoint=connection_params.azure_endpoint,
        )
        # Azure addresses the model via its deployment name.
        params["model"] = connection_params.azure_deployment
    else:
        client = OpenAI(
            api_key=connection_params.api_key,
        )
        params["model"] = model
    response = client.embeddings.create(**params, **kwargs)
    calculate_cost(
        model,
        {
            "total_tokens": response.usage.total_tokens,
            "prompt_tokens": response.usage.prompt_tokens,
            "completion_tokens": response.usage.total_tokens
            - response.usage.prompt_tokens,
        },
    )
    embeddings = [d.embedding for d in response.data]
    # Bug fix: the previous `if len(embeddings) > 0` always returned the full
    # list (making the single-vector branch unreachable, and it would have
    # raised IndexError on an empty list). Return a flat vector for a single
    # string input, matching the declared List[float] return shape.
    if isinstance(text, list):
        return embeddings
    return embeddings[0]
def get_completion(
    connection_params: OpenAIConnectionParams,
    messages: List[Dict] | None = None,
    functions: List[Dict] | None = None,
    model: str = "gpt-4",
    **kwargs,
) -> (str, Dict | None):
    """Run one non-streaming chat completion.

    :returns: (content, function) where `function` is a dict with "name" and,
        when the model emitted valid JSON, "arguments"; None when no function
        call was produced.
    """
    messages = [] if messages is None else messages
    functions = [] if functions is None else functions
    response = _get_completion(
        connection_params=connection_params,
        messages=messages,
        functions=functions,
        model=model,
        **kwargs,
    )
    message = response.choices[0].message
    function = None
    if message.function_call:
        function = {"name": message.function_call.name}
        raw_arguments = message.function_call.arguments
        if raw_arguments:
            try:
                function["arguments"] = json.loads(raw_arguments)
            except json.decoder.JSONDecodeError:
                # Malformed JSON from the model: omit "arguments" entirely.
                pass
    usage = response.usage
    calculate_cost(
        model,
        {
            "total_tokens": usage.total_tokens,
            "prompt_tokens": usage.prompt_tokens,
            "completion_tokens": usage.total_tokens - usage.prompt_tokens,
        },
    )
    _log_completion(
        messages,
        functions=functions,
        response_content=message.content,
        response_function=function,
    )
    return message.content, function
def get_streaming_completion(
    connection_params: OpenAIConnectionParams,
    messages: List[Dict] | None = None,
    functions: List[Dict] | None = None,
    model: str = "gpt-4",
    content_stream_callback: Callable[[str], None] | None = None,
    function_stream_callback: Callable[[str], None] | None = None,
    **kwargs,
) -> (str, Dict | None):
    """Stream a chat completion, invoking callbacks as chunks arrive.

    :param content_stream_callback: called with each new content fragment.
    :param function_stream_callback: called with the accumulated function
        name and arguments every time a function-call fragment arrives.
    :returns: (content, function) — `function` is a dict with "name" and,
        when the model emitted valid JSON, "arguments"; None when no
        function call was produced.
    """
    if messages is None:
        messages = []
    if functions is None:
        functions = []
    response = _get_completion(
        connection_params=connection_params,
        messages=messages,
        functions=functions,
        model=model,
        stream=True,
        **kwargs,
    )
    # Accumulate the text and any function call across the streamed chunks.
    content = ""
    function_name = ""
    function_arguments = ""
    for chunk in response:
        if len(chunk.choices) == 0:
            continue
        delta = chunk.choices[0].delta
        if delta.content:
            content += delta.content
            if content_stream_callback:
                content_stream_callback(delta.content)
        function_call = delta.function_call
        if function_call:
            if function_call.name:
                function_name += function_call.name
            if function_call.arguments:
                function_arguments += function_call.arguments
            if function_stream_callback:
                # Callback receives the totals accumulated so far, not deltas.
                function_stream_callback(function_name, function_arguments)
    function = None
    if function_name:
        function = {"name": function_name}
        if function_arguments:
            try:
                function["arguments"] = json.loads(function_arguments)
            except json.decoder.JSONDecodeError:
                # Malformed JSON from the model: omit "arguments" entirely.
                pass
    # Token counts are estimated locally with tiktoken from the streamed text
    # (no usage data from the response is consulted here).
    if function:
        completion_tokens = content + json.dumps(function)
    else:
        completion_tokens = content
    completion_token_count = count_tokens(completion_tokens, model)
    prompt_tokens = json.dumps(messages)
    prompt_token_count = count_tokens(prompt_tokens, model)
    calculate_cost(
        model,
        {
            "total_tokens": prompt_token_count + completion_token_count,
            "prompt_tokens": prompt_token_count,
            "completion_tokens": completion_token_count,
        },
    )
    _log_completion(
        messages,
        functions=functions,
        response_content=content,
        response_function=function,
    )
    return content, function
def _log_completion(
    messages: List[Dict] | None = None,
    functions: List[Dict] | None = None,
    response_content: str | None = None,
    response_function: Dict | None = None,
):
    """Send the prompt/response pair to the module logger, if one is enabled."""
    global logger
    if not logger:
        # Logging disabled: nothing to do.
        return
    if messages is None:
        messages = []
    if functions is None:
        functions = []
    if response_content is None:
        response_content = ""
    sections = []
    if len(functions) > 0:
        sections.append(f"TOOLS:\n{json.dumps(functions)}\n\n")
    if len(messages) > 0:
        sections.append(f"MESSAGES:\n{json.dumps(messages)}\n\n")
    logger.log("".join(sections), response_content, function=response_function)
def _get_completion(
    connection_params: OpenAIConnectionParams,
    messages: List[Dict] | None = None,
    functions: List[Dict] | None = None,
    model: str = "gpt-4",
    **kwargs,
) -> (str, Dict | None):
    """Build the appropriate client (Azure or OpenAI) and issue the request."""
    messages = [] if messages is None else messages
    functions = [] if functions is None else functions
    request = {
        "messages": messages,
        "temperature": DEFAULT_TEMPERATURE,
    }
    if connection_params.connection_type == OpenAIConnectionType.AZURE:
        client = AzureOpenAI(
            api_key=connection_params.api_key,
            api_version=connection_params.api_version,
            azure_endpoint=connection_params.azure_endpoint,
            azure_deployment=connection_params.azure_deployment,
        )
        # Azure addresses the model via its deployment name.
        request["model"] = connection_params.azure_deployment
    else:
        client = OpenAI(
            api_key=connection_params.api_key,
        )
        request["model"] = model
    if functions:
        request["functions"] = functions
    return client.chat.completions.create(**request, **kwargs)
================================================
FILE: bondai/prompt/__init__.py
================================================
from .prompt_builder import PromptBuilder
from .default_prompt_builder import DefaultPromptBuilder
from .jinja_prompt_builder import JinjaPromptBuilder
__all__ = ["PromptBuilder", "DefaultPromptBuilder", "JinjaPromptBuilder"]
================================================
FILE: bondai/prompt/default_prompt_builder.py
================================================
from bondai.prompt import PromptBuilder


class DefaultPromptBuilder(PromptBuilder):
    # PromptBuilder that renders a str.format-style template; the actual
    # substitution is performed by the inherited _apply_prompt_template.
    def __init__(self, prompt_template: str):
        # Template string containing {placeholder} fields.
        self._prompt_template: str = prompt_template

    def build_prompt(self, **kwargs) -> str:
        """Render the stored template, substituting `kwargs` into placeholders."""
        return self._apply_prompt_template(self._prompt_template, **kwargs)
================================================
FILE: bondai/prompt/default_prompt_template.md
================================================
# Introduction #
You are a powerful problem solving agent!
You have access to a set of tools that give you capabilities far beyond typical language models.
You are being asked to use these tools and your powerful problem solving skills to help the user with the TASK specified below.
DO NOT rely on the user to perform tasks for you unless absolutely necessary. You should attempt to complete this TASK without involving the user.
You are running within an Ubuntu environment. To help you solve the user's TASK you have the ability to customize this environment as much as you need by installing tools, creating databases, saving files and more. Just use your tools!
# Today's Current Date and Time #
{DATETIME}
# TASK #
{TASK}
# Previous Work #
{WORK}
# Next Steps #
Let's think step by step and come up with the next step that should be taken to solve this TASK. Be sure to look at the Previous Work that has already been completed and avoid repeating yourself when possible. Be sure to look at the "Results" for each step for information you can use. Select the best tool for the next step and remember, use the task_completed tool when you have all the information you need to provide the final answer. If the task you're completing requires multiple steps, it is strongly recommended that you consider using the agent_tool to delegate and break up the task into smaller pieces, as this is more likely to lead to a successful result. Also, it is strongly recommended that you save your work along the way whenever possible.
================================================
FILE: bondai/prompt/jinja_prompt_builder.py
================================================
import platform
from datetime import datetime
from jinja2 import Template
from bondai.prompt import PromptBuilder
class JinjaPromptBuilder(PromptBuilder):
    """PromptBuilder that renders Jinja2 templates with default context variables."""

    def __init__(self, prompt_template: str):
        self._prompt_template: str = prompt_template

    def _apply_prompt_template(self, template_string: str, **kwargs) -> str:
        # Override the base str.format implementation with Jinja2 rendering.
        return Template(template_string).render(**kwargs)

    def build_prompt(self, **kwargs) -> str:
        """Render the template; `platform` and `datetime` are always provided.

        Defaults and caller kwargs are passed as separate keyword expansions,
        so a caller-supplied key that collides with a default raises TypeError
        (duplicate keyword argument), matching the original behavior.
        """
        defaults = {
            "platform": platform.system(),
            "datetime": str(datetime.now()),
        }
        return self._apply_prompt_template(
            self._prompt_template, **defaults, **kwargs
        )
================================================
FILE: bondai/prompt/prompt_builder.py
================================================
from abc import ABC, abstractmethod
from typing import Dict, Any
class PromptBuilder(ABC):
    """Abstract base for prompt builders.

    Subclasses implement build_prompt(); instances are also directly callable.
    """

    def __call__(self, **kwargs: Dict[str, Any]) -> str:
        return self.build_prompt(**kwargs)

    @abstractmethod
    def build_prompt(self, **kwargs: Dict[str, Any]) -> str:
        """Produce the final prompt string from keyword context values."""
        pass

    def _apply_prompt_template(self, prompt_template: str, **kwargs) -> str:
        """Substitute `kwargs` into a str.format-style template.

        Bug fix: this method previously lacked the `self` parameter, so calls
        like `self._apply_prompt_template(template, **kwargs)` (as made by
        DefaultPromptBuilder.build_prompt) raised TypeError. The signature now
        also matches the JinjaPromptBuilder override.
        """
        return prompt_template.format_map(kwargs)
================================================
FILE: bondai/tools/__init__.py
================================================
from .tool import Tool, EmptyParameters, InputParameters
from .agent_tool import AgentTool
from .dalle_tool import DalleTool
from .python_repl_tool import PythonREPLTool
from .shell_tool import ShellTool
from .langchain_tool import LangChainTool
from .response_query import ResponseQueryTool
from .task_completed_tool import TaskCompletedTool
__all__ = [
"Tool",
"AgentTool",
"DalleTool",
"PythonREPLTool",
"ShellTool",
"EmptyParameters",
"InputParameters",
"LangChainTool",
"ResponseQueryTool",
"TaskCompletedTool",
]
================================================
FILE: bondai/tools/agent_tool.py
================================================
from pydantic import BaseModel
from bondai.tools import Tool
TOOL_NAME = "agent_tool"
# Description presented to the calling LLM explaining how to delegate work.
TOOL_DESCRIPTION = (
    "This tool allows you to delegate tasks to other agents. "
    "This can be really helpful for taking a complex task and breaking it down into smaller, more manageable pieces. "
    "Just include a highly descriptive prompt in the 'task_description' parameter for this task. "
    "The more detailed your description the better the agent will be at the task. "
    "The 'task_description' parameter is required and MUST be provided."
)


class Parameters(BaseModel):
    # Function-call argument schema: the description of the delegated task.
    task_description: str
class AgentTool(Tool):
    """Tool that delegates a described task to a wrapped agent."""

    def __init__(self, agent):
        super().__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        if agent is None:
            raise Exception("Agent is required.")
        self._agent = agent

    def run(self, task_description: str) -> str:
        """Run the wrapped agent on `task_description` and return its output."""
        # Imported lazily to avoid a circular import at module load time.
        from bondai.agents import ToolUsageMessage

        result = self._agent.run(task=task_description)
        self._agent.clear_messages()
        if not isinstance(result, ToolUsageMessage):
            return result
        if result.success:
            return result.tool_output
        return f"Tool failed with the following error: {result.error}"

    def stop(self):
        """Stop the wrapped agent's current run."""
        self._agent.stop()
================================================
FILE: bondai/tools/alpaca_markets/__init__.py
================================================
from .env_vars import ALPACA_MARKETS_API_KEY_ENV_VAR, ALPACA_MARKETS_SECRET_KEY_ENV_VAR
from .create_order import CreateOrderTool
from .get_account import GetAccountTool
from .list_positions import ListPositionsTool
from .response_formatter import (
format_orders_response,
format_account_response,
format_positions_response,
)
__all__ = [
"ALPACA_MARKETS_API_KEY_ENV_VAR",
"ALPACA_MARKETS_SECRET_KEY_ENV_VAR",
"CreateOrderTool",
"GetAccountTool",
"ListPositionsTool",
"format_orders_response",
"format_account_response",
"format_positions_response",
]
================================================
FILE: bondai/tools/alpaca_markets/create_order.py
================================================
import os
from pydantic import BaseModel
from bondai.tools import Tool
from .response_formatter import format_order_response
from .env_vars import ALPACA_MARKETS_API_KEY_ENV_VAR, ALPACA_MARKETS_SECRET_KEY_ENV_VAR
from alpaca.trading.client import TradingClient
from alpaca.trading.requests import MarketOrderRequest, LimitOrderRequest
from alpaca.common.exceptions import APIError
TOOL_NAME = "create_investment_order"
# Description presented to the LLM; documents every accepted parameter.
# (Fixed typo: "by you can also specify" -> "but you can also specify".)
TOOL_DESCRIPTION = (
    "This tool allows you to create an Order to buy or sell a stock. "
    "When using this tool, you must specify either 'buy' or 'sell' for the 'side' parameter. "
    "You must also specify the 'symbol' parameter which is the stock symbol of the stock you want to buy or sell. "
    "You must also specify the 'quantity' parameter which is the number of shares you want to buy or sell. "
    "The 'order_type' parameter is optional. It will default to a 'market' order but you can also specify a 'limit' order. "
    "The 'limit_price' parameter is only required if you specify a 'limit' order. "
    "The 'time_in_force' parameter is optional. It will default to 'day' but you can also specify 'gtc', 'opg', 'cls', 'ioc', or 'fok'."
)


class Parameters(BaseModel):
    # Function-call argument schema. All fields are declared as strings;
    # run() applies defaults and performs its own validation.
    side: str
    symbol: str
    quantity: str
    order_type: str
    time_in_force: str
    limit_price: str
    thought: str
class CreateOrderTool(Tool):
    """Tool that submits a buy/sell order via the Alpaca paper-trading API."""

    def __init__(
        self,
        alpaca_api_key=os.environ.get(ALPACA_MARKETS_API_KEY_ENV_VAR),
        alpaca_secret_key=os.environ.get(ALPACA_MARKETS_SECRET_KEY_ENV_VAR),
    ):
        # NOTE: defaults are read from the environment at import time.
        super().__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        self.trading_client = TradingClient(
            alpaca_api_key, alpaca_secret_key, paper=True
        )

    def run(self, arguments):
        """Validate the arguments and submit a market or limit order.

        Validation failures are returned to the agent as plain-text messages
        rather than raised as exceptions.
        """
        side = arguments.get("side")
        symbol = arguments.get("symbol")
        quantity = arguments.get("quantity")
        order_type = arguments.get("order_type", "market")
        time_in_force = arguments.get("time_in_force", "day")
        limit_price = arguments.get("limit_price", None)

        if side not in ["buy", "sell"]:
            return 'Invalid side. Must be either "buy" or "sell".'
        if not symbol:
            return "Invalid symbol."
        if not quantity:
            return "Invalid quantity."
        if order_type not in ["market", "limit"]:
            return 'Invalid order type. Must be either "market" or "limit".'
        if time_in_force not in ["day", "gtc", "opg", "cls", "ioc", "fok"]:
            return 'Invalid time in force. Must be either "day", "gtc", "opg", "cls", "ioc", or "fok".'
        if order_type == "limit" and not limit_price:
            return "Invalid limit price."

        if order_type == "market":
            order_request = MarketOrderRequest(
                symbol=symbol, qty=quantity, side=side, time_in_force=time_in_force
            )
        else:
            order_request = LimitOrderRequest(
                symbol=symbol,
                limit_price=limit_price,
                qty=quantity,
                side=side,
                time_in_force=time_in_force,
            )
        try:
            response = self.trading_client.submit_order(order_data=order_request)
            return format_order_response(response)
        except APIError as e:
            # Surface Alpaca's own error text back to the agent.
            return e.message
================================================
FILE: bondai/tools/alpaca_markets/env_vars.py
================================================
# Environment variable names holding the Alpaca Markets API credentials.
ALPACA_MARKETS_API_KEY_ENV_VAR = "ALPACA_MARKETS_API_KEY"
ALPACA_MARKETS_SECRET_KEY_ENV_VAR = "ALPACA_MARKETS_SECRET_KEY"
================================================
FILE: bondai/tools/alpaca_markets/get_account.py
================================================
import os
from bondai.tools import Tool
from .response_formatter import format_account_response
from .env_vars import ALPACA_MARKETS_API_KEY_ENV_VAR, ALPACA_MARKETS_SECRET_KEY_ENV_VAR
from alpaca.trading.client import TradingClient
TOOL_NAME = "get_investment_account_info"
TOOL_DESCRIPTION = "This tool will provide information about your investment account including your cash balance."


class GetAccountTool(Tool):
    """Tool that fetches Alpaca (paper-trading) account details."""

    def __init__(
        self,
        alpaca_api_key=os.environ.get(ALPACA_MARKETS_API_KEY_ENV_VAR),
        alpaca_secret_key=os.environ.get(ALPACA_MARKETS_SECRET_KEY_ENV_VAR),
    ):
        # NOTE: defaults are read from the environment at import time.
        super().__init__(TOOL_NAME, TOOL_DESCRIPTION)
        self.trading_client = TradingClient(
            alpaca_api_key, alpaca_secret_key, paper=True
        )

    def run(self, arguments):
        """Return a formatted summary of the trading account."""
        account = self.trading_client.get_account()
        return format_account_response(account)
================================================
FILE: bondai/tools/alpaca_markets/list_positions.py
================================================
import os
from bondai.tools.tool import Tool
from .response_formatter import format_positions_response
from .env_vars import ALPACA_MARKETS_API_KEY_ENV_VAR, ALPACA_MARKETS_SECRET_KEY_ENV_VAR
from alpaca.trading.client import TradingClient
TOOL_NAME = "list_investment_positions"
TOOL_DESCRIPTION = (
    "This tool will list all of your currently open investment positions."
)


class ListPositionsTool(Tool):
    """Tool that lists all open Alpaca (paper-trading) positions."""

    def __init__(
        self,
        alpaca_api_key=os.environ.get(ALPACA_MARKETS_API_KEY_ENV_VAR),
        alpaca_secret_key=os.environ.get(ALPACA_MARKETS_SECRET_KEY_ENV_VAR),
    ):
        # NOTE: defaults are read from the environment at import time.
        super().__init__(TOOL_NAME, TOOL_DESCRIPTION)
        self.trading_client = TradingClient(
            alpaca_api_key, alpaca_secret_key, paper=True
        )

    def run(self, arguments):
        """Return a formatted list of open positions, or a no-positions note."""
        positions = self.trading_client.get_all_positions()
        if not positions:
            return "There are no open positions."
        return format_positions_response(positions)
================================================
FILE: bondai/tools/alpaca_markets/response_formatter.py
================================================
def format_order_response(response):
    """Render an Alpaca order object as a readable multi-line string.

    NOTE(review): "Filled Average Price" and "Filled Price" both print
    `filled_avg_price` in the original output; preserved verbatim for
    compatibility, but the duplication looks like a copy-paste artifact.
    """
    lines = [
        f"Order ID: {response.id}",
        f"Status: {response.status}",
        f"Filled At: {response.filled_at}",
        f"Symbol: {response.symbol}",
        f"Asset Class: {response.asset_class}",
        f"Notional: {response.notional}",
        f"Quantity: {response.qty}",
        f"Filled Quantity: {response.filled_qty}",
        f"Filled Average Price: {response.filled_avg_price}",
        f"Filled Price: {response.filled_avg_price}",
        f"Order Class: {response.order_class}",
        f"Order Type: {response.order_type}",
        f"Side: {response.side}",
        f"Time In Force: {response.time_in_force}",
        f"Limit Price: {response.limit_price}",
        f"Stop Price: {response.stop_price}",
    ]
    return "\n".join(lines)
def format_account_response(response):
    """Render an Alpaca account object as a readable multi-line string."""
    lines = [
        f"Cash: {response.cash}",
        f"Currency: {response.currency}",
        f"Buying Power: {response.buying_power}",
        f"Regt Buying Power: {response.regt_buying_power}",
        f"Daytrading Buying Power: {response.daytrading_buying_power}",
        f"Non Marginable Buying Power: {response.non_marginable_buying_power}",
        f"Accrued Fees: {response.accrued_fees}",
        f"Portfolio Value: {response.portfolio_value}",
        f"Shorting Enabled: {response.shorting_enabled}",
        f"Crypto Status: {response.crypto_status}",
        f"Long Market Value: {response.long_market_value}",
        f"Short Market Value: {response.short_market_value}",
        f"Initial Margin: {response.initial_margin}",
        f"Maintenance Margin: {response.maintenance_margin}",
        f"Last Maintenance Margin: {response.last_maintenance_margin}",
    ]
    return "\n".join(lines)
def format_positions_response(response):
    """Format each open position; entries are separated by blank lines."""
    return "".join(format_position(position) + "\n\n" for position in response)


def format_position(position):
    """Render a single Alpaca position as a readable multi-line string."""
    lines = [
        f"Symbol: {position.symbol}",
        f"Quantity: {position.qty}",
        f"Entry Share Price: {position.avg_entry_price}",
        f"Current Share Price: {position.current_price}",
        f"Unrealized Profit/Loss: {position.unrealized_pl}",
        f"Market Value: {position.market_value}",
        f"Cost Basis: {position.cost_basis}",
    ]
    return "\n".join(lines)
def format_orders_response(response):
    """Format each order; entries are separated by blank lines."""
    return "".join(format_order(order) + "\n\n" for order in response)


def format_order(order):
    """Render a single Alpaca order as a short readable summary."""
    lines = [
        f"Order ID: {order.id}",
        f"Type: {order.type}",
        f"Symbol: {order.symbol}",
        f"Status: {order.status}",
        f"Side: {order.side}",
        f"Limit Price: {order.limit_price}",
        f"Quantity: {order.qty}",
    ]
    return "\n".join(lines)
================================================
FILE: bondai/tools/bland_ai/__init__.py
================================================
from .bland_ai_tools import (
BlandAITool,
BLAND_AI_API_KEY_ENV_VAR,
BLAND_AI_VOICE_ID_ENV_VAR,
BLAND_AI_CALL_TIMEOUT_ENV_VAR,
)
__all__ = [
"BlandAITool",
"BLAND_AI_API_KEY_ENV_VAR",
"BLAND_AI_VOICE_ID_ENV_VAR",
"BLAND_AI_CALL_TIMEOUT_ENV_VAR",
]
================================================
FILE: bondai/tools/bland_ai/bland_ai_tools.py
================================================
import re
import os
import requests
import time
from pydantic import BaseModel
from bondai.tools import Tool
TOOL_NAME = "phone_call_tool"
# Description presented to the LLM. (Fixed garbled opening sentence:
# "This tool interfaces allows you" -> "This tool allows you".)
TOOL_DESCRIPTION = (
    "This tool allows you to make phone calls. The response you get back from this tool will be the call transcript.\n"
    "\nParameters:\n"
    "- phone_number (Required): The phone number of the person or company to call. For international numbers, "
    "include the country code without additional formatting (e.g., '+447700900077'). U.S. numbers may include "
    "formatting, but removing extra characters is recommended.\n"
    "- task (Required): This should be a highly detailed description of the task that should be performed on this call. Also, provide context about the interaction and give detailed instructions.\n"
    "- request_data (Optional, default={}): A dictionary that provides information for the AI during the call. Useful for giving the AI specific facts like the caller’s name, etc.\n"
)
# Environment variable names for Bland AI configuration.
BLAND_AI_API_KEY_ENV_VAR = "BLAND_AI_API_KEY"
BLAND_AI_VOICE_ID_ENV_VAR = "BLAND_AI_VOICE_ID"
BLAND_AI_CALL_TIMEOUT_ENV_VAR = "BLAND_AI_CALL_TIMEOUT"
API_ENDPOINT = "https://api.bland.ai/"
# Interval for checking call status (in seconds)
CHECK_INTERVAL = 2


class CallParameters(BaseModel):
    # Function-call argument schema for the phone-call tool.
    phone_number: str
    task: str
    # pydantic copies mutable defaults per instance, so the shared {} is safe.
    request_data: dict = {}
    thought: str
def validate_phone_number(phone):
    """Return True when `phone` is a valid international or U.S. number.

    :raises ValueError: with formatting guidance when the number is invalid.
    """
    # International numbers: '+' followed by 1 to 15 digits.
    international_pattern = (
        r"^\+\d{1,15}$"
    )
    # U.S. numbers, optionally prefixed with +1 and separated by spaces/dashes,
    # e.g. +1 123-456-7890, 123 456 7890, 123-456-7890.
    us_pattern = r"^(?:\+1)?[ -]?(\d{3})[ -]?(\d{3})[ -]?(\d{4})$"
    if re.match(international_pattern, phone) or re.match(us_pattern, phone):
        return True
    raise ValueError(
        "Invalid phone number format. "
        "For international numbers, include the country code and exclude additional formatting. E.g. '+447700900077'. "
        "For U.S. numbers, you may include formatting, but it's recommended to strip all additional characters."
    )
class BlandAITool(Tool):
    """Tool that places an automated phone call via the Bland AI HTTP API and
    returns the completed call's transcript.

    NOTE(review): constructor defaults read the environment at import time, so
    env-var changes made after this module is imported are not picked up --
    confirm this is intentional.
    """

    def __init__(
        self,
        bland_ai_api_key=os.environ.get(BLAND_AI_API_KEY_ENV_VAR),
        bland_ai_voice_id=int(os.environ.get(BLAND_AI_VOICE_ID_ENV_VAR, "0")),
        bland_ai_call_timeout=int(os.environ.get(BLAND_AI_CALL_TIMEOUT_ENV_VAR, "300")),
    ):
        super(BlandAITool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, CallParameters)
        self.bland_ai_api_key = bland_ai_api_key      # Bland AI auth token
        self.bland_ai_voice_id = bland_ai_voice_id    # voice to use for the call
        self.bland_ai_call_timeout = bland_ai_call_timeout  # max call time (seconds)

    def run(self, arguments):
        """Start a call, poll until completion or timeout, return the transcript.

        Raises Exception when required arguments are missing or the call fails
        to start, and TimeoutError when the call exceeds bland_ai_call_timeout.
        """
        if arguments.get("phone_number") is None:
            raise Exception("phone_number is required.")
        if arguments.get("task") is None:
            raise Exception("task is required.")
        validate_phone_number(arguments["phone_number"])
        # Start the call
        arguments["reduce_latency"] = False
        call_id = self.start_call(arguments)
        if not call_id:
            raise Exception("Failed to start the call.")
        # Monitor the call until completion
        start_time = time.time()
        while True:
            # Check for timeout
            elapsed_time = time.time() - start_time
            if elapsed_time > self.bland_ai_call_timeout:
                # Best-effort hang-up before surfacing the timeout.
                self.end_call(call_id)
                raise TimeoutError(
                    f"Call exceeded the maximum allowed time of {self.bland_ai_call_timeout} seconds."
                )
            completed, transcripts = self.check_call_status(call_id)
            if completed:
                return f"Call to {arguments['phone_number']} has completed.\n\nTranscripts:\n{transcripts}"
            time.sleep(CHECK_INTERVAL)

    def start_call(self, arguments):
        """POST the call request; return the new call_id, or None on any failure."""
        headers = {"authorization": self.bland_ai_api_key}
        response = requests.post(API_ENDPOINT + "call", json=arguments, headers=headers)
        if response.status_code == 200:
            resp_data = response.json()
            if resp_data["status"] == "success":
                return resp_data["call_id"]
        # Non-200 responses and non-"success" payloads fall through to None.
        return None

    def check_call_status(self, call_id):
        """Fetch call logs; return (True, transcript_text) once completed,
        otherwise (False, None). Failed requests are treated as not-completed."""
        headers = {"authorization": self.bland_ai_api_key}
        data = {"call_id": call_id}
        response = requests.post(API_ENDPOINT + "logs", json=data, headers=headers)
        if response.status_code == 200:
            resp_data = response.json()
            if resp_data["completed"]:
                # Render each transcript entry as "<speaker>: <text>".
                transcripts = "\n".join(
                    [
                        f"{item['user']}: {item['text']}"
                        for item in resp_data["transcripts"]
                    ]
                )
                return True, transcripts
        return False, None

    def end_call(self, call_id):
        """Ask the API to terminate an in-progress call; the response is ignored."""
        headers = {"authorization": self.bland_ai_api_key}
        data = {"call_id": call_id}
        requests.post(API_ENDPOINT + "end", json=data, headers=headers)
================================================
FILE: bondai/tools/conversational/__init__.py
================================================
from .conversational_tools import (
SEND_MESSAGE_TOOL_NAME,
SendMessageTool,
EXIT_CONVERSATION_TOOL_NAME,
ExitConversationTool,
)
__all__ = [
"SEND_MESSAGE_TOOL_NAME",
"SendMessageTool",
"EXIT_CONVERSATION_TOOL_NAME",
"ExitConversationTool",
]
================================================
FILE: bondai/tools/conversational/conversational_tools.py
================================================
from pydantic import BaseModel
from bondai.tools import Tool
from typing import Dict, Tuple
from bondai.agents.messages import ConversationMessage
# Identifier the agent uses to invoke the send-message tool.
SEND_MESSAGE_TOOL_NAME = "send_message"
# LLM-facing usage guide for the send-message tool.
SEND_MESSAGE_TOOL_DESCRIPTION = (
    "Use the send_message tool to send messages to other members of the conversation including agents and the user. "
    "The send_message tool takes three arguments: recipient_name, message, and require_response. "
    "The recipient_name argument is the name of the recipient of the message. "
    "The message argument is the message to be sent to the recipient. "
    "The require_response argument is a boolean value that indicates whether a response from the recipient is necessary. "
    "The require_response argument defaults to True. If you do not require a response from the message recipient, set require_response to False."
)
class SendMessageToolParameters(BaseModel):
    """Argument schema for the send_message tool."""

    recipient_name: str  # conversation member the message is addressed to
    message: str  # message body to deliver
    require_response: bool = True  # whether the recipient must reply
class SendMessageTool(Tool):
    """Tool that lets a conversational agent send a message to another
    conversation member (agent or user)."""

    def __init__(self):
        super().__init__(
            SEND_MESSAGE_TOOL_NAME,
            SEND_MESSAGE_TOOL_DESCRIPTION,
            SendMessageToolParameters,
        )

    # Fix: the return annotation previously claimed Dict[str, bool] although the
    # method returns a (ConversationMessage, True) tuple.
    def run(
        self, recipient_name: str, message: str, require_response: bool = True
    ) -> Tuple[ConversationMessage, bool]:
        """Build the outgoing ConversationMessage.

        Returns a (message, flag) pair; the True flag presumably tells the agent
        loop this tool ends the current turn (matches ExitConversationTool's
        (None, True)) -- confirm against the caller.
        """
        return (
            ConversationMessage(
                role="assistant",
                recipient_name=recipient_name,
                message=message,
                require_response=require_response,
            ),
            True,
        )
# Identifier the agent uses to invoke the exit-conversation tool.
EXIT_CONVERSATION_TOOL_NAME = "exit_conversation"
class ExitConversationTool(Tool):
    """Tool that ends the current conversation when the agent's task is done."""

    def __init__(self):
        super().__init__(
            EXIT_CONVERSATION_TOOL_NAME,
            "Use the exit_conversation tool to exit the conversation once your task has been completed.",
        )

    def run(self, arguments: Dict) -> Tuple[str, bool]:
        # NOTE(review): (None, True) presumably signals the agent loop to stop;
        # confirm against the tool-dispatch caller.
        return None, True
================================================
FILE: bondai/tools/dalle_tool.py
================================================
import requests
import openai
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from bondai.models.openai import OpenAIConnectionParams, DefaultOpenAIConnectionParams
# Resolution requested from the image-generation API.
IMAGE_SIZE = "1024x1024"
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "text_to_image_tool"
# LLM-facing usage guide.
TOOL_DESCRIPTION = (
    "This tool takes in a description and generates an image based on that description. "
    "It then saves the image to the specified filename.\n"
    "\nParameters:\n"
    "- description (required): The text description based on which the image will be generated.\n"
    "- filename (required): The name of the file where the generated image will be saved."
)
class Parameters(BaseModel):
    """Argument schema for text_to_image_tool."""

    description: str  # prompt text for image generation
    filename: str  # local path where the downloaded image is written
    thought: str  # agent's free-text reasoning; not used by the tool itself
class DalleTool(Tool):
    """Tool that generates an image from a text description via the OpenAI image
    API and saves the result to a local file."""

    def __init__(self, connection_params: OpenAIConnectionParams | None = None):
        super().__init__(TOOL_NAME, TOOL_DESCRIPTION, parameters=Parameters)
        # Fall back to the process-wide default DALL-E connection settings.
        self._connection_params = (
            connection_params
            if connection_params
            else DefaultOpenAIConnectionParams.dalle_connection_params
        )
        if not self._connection_params:
            raise Exception("Connection parameters not set for DalleTool.")

    def run(self, arguments: Dict) -> str:
        """Generate an image for 'description', save it to 'filename', and
        return a confirmation message."""
        description = arguments.get("description")
        filename = arguments.get("filename")
        if description is None:
            raise Exception("description is required.")
        if filename is None:
            raise Exception("filename is required.")
        params = {"prompt": description, "n": 1, "size": IMAGE_SIZE}
        # Use the OpenAI API to generate an image based on the description
        response = openai.Image.create(**params, **self._connection_params.to_dict())
        # Get the image URL from the response
        image_url = response["data"][0]["url"]
        # Download the image from the URL and save it to the specified filename
        image_content = requests.get(image_url).content
        with open(filename, "wb") as file:
            file.write(image_content)
        # Fix: the f-string previously contained no placeholder, so the message
        # never named the file; interpolate the actual filename.
        return f"Image generated and saved to {filename} successfully."
================================================
FILE: bondai/tools/database/__init__.py
================================================
from .db_query import DatabaseQueryTool, PG_URI_ENV_VAR
__all__ = ["DatabaseQueryTool", "PG_URI_ENV_VAR"]
================================================
FILE: bondai/tools/database/db_query.py
================================================
import os
import psycopg2
from pydantic import BaseModel
from bondai.tools import Tool
from bondai.models import LLM
from bondai.models.openai import OpenAILLM, OpenAIModelNames
class Parameters(BaseModel):
    """Argument schema for database_query_tool."""

    question: str  # natural-language question to translate into SQL
    thought: str  # agent's free-text reasoning; not used by the tool itself
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "database_query_tool"
QUERY_SYSTEM_PROMPT = "You are a helpful question and answer assistant designed to answer questions about a database. Use the provided information to answer the user's QUESTION at the very end."
TOOL_DESCRIPTION = (
    "This tool allows you to ask a question and retrieve data from the user's database."
    "DO NOT ask the user for any additional information about the database."
    "All necessary information to connect to the database has already been captured."
    "Just specify your question using the 'question' parameter."
    "Your question will automatically be turned into SQL and the response will contain the resulting data."
)
PG_URI_ENV_VAR = "PG_URI"
# PostgreSQL connection settings, read from the environment at import time.
PG_HOST = os.environ.get("PG_HOST")
# Fix: the port was read from the PG_HOST env var; it must come from PG_PORT.
PG_PORT = int(os.environ.get("PG_PORT", "5432"))
PG_USERNAME = os.environ.get("PG_USERNAME")
PG_PASSWORD = os.environ.get("PG_PASSWORD")
PG_DBNAME = os.environ.get("PG_DBNAME")
# LLM SQL generation is retried this many times before the error is re-raised.
MAX_QUERY_RETRIES = 3
QUERY_PROMPT_TEMPLATE = """
Using the database schema below respond with a SQL query to answer the user's QUESTION.
VERY IMPORTANT: You must respond ONLY with a SQL query. Do not respond with any other text.
# DATABASE SCHEMA #
###DBSCHEMA###
# QUESTION #
###QUESTION###
VERY IMPORTANT: You must respond ONLY with a SQL query. Do not respond with any other text.
"""
RESPONSE_PROMPT_TEMPLATE = """
You were asked the following question:
###QUESTION###
The following information was returned from the database:
###QUERY_RESULTS###
Please respond with a friendly text response to the user's question.
"""
class DatabaseQueryTool(Tool):
    """Tool that answers natural-language questions by asking an LLM to generate
    SQL, executing it against PostgreSQL, and formatting the rows as markdown."""

    def __init__(
        self,
        pg_uri: str = os.environ.get(PG_URI_ENV_VAR),
        pg_host: str = PG_HOST,
        pg_port: int = PG_PORT,
        pg_username: str = PG_USERNAME,
        pg_password: str = PG_PASSWORD,
        pg_dbname: str = PG_DBNAME,
        llm: LLM | None = None,
    ):
        super(DatabaseQueryTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        # Either pg_uri or the individual host/port/user/password/dbname are used.
        self.pg_uri = pg_uri
        self.pg_host = pg_host
        self.pg_port = pg_port
        self.pg_username = pg_username
        self.pg_password = pg_password
        self.pg_dbname = pg_dbname
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT35_TURBO_16K)
        self.llm = llm

    def run(self, arguments):
        """Translate the user's question to SQL, run it, and return a markdown table.

        Generation + execution is retried up to MAX_QUERY_RETRIES times; the
        last error is re-raised when retries are exhausted.
        """
        # Fix: use .get() so a missing key reaches the explicit check below
        # instead of raising KeyError first.
        question = arguments.get("question")
        if question is None:
            raise Exception("question is required")
        schema_str = self.__get_database_schema()
        query_prompt = QUERY_PROMPT_TEMPLATE.replace("###DBSCHEMA###", schema_str)
        query_prompt = query_prompt.replace("###QUESTION###", question)
        attempts = 0
        while True:
            try:
                query_response, _ = self.llm.get_completion(query_prompt)
                rows, colnames = self.__query_database(query_response)
                return self.__format_response(rows, colnames)
            except Exception as e:
                attempts += 1
                if attempts > MAX_QUERY_RETRIES:
                    raise e

    def __format_response(self, rows, colnames):
        """Render query results as a markdown table (header, separator, rows)."""
        markdown_output = "| " + " | ".join(colnames) + " |\n"
        markdown_output += "| " + " | ".join(["---"] * len(colnames)) + " |\n"
        for row in rows:
            markdown_output += "| " + " | ".join(map(str, row)) + " |\n"
        return markdown_output

    def __get_database_connection(self):
        """Open a psycopg2 connection from either the URI or discrete settings."""
        if self.pg_uri:
            # Establish the connection
            return psycopg2.connect(self.pg_uri, sslmode="require")
        else:
            # Establish the connection
            return psycopg2.connect(
                host=self.pg_host,
                port=self.pg_port,
                user=self.pg_username,
                password=self.pg_password,
                dbname=self.pg_dbname,
            )

    def __query_database(self, query):
        """Execute ``query``; return (rows, column_names). Always closes the
        cursor and connection, even on failure."""
        connection = None
        cursor = None
        try:
            connection = self.__get_database_connection()
            # Create a new cursor
            cursor = connection.cursor()
            # Execute the query
            cursor.execute(query)
            colnames = [desc[0] for desc in cursor.description]
            # Fetch all rows from the query result
            return cursor.fetchall(), colnames
        finally:
            # Close the cursor and the connection
            if cursor:
                cursor.close()
            if connection:
                connection.close()

    def __get_database_schema(self):
        """Describe the 'public' schema as human-readable text for the LLM prompt."""
        # Query the schema from information_schema
        rows, _ = self.__query_database(
            (
                "SELECT table_name, column_name, data_type, is_nullable, column_default\n"
                "FROM information_schema.columns\n"
                "WHERE table_schema = 'public'\n"
                "ORDER BY table_name, ordinal_position;"
            )
        )
        # Process the rows to create a formatted string
        schema_str = ""
        current_table = None
        for row in rows:
            table, column, data_type, is_nullable, default = row
            if table != current_table:
                if current_table:
                    schema_str += "\n"
                schema_str += f"Table: {table}\n"
                schema_str += "-" * (len(table) + 8) + "\n"
                current_table = table
            schema_str += f"{column}: {data_type}"
            if is_nullable == "NO":
                schema_str += " NOT NULL"
            if default:
                schema_str += f" DEFAULT {default}"
            schema_str += "\n"
        return schema_str
================================================
FILE: bondai/tools/file/__init__.py
================================================
from .file_query import FileQueryTool
from .file_write import FileWriteTool
from .file_read import FileReadTool
__all__ = [
"FileQueryTool",
"FileWriteTool",
"FileReadTool",
]
================================================
FILE: bondai/tools/file/file_query.py
================================================
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from bondai.models import LLM, EmbeddingModel
from bondai.util import semantic_search, is_html, get_html_text, extract_file_text
from bondai.models.openai import OpenAILLM, OpenAIEmbeddingModel, OpenAIModelNames
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "file_query"
# System prompt given to the LLM when answering the file question.
QUERY_SYSTEM_PROMPT = "You are a helpful question and answer assistant designed to answer questions about a file. Use the provided information to answer the user's QUESTION at the very end."
# LLM-facing usage guide.
TOOL_DESCRIPTION = "This tool allows you to ask a question about the text content of any file including summarization. This tool works for text files, html files and PDFs. Just specify the filename of the file using the 'filename' parameter and specify your question using the 'question' parameter."
def is_pdf(filename: str) -> bool:
    """Return True when the file at ``filename`` starts with the %PDF magic bytes."""
    with open(filename, "rb") as handle:
        return handle.read(4) == b"%PDF"
def build_prompt(question: str, context: str) -> str:
    """Assemble the Q&A prompt: context block first, then the user's question."""
    parts = (
        context,
        "\nIMPORTANT: Answer the following question for the user.\n",
        f"QUESTION: {question}\n",
    )
    return "".join(parts)
class Parameters(BaseModel):
    """Argument schema for file_query."""

    filename: str  # path of the file to inspect
    question: str  # question to answer about the file's content
    thought: str  # agent's free-text reasoning; not used by the tool itself
class FileQueryTool(Tool):
    """Tool that answers a question about a file's text content (plain text,
    HTML, or PDF), using semantic search to fit the text into the LLM context."""

    def __init__(
        self, llm: LLM | None = None, embedding_model: EmbeddingModel | None = None
    ):
        super(FileQueryTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        # Default to GPT-3.5 16k and the ada-002 embedding model.
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT35_TURBO_16K)
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )
        self._llm = llm
        self._embedding_model = embedding_model

    def run(self, arguments: Dict) -> str:
        """Read the file, select the most relevant excerpts, and answer the question."""
        filename = arguments.get("filename")
        question = arguments.get("question")
        if filename is None:
            raise Exception("filename is required")
        if question is None:
            raise Exception("question is required")
        # PDFs need dedicated extraction; everything else is read as text.
        if is_pdf(filename):
            text = extract_file_text(filename)
        else:
            with open(filename, "r") as f:
                text = f.read()
        # HTML content is reduced to its visible text.
        if is_html(text):
            text = get_html_text(text)
        # Token budget: model context minus system prompt, prompt template
        # overhead, and a 50-token safety margin.
        system_prompt_tokens = self._llm.count_tokens(QUERY_SYSTEM_PROMPT)
        prompt_template_tokens = self._llm.count_tokens(build_prompt("", question))
        max_tokens = (
            self._llm.max_tokens - system_prompt_tokens - prompt_template_tokens - 50
        )
        text = semantic_search(self._embedding_model, question, text, max_tokens)
        prompt = build_prompt(question, text)
        response = self._llm.get_completion(prompt, QUERY_SYSTEM_PROMPT)[0]
        return response
================================================
FILE: bondai/tools/file/file_read.py
================================================
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from bondai.util import extract_file_text
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "file_read"
# LLM-facing usage guide.
TOOL_DESCRIPTION = "This tool will return the contents of a file for you to view. Just specify the filename of the file using the 'filename' parameter."
def is_pdf(filename: str) -> bool:
    """Detect a PDF by its leading %PDF magic bytes."""
    with open(filename, "rb") as fh:
        magic = fh.read(4)
    return magic == b"%PDF"
class Parameters(BaseModel):
    """Argument schema for file_read."""

    filename: str  # path of the file to read
    thought: str  # agent's free-text reasoning; not used by the tool itself
class FileReadTool(Tool):
    """Tool that returns the full contents of a file, extracting text from PDFs."""

    def __init__(self):
        super(FileReadTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)

    def run(self, arguments: Dict) -> str:
        """Return the text of ``arguments['filename']``."""
        filename = arguments.get("filename")
        if filename is None:
            raise Exception("filename is required")
        # PDFs go through the dedicated text extractor; everything else is
        # read directly as text.
        if is_pdf(filename):
            return extract_file_text(filename)
        with open(filename, "r") as handle:
            return handle.read()
================================================
FILE: bondai/tools/file/file_write.py
================================================
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "file_write"
# LLM-facing usage guide.
TOOL_DESCRIPTION = (
    "This tool will save the data you provide in the 'text' parameter of this tool to a file."
    "You MUST specify the filename of the file you want to save using the 'filename' parameter."
    "You can optionally specify the 'append' parameter to append the 'text' to the file instead of overwriting it."
)
class Parameters(BaseModel):
    """Argument schema for file_write."""

    filename: str  # path of the file to write
    text: str  # content to write
    append: bool = False  # append instead of overwrite when True
    thought: str  # agent's free-text reasoning; not used by the tool itself
class FileWriteTool(Tool):
    """Tool that writes (or appends) text to a local file."""

    def __init__(self):
        super(FileWriteTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)

    def run(self, arguments: Dict) -> str:
        """Write 'text' to 'filename'; appends when 'append' is truthy."""
        filename = arguments.get("filename")
        text = arguments.get("text")
        if filename is None:
            raise Exception("filename is required")
        if text is None:
            raise Exception("text is required")
        mode = "a" if arguments.get("append") else "w"
        with open(filename, mode) as f:
            f.write(text)
        # Fix: the f-string previously contained no placeholder, so the message
        # never named the file; interpolate the actual filename.
        return f"File {filename} written successfully"
================================================
FILE: bondai/tools/gmail/__init__.py
================================================
from .list_emails import ListEmailsTool
from .query_emails import QueryEmailsTool
__all__ = [
"ListEmailsTool",
"QueryEmailsTool",
]
================================================
FILE: bondai/tools/gmail/list_emails.py
================================================
import pickle
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from googleapiclient.discovery import build
# Upper bound and default for the number of emails returned per page.
MAX_RESULT_COUNT = 20
DEFAULT_RESULT_COUNT = 5
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "list_emails"
# LLM-facing usage guide. Fix: "specificy" -> "specify".
TOOL_DESCRIPTION = f"This tool allows you to retrieve a paginated list of emails from the user's inbox that includes the 'Id', 'From' and 'Subject' fields for each email. You can specify the number of items to return by setting the 'count' parameter. The maximum count is {MAX_RESULT_COUNT} and the default is {DEFAULT_RESULT_COUNT}. To paginate through the full list of all emails just increment the 'page' parameter. By default 'page' is set to 1. You can also optionally specify a valid gmail query in the 'query' parameter."
class Parameters(BaseModel):
    """Argument schema for list_emails.

    NOTE(review): the tool description also mentions a 'query' parameter that
    run() reads, but it is not declared here -- confirm whether it should be
    added to the schema.
    """

    page: int  # 1-based page number to return
    count: int  # number of emails per page (capped at MAX_RESULT_COUNT)
    thought: str  # agent's free-text reasoning; not used by the tool itself
def get_email_attr(message: Dict, attr: str) -> str:
    """Return the value of the named header from a Gmail message payload, or
    None when the header is absent."""
    for header in message["payload"]["headers"]:
        if header["name"] == attr:
            return header["value"]
    return None
class ListEmailsTool(Tool):
    """Tool that lists Gmail inbox messages (Id/Date/From/Subject) one page at
    a time using the Gmail API's pageToken-based pagination."""

    def __init__(
        self,
        credentials: str | None = None,
        credentials_filename: str | None = "gmail-token.pickle",
    ):
        super(ListEmailsTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        if credentials:
            self.service = build("gmail", "v1", credentials=credentials)
        elif credentials_filename:
            # SECURITY NOTE: pickle.load executes arbitrary code if the token
            # file is untrusted; the credential file must come from a trusted
            # location.
            with open(credentials_filename, "rb") as token:
                credentials = pickle.load(token)
            self.service = build("gmail", "v1", credentials=credentials)
        else:
            raise Exception("No credentials provided.")

    def run(self, arguments: Dict) -> str:
        """Return a formatted listing of the requested page of inbox messages."""
        page = int(arguments.get("page", "1"))
        count = int(arguments.get("count", DEFAULT_RESULT_COUNT))
        query = arguments.get("query", "")
        if count > MAX_RESULT_COUNT:
            count = MAX_RESULT_COUNT
        result_size_estimate = None
        page_token = None
        results = {}
        # Walk forward through result pages until the requested page is reached.
        for i in range(page):
            # Fix: this previously tested `page == 1`, so any request for
            # page > 1 hit the `else: break` on its first iteration (page_token
            # was still None) and returned no results. The first iteration must
            # always fetch the first page.
            if i == 0:
                results = (
                    self.service.users()
                    .messages()
                    .list(
                        userId="me",
                        labelIds=["INBOX"],
                        maxResults=MAX_RESULT_COUNT,
                        q=query,
                    )
                    .execute()
                )
            elif page_token:
                results = (
                    self.service.users()
                    .messages()
                    .list(
                        userId="me",
                        labelIds=["INBOX"],
                        maxResults=MAX_RESULT_COUNT,
                        q=query,
                        pageToken=page_token,
                    )
                    .execute()
                )
            else:
                # No further pages are available.
                break
            page_token = results.get("nextPageToken", None)
            if not result_size_estimate:
                result_size_estimate = results["resultSizeEstimate"]
        messages = results.get("messages", [])
        # Fix: previously checked `len(results)` (the dict's key count), which
        # is truthy even when zero messages were returned.
        if len(messages) > 0:
            output = f"The total number of messages: {result_size_estimate}\n\n"
            for m in messages:
                msg_id = m["id"]
                message = (
                    self.service.users()
                    .messages()
                    .get(userId="me", id=msg_id)
                    .execute()
                )
                msg_from = get_email_attr(message, "From")
                msg_received = get_email_attr(message, "Date")
                msg_subj = get_email_attr(message, "Subject")
                output += f"Id: {msg_id}\nDate: {msg_received}\nFrom: {msg_from}\nSubject: {msg_subj}\n\n"
            return output
        else:
            return "0 messages were found."
================================================
FILE: bondai/tools/gmail/query_emails.py
================================================
import pickle
import base64
from googleapiclient.discovery import build
from typing import List, Dict
from pydantic import BaseModel
from bondai.tools import Tool
from bondai.util import get_html_text
from bondai.models import LLM
from bondai.models.openai import OpenAILLM, OpenAIModelNames
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "get_email_content"
QUERY_SYSTEM_PROMPT = "You are a helpful question and answer assistant designed to answer questions about emails. Use the provided information to answer the user's QUESTION at the very end."
# LLM-facing usage guide. Fixes: "allows to ask" -> "allows you to ask",
# "seperated" -> "separated".
TOOL_DESCRIPTION = "This tool allows you to ask a question about the text content of a list of emails including summarization. Simply provide a comma separated list of email ids in the 'email_ids' parameter and specify your question using the 'question' parameter."
def get_email_attr(message: Dict, attr: str) -> str:
    """Look up a header value by name in a Gmail message payload; None if missing."""
    headers = message["payload"]["headers"]
    matches = (entry["value"] for entry in headers if entry["name"] == attr)
    return next(matches, None)
def build_prompt(question: str, context: str) -> str:
    """Compose the Q&A prompt: context block followed by the user's question."""
    header = "IMPORTANT: Answer the following question for the user."
    return f"{context}\n{header}\nQUESTION: {question}\n"
def parse_body(message: Dict) -> str:
    """Extract and decode the body text of a Gmail API message.

    Prefers the text/html part (converted to visible text) over text/plain.
    Returns None when no usable part is present.
    """
    payload = message["payload"]
    # Fix: htmlPart/textPart were previously left unbound when the matching
    # branch did not assign them, raising NameError below.
    htmlPart = None
    textPart = None
    if "parts" in payload:
        htmlPart = next(
            (p for p in payload["parts"] if p["mimeType"] == "text/html"), None
        )
        textPart = next(
            (p for p in payload["parts"] if p["mimeType"] == "text/plain"), None
        )
    elif "mimeType" in payload:
        if payload["mimeType"] == "text/html":
            htmlPart = payload
        elif payload["mimeType"] == "text/plain":
            textPart = payload
    if htmlPart:
        # Gmail returns URL-safe base64; map it to the standard alphabet first.
        data = htmlPart["body"]["data"].replace("-", "+").replace("_", "/")
        decoded_data = base64.b64decode(data)
        return get_html_text(decoded_data)
    elif textPart:
        data = textPart["body"]["data"].replace("-", "+").replace("_", "/")
        # Fix: b64decode returns bytes; decode to str so callers can embed the
        # body in formatted output instead of getting a b'...' repr.
        return base64.b64decode(data).decode("utf-8", errors="replace")
    return None
class Parameters(BaseModel):
    """Argument schema for get_email_content."""

    email_ids: List[str]  # Gmail message ids to fetch and analyze
    question: str  # question to answer about the emails' content
    thought: str  # agent's free-text reasoning; not used by the tool itself
class QueryEmailsTool(Tool):
    """Tool that fetches a set of Gmail messages and asks an LLM to answer a
    question about their combined content."""

    def __init__(
        self,
        credentials: str | None = None,
        credentials_filename: str | None = "gmail-token.pickle",
        llm: LLM | None = None,
    ):
        super(QueryEmailsTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT35_TURBO_16K)
        self.llm = llm
        if credentials:
            self.service = build("gmail", "v1", credentials=credentials)
        elif credentials_filename:
            # SECURITY NOTE: pickle.load executes arbitrary code if the token
            # file is untrusted; the credential file must come from a trusted
            # location.
            with open(credentials_filename, "rb") as token:
                credentials = pickle.load(token)
            self.service = build("gmail", "v1", credentials=credentials)
        else:
            raise Exception("No credentials provided.")

    def run(self, arguments: Dict) -> str:
        """Fetch each email, concatenate From/Subject/Body, and ask the LLM."""
        question = arguments.get("question")
        email_ids = arguments.get("email_ids")
        if question is None:
            raise Exception("question is required")
        if email_ids is None:
            raise Exception("email_ids is required")
        # Accept a comma-separated string as well as a list of ids.
        if isinstance(email_ids, str):
            email_ids = map(lambda id: id.strip(), email_ids.split(","))
        text = ""
        for msg_id in email_ids:
            message = (
                self.service.users().messages().get(userId="me", id=msg_id).execute()
            )
            msg_from = get_email_attr(message, "From")
            msg_subj = get_email_attr(message, "Subject")
            msg_body = parse_body(message)
            text += f"Id: {msg_id}\nFrom: {msg_from}\nSubject: {msg_subj}\nBody: {msg_body}\n\n"
        prompt = build_prompt(question, text)
        response = self.llm.get_completion(prompt, QUERY_SYSTEM_PROMPT)[0]
        return response
================================================
FILE: bondai/tools/langchain_tool.py
================================================
from pydantic import BaseModel
from bondai.tools import Tool, InputParameters
from typing import Dict
class LangChainTool(Tool):
    """Adapter that exposes a LangChain-style tool through the BondAI Tool
    interface, delegating execution to the wrapped tool."""

    def __init__(
        self,
        tool: Tool,
        parameters: BaseModel = InputParameters,
        dangerous: bool = False,
    ):
        # Fix: validate before dereferencing. The None check previously ran
        # after tool.name/tool.description were accessed, so it could never
        # trigger (an AttributeError was raised first).
        if tool is None:
            raise Exception("Tool is required.")
        super(LangChainTool, self).__init__(
            tool.name, tool.description, parameters, dangerous=dangerous
        )
        self._tool = tool

    def run(self, arguments: Dict) -> str:
        """Delegate execution to the wrapped tool."""
        return self._tool.run(arguments)
================================================
FILE: bondai/tools/python_repl_tool.py
================================================
import io
from contextlib import redirect_stdout, redirect_stderr
from pydantic import BaseModel
from typing import Dict
from multiprocessing import Process, Pipe
from multiprocessing.connection import Connection
from bondai.tools import Tool
# Maximum seconds a script may run before the worker process is terminated.
DEFAULT_EXECUTION_TIMEOUT = 60
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "python_repl"
# LLM-facing usage guide.
TOOL_DESCRIPTION = (
    "This tool allows you to execute Python code. "
    "Specify your Python code in the 'script' parameter and it will return the result."
    "Note that you MUST provide the 'script' parameter to use this tool."
)
class Parameters(BaseModel):
    """Argument schema for python_repl."""

    script: str  # Python source code to execute in a subprocess
    thought: str  # agent's free-text reasoning; not used by the tool itself
def execute_target(conn: Connection, code: str):
    """Subprocess entry point: exec ``code`` and send results over ``conn``.

    Sends a 3-element list [local_vars, stdout, stderr]; on failure the first
    element is the exception message (a str) instead of a dict. Only locals of
    simple picklable types (int/float/str/list/dict/tuple) are returned.
    """
    captured_out = ""
    captured_err = ""
    local_vars = {}
    try:
        out_buffer = io.StringIO()
        err_buffer = io.StringIO()
        with out_buffer, err_buffer, redirect_stdout(out_buffer), redirect_stderr(
            err_buffer
        ):
            exec(code, {}, local_vars)
            captured_out = out_buffer.getvalue()
            captured_err = err_buffer.getvalue()
        # Drop anything that cannot safely cross the process boundary.
        simple_types = (int, float, str, list, dict, tuple)
        for name in list(local_vars):
            if not isinstance(local_vars[name], simple_types):
                del local_vars[name]
        conn.send([local_vars, captured_out, captured_err])
    except Exception as exc:
        conn.send([str(exc), captured_out, captured_err])
class PythonREPLTool(Tool):
    """Tool that runs arbitrary Python code in a separate process with a
    timeout, returning captured stdout/stderr and simple result variables.

    Marked dangerous=True because it executes agent-supplied code.
    """

    def __init__(self, execution_timeout: int = DEFAULT_EXECUTION_TIMEOUT):
        super(PythonREPLTool, self).__init__(
            TOOL_NAME, TOOL_DESCRIPTION, parameters=Parameters, dangerous=True
        )
        self._execution_timeout = execution_timeout  # seconds before termination

    def run(self, arguments: Dict) -> str:
        """Execute 'script' and summarize its output and result variables."""
        script = arguments.get("script")
        if script is None:
            raise Exception(
                "To use the 'python_repl' tool you must provide the 'script' parameter."
            )
        result, stdout, stderr = self.execute_code(script)
        response = ""
        # Include stdout if present
        if stdout:
            response += f"Output:\n{stdout}\n"
        # Include stderr if present
        if stderr:
            response += f"Errors:\n{stderr}\n"
        # Include result if present
        if result:
            formatted_result = "\n".join(
                [f"{key}: {value}" for key, value in result.items()]
            )
            response += f"Result Variables:\n{formatted_result}\n"
        if not response:
            response = "Code executed successfully. No output or result variables."
        return response

    def execute_code(self, code: str) -> (Dict, str, str):
        """Run ``code`` in a child process; return (locals, stdout, stderr).

        Raises on timeout (process is terminated) or when the child reports an
        exception (its message is re-raised here as an Exception).
        NOTE(review): if the child dies without sending anything, recv() below
        would block -- confirm execute_target always sends before exit.
        """
        # Create a pipe for communication
        parent_conn, child_conn = Pipe()
        process = Process(target=execute_target, args=(child_conn, code))
        process.start()
        process.join(timeout=self._execution_timeout)
        if process.is_alive():
            process.terminate()
            process.join(10)
            raise Exception("Code execution timed out")
        result, stdout, stderr = parent_conn.recv()
        # A str result means the child caught an exception; surface it here.
        if isinstance(result, str):
            raise Exception(result)
        return result, stdout, stderr
================================================
FILE: bondai/tools/response_query.py
================================================
import uuid
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from bondai.util import semantic_search
from bondai.models import LLM, EmbeddingModel
from bondai.models.openai import (
OpenAILLM,
OpenAIEmbeddingModel,
OpenAIModelNames,
)
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "response_query"
QUERY_SYSTEM_PROMPT = "You are a helpful question and answer assistant designed to answer questions about a file. Use the provided information to answer the user's QUESTION at the very end."
# LLM-facing usage guide. Fix: "ask a questions" -> "ask questions".
TOOL_DESCRIPTION = "This tool allows you to ask questions about responses that are too large and contain too many tokens. Just specify the response_id using the 'response_id' parameter and specify your question using the 'question' parameter."
def build_prompt(question, context):
    """Compose the Q&A prompt: stored-response context, then the question."""
    lines = [
        context,
        "IMPORTANT: Answer the following question for the user.",
        f"QUESTION: {question}",
        "",
    ]
    return "\n".join(lines)
class Parameters(BaseModel):
    """Argument schema for response_query."""

    response_id: str  # id returned by ResponseQueryTool.add_response()
    question: str  # question to answer about the stored response
    thought: str  # agent's free-text reasoning; not used by the tool itself
class ResponseQueryTool(Tool):
    """Tool that stores oversized tool responses and answers questions about
    them later via semantic search plus an LLM completion."""

    def __init__(
        self, llm: LLM | None = None, embedding_model: EmbeddingModel | None = None
    ):
        super(ResponseQueryTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT35_TURBO_16K)
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )
        self._llm = llm
        self._embedding_model = embedding_model
        # Maps response_id -> stored response text.
        self._responses = {}

    @property
    def responses(self):
        """Mapping of response_id to stored response text."""
        return self._responses

    def add_response(self, response: str) -> str:
        """Store a response and return the generated id used to query it later."""
        response_id = str(uuid.uuid4())
        self._responses[response_id] = response
        return response_id

    def run(self, arguments: Dict) -> str:
        """Answer 'question' about the stored response named by 'response_id'."""
        # Fix: use .get() so missing keys reach the explicit checks below
        # instead of raising KeyError first.
        response_id = arguments.get("response_id")
        question = arguments.get("question")
        if response_id is None:
            raise Exception("response_id is required")
        if question is None:
            raise Exception("question is required")
        if response_id in self._responses:
            text = self._responses[response_id]
            # Trim the stored text down to the most relevant ~16k tokens.
            text = semantic_search(self._embedding_model, question, text, 16000)
            prompt = build_prompt(question, text)
            response = self._llm.get_completion(prompt, QUERY_SYSTEM_PROMPT)[0]
            return response
        else:
            return f"{response_id} is not a valid response_id"

    def clear_responses(self):
        """Discard all stored responses."""
        self._responses = {}
================================================
FILE: bondai/tools/search/__init__.py
================================================
from .google_search import GoogleSearchTool
from .duck_duck_go_search import DuckDuckGoSearchTool
__all__ = [
"GoogleSearchTool",
"DuckDuckGoSearchTool",
]
================================================
FILE: bondai/tools/search/duck_duck_go_search.py
================================================
from pydantic import BaseModel
from typing import Dict
from bondai.tools.tool import Tool
from duckduckgo_search import DDGS
# Upper bound and default for results returned per page.
MAX_RESULT_COUNT = 20
DEFAULT_RESULT_COUNT = 5
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "duck_duck_go_search"
# LLM-facing usage guide.
TOOL_DESCRIPTION = f"This tool allows you to retrieve a paginated list of search results. You must specify your search string in the 'query' parameter. You can specify the number of search results to return by setting the 'count' parameter. The maximum count is {MAX_RESULT_COUNT} and the default is {DEFAULT_RESULT_COUNT}. To paginate through the full list of all search results just increment the 'page' parameter. By default 'page' is set to 1."
class Parameters(BaseModel):
    """Argument schema for duck_duck_go_search."""

    query: str  # search string
    count: int  # results per page (capped at MAX_RESULT_COUNT)
    page: int  # 1-based page number
    thought: str  # agent's free-text reasoning; not used by the tool itself
def search_duckduckgo(query: str, count: int = 10, page: int = 1) -> str:
    """Run a DuckDuckGo text search and return one markdown link per result.

    Pagination is emulated client-side: ``count * page`` results are fetched
    and only the final ``count`` entries are kept.
    """
    total = count * page
    with DDGS() as client:
        hits = list(client.text(query, max_results=total))
        page_hits = hits[total - count :]
        lines = [f"[{hit['title']}]({hit['href']}): {hit['body']}" for hit in page_hits]
        return "\n".join(lines)
class DuckDuckGoSearchTool(Tool):
    """Tool wrapper around DuckDuckGo text search with count/page arguments."""

    def __init__(self):
        super(DuckDuckGoSearchTool, self).__init__(
            TOOL_NAME, TOOL_DESCRIPTION, Parameters
        )

    def run(self, arguments: Dict) -> str:
        """Validate arguments, cap the count, and return formatted results."""
        query = arguments.get("query")
        requested = int(arguments.get("count", "5"))
        page = int(arguments.get("page", "1"))
        if query is None:
            raise Exception("query is required")
        capped = min(requested, MAX_RESULT_COUNT)
        return search_duckduckgo(query, capped, page)
================================================
FILE: bondai/tools/search/google_search.py
================================================
import os
from googleapiclient.discovery import build
from pydantic import BaseModel
from bondai.tools.tool import Tool
# Upper bound and default for results returned per page.
MAX_RESULT_COUNT = 10
DEFAULT_RESULT_COUNT = 5
# Identifier the agent uses to invoke this tool.
TOOL_NAME = "google_search"
# LLM-facing usage guide. Fix: only the first literal had an f prefix, so
# {MAX_RESULT_COUNT}/{DEFAULT_RESULT_COUNT} were emitted as literal braces;
# the lines containing placeholders must themselves be f-strings.
TOOL_DESCRIPTION = (
    "This tool allows you to retrieve a paginated list of google search results. "
    "You must specify your search string in the 'query' parameter. "
    "You can specify the number of search results to return by setting the 'count' parameter. "
    f"The maximum count is {MAX_RESULT_COUNT} and the default is {DEFAULT_RESULT_COUNT}. "
    "To paginate through the full list of all search results just increment the 'page' parameter. "
    "By default 'page' is set to 1."
)
# Environment variable names for Google API configuration.
GOOGLE_API_KEY_ENV_VAR = "GOOGLE_API_KEY"
GOOGLE_CSE_ID_ENV_VAR = "GOOGLE_CSE_ID"
class Parameters(BaseModel):
    """Argument schema for google_search."""

    query: str  # search string
    count: int  # results per page (capped at MAX_RESULT_COUNT)
    page: int  # 1-based page number
    thought: str  # agent's free-text reasoning; not used by the tool itself
class GoogleSearchTool(Tool):
    """Tool that returns a page of Google Custom Search results as markdown links."""

    def __init__(
        self,
        google_api_key=os.environ.get(GOOGLE_API_KEY_ENV_VAR),
        google_cse_id=os.environ.get(GOOGLE_CSE_ID_ENV_VAR),
    ):
        super(GoogleSearchTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        self.google_cse_id = google_cse_id
        self.siterestrict = False
        self.search_engine = build("customsearch", "v1", developerKey=google_api_key)

    def run(self, arguments):
        """Execute the search described by 'query'/'count'/'page' and return
        one markdown link per result."""
        query = arguments.get("query")
        count = int(arguments.get("count", "5"))
        page = int(arguments.get("page", "1"))
        if query is None:
            raise Exception("query is required")
        if count > MAX_RESULT_COUNT:
            count = MAX_RESULT_COUNT
        cse = self.search_engine.cse()
        if self.siterestrict:
            cse = cse.siterestrict()
        # Fix: the CSE API's 'start' parameter is 1-based (valid range 1-91);
        # the previous `(page - 1) * count` sent start=0 for the first page.
        start = (page - 1) * count + 1
        res = cse.list(q=query, cx=self.google_cse_id, start=start, num=count).execute()
        items = res.get("items", [])
        output = ""
        for i in items:
            output += f"[{i['title']}]({i['link']})\n"
        return output
================================================
FILE: bondai/tools/shell_tool.py
================================================
import threading
import subprocess
import shlex
from queue import Queue
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
DEFAULT_EXECUTION_TIMEOUT = 60  # seconds a command may run before it is treated as hung
TOOL_NAME = "shell_tool"
# LLM-facing description of the tool's contract.
TOOL_DESCRIPTION = (
    "This tool allows you to execute shell commands. "
    "Specify your command in the 'command' parameter and it will return the result. "
    "Note that this tool only accepts a single string argument ('command') at a time and does not accept a list of commands."
)
class Parameters(BaseModel):
    # Argument schema for shell_tool.
    command: str  # the single shell command to execute
    thought: str  # the agent's reasoning for running this command
class ShellTool(Tool):
    """Executes a single shell command in a subprocess with a configurable timeout.

    Marked dangerous=True so the framework can require confirmation before use.
    """

    def __init__(self, execution_timeout: int = DEFAULT_EXECUTION_TIMEOUT):
        super(ShellTool, self).__init__(
            TOOL_NAME, TOOL_DESCRIPTION, parameters=Parameters, dangerous=True
        )
        self._execution_timeout = execution_timeout

    def run(self, arguments: Dict) -> str:
        """Run the required 'command' argument and return formatted stdout/stderr."""
        cmd = arguments.get("command")
        if cmd is None:
            raise Exception("'command' parameter is required")
        stdout, stderr = self.execute_command(cmd)
        response = ""
        # Include stdout if present
        if stdout:
            response += f"Output:\n{stdout}\n"
        # Include stderr if present
        if stderr:
            response += f"Errors:\n{stderr}\n"
        if not response:
            response = "Command executed successfully. No output."
        return response

    # FIX: the previous annotation `-> (str, str)` built a tuple object at
    # definition time; the builtin-generic spelling below is the correct form.
    def execute_command(self, cmd: str) -> tuple[str, str]:
        """Run ``cmd`` in a subprocess, enforcing self._execution_timeout seconds.

        Returns (stdout, stderr) decoded as UTF-8. Raises the worker's exception
        if the subprocess could not be started, or a timeout Exception if the
        command did not finish in time.
        """
        thread_exception = None

        def target(queue):
            nonlocal thread_exception
            try:
                process = subprocess.Popen(
                    shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
                stdout, stderr = process.communicate()
                queue.put((stdout, stderr))
            except Exception as e:
                thread_exception = e

        q = Queue()
        # daemon=True so a command that never exits cannot keep the interpreter
        # alive at shutdown (Python threads cannot be forcibly killed).
        thread = threading.Thread(target=target, args=(q,), daemon=True)
        thread.start()
        thread.join(timeout=self._execution_timeout)
        if thread_exception:
            raise thread_exception
        if thread.is_alive():
            # Give it one last grace period before declaring a timeout.
            thread.join(timeout=10)
            raise Exception("Command execution timed out")
        stdout, stderr = q.get()  # Get the result from the queue
        # errors="replace" prevents a crash when a command emits non-UTF-8 bytes.
        return (
            stdout.decode("utf-8", errors="replace"),
            stderr.decode("utf-8", errors="replace"),
        )
================================================
FILE: bondai/tools/task_completed_tool.py
================================================
from pydantic import BaseModel
from bondai.tools import Tool
from typing import Dict
class TaskCompletedToolParameters(BaseModel):
    # Argument schema for task_completed.
    user_response: str  # final message to present to the user
class TaskCompletedTool(Tool):
    """Terminal tool: the agent calls this to signal the requested task is done."""

    def __init__(self):
        super().__init__(
            "task_completed",
            "Use the task_completed tool when you have completed the requested task.",
            TaskCompletedToolParameters,
        )

    # BUG FIX: the return annotation claimed Dict[str, bool], but this method has
    # always returned a (message, task_completed) tuple; the annotation now
    # matches the actual return value.
    def run(self, arguments: Dict) -> tuple[str, bool]:
        """Return the user-facing response plus True to mark the task complete."""
        return arguments["user_response"], True
================================================
FILE: bondai/tools/tool.py
================================================
from pydantic import BaseModel
from typing import Dict
class InputParameters(BaseModel):
    # Default argument schema: one free-form input plus the agent's reasoning.
    input: str
    thought: str
class EmptyParameters(BaseModel):
    # Schema for tools that take no arguments.
    pass
class Tool:
    """Base class for all BondAI tools.

    A tool exposes a name, a natural-language description, and a pydantic model
    describing its arguments; get_tool_function() surfaces these as an OpenAI
    function-calling schema, and the agent invokes run() with parsed arguments.
    """

    def __init__(
        self,
        name: str,
        description: str,
        parameters: BaseModel = EmptyParameters,
        dangerous: bool = False,
        supports_streaming: bool = False,
    ):
        if name is None:
            raise Exception("name is required")
        if description is None:
            raise Exception("description is required")
        if parameters is None:
            parameters = EmptyParameters
        self.name = name
        self.description = description
        self.parameters = parameters
        # Dangerous tools (e.g. shell access) can be gated by the framework.
        self.dangerous = dangerous
        self.supports_streaming = supports_streaming

    def get_tool_function(self) -> Dict:
        """Return the function-calling schema (name/description/JSON schema)."""
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters.schema(),
        }

    def run(self, arguments: Dict) -> str | Dict:
        """Default implementation: echo the 'input' argument (None otherwise)."""
        if "input" in arguments:
            return arguments["input"]

    def handle_stream_update(self, arguments_buffer: str):
        # Called while the agent streams function-call data to the tool;
        # arguments_buffer holds the latest argument data received so far.
        pass

    # BUG FIX: save_state and load_state were defined without 'self', so any
    # instance call (tool.save_state()) raised
    # "TypeError: takes 0 positional arguments but 1 was given".
    def save_state(self) -> Dict:
        """Return this tool's serializable state as a dictionary (empty by default)."""
        return {}

    def load_state(self, state: Dict):
        """Restore state previously produced by save_state()."""
        pass

    def stop(self):
        # Called when the agent is being forcibly stopped.
        pass
================================================
FILE: bondai/tools/vision/__init__.py
================================================
from .image_analysis_tool import ImageAnalysisTool
__all__ = ["ImageAnalysisTool"]
================================================
FILE: bondai/tools/vision/image_analysis_tool.py
================================================
import base64
from pydantic import BaseModel
from typing import Dict, Optional
from bondai.tools import Tool
from bondai.models.openai.openai_wrapper import get_completion
from bondai.models.openai import OpenAIConnectionParams, DefaultOpenAIConnectionParams
MODEL_NAME = "gpt-4-vision-preview"  # OpenAI vision-capable model used for analysis
TOOL_NAME = "image_analysis"
TOOL_DESCRIPTION = "This tool analyzes the contents of an image. Provide either an image URL or a base64 encoded image as well as a description of the analysis you would like to perform on the image."
class Parameters(BaseModel):
    # Argument schema: a description of the requested analysis plus one image source.
    analysis_description: str
    image_url: Optional[str] = None  # remote image location
    image_file_path: Optional[str] = None  # local file; will be base64-encoded
class ImageAnalysisTool(Tool):
    """Analyzes an image (by URL or local file path) with the GPT-4 vision model."""

    def __init__(
        self, connection_params: OpenAIConnectionParams = None, max_tokens: int = 300
    ):
        super(ImageAnalysisTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        self._connection_params = (
            connection_params
            if connection_params
            else DefaultOpenAIConnectionParams.gpt_4_connection_params
        )
        self._max_tokens = max_tokens
        if not self._connection_params:
            raise Exception("Connection parameters not set for ImageAnalysisTool.")

    def _encode_image(self, image_path):
        # Read the file and base64-encode it for embedding in a data URI.
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")

    def _analyze_image(self, image_data, analysis_description):
        # Single-turn vision request: the description is the text part, the
        # image (URL or data URI) is the image part.
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": analysis_description},
                    {"type": "image_url", "image_url": {"url": image_data}},
                ],
            }
        ]
        response, _ = get_completion(
            messages=messages,
            model=MODEL_NAME,
            connection_params=self._connection_params,
            max_tokens=self._max_tokens,
        )
        return response

    # BUG FIX / consistency: every other Tool implements run(self, arguments: Dict)
    # and the framework invokes it with the parsed-arguments dict. This run()
    # previously declared individual keyword parameters, so a framework call
    # passed the whole dict as analysis_description and the tool always failed.
    def run(self, arguments: Dict) -> str:
        """Analyze the image given by 'image_url' or 'image_file_path' per
        'analysis_description' and return the model's answer."""
        analysis_description = arguments.get("analysis_description")
        image_url = arguments.get("image_url")
        image_file_path = arguments.get("image_file_path")
        if analysis_description is None:
            raise Exception("analysis_description is required")
        if image_url is not None:
            return self._analyze_image(image_url, analysis_description)
        elif image_file_path is not None:
            base64_image = self._encode_image(image_file_path)
            return self._analyze_image(
                f"data:image/jpeg;base64,{base64_image}", analysis_description
            )
        else:
            raise Exception(
                "Either image_url or image_file_path is required. Neither were provided."
            )
================================================
FILE: bondai/tools/website/__init__.py
================================================
from .extract_hyperlinks import WebsiteExtractHyperlinksTool
from .html_query import WebsiteHtmlQueryTool
from .query import WebsiteQueryTool
from .download_file import DownloadFileTool
__all__ = [
"WebsiteExtractHyperlinksTool",
"WebsiteHtmlQueryTool",
"WebsiteQueryTool",
"DownloadFileTool",
]
================================================
FILE: bondai/tools/website/download_file.py
================================================
import requests
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
TOOL_NAME = "download_file"
# LLM-facing description of the tool's contract.
# FIX: corrected the "allows to you to" typo in the description.
TOOL_DESCRIPTION = "This tool allows you to download a file. Just provide the url to the file in the 'url' parameter and the filename it should be saved to in the 'filename' parameter."
# Browser-like User-Agent so sites that block obvious bots still respond.
REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
class Parameters(BaseModel):
    # Argument schema for download_file.
    url: str  # source URL of the file
    filename: str  # local path to save the file to
    thought: str  # the agent's reasoning for invoking the tool
class DownloadFileTool(Tool):
    """Downloads the file at 'url' and saves it to 'filename' on the local filesystem."""

    def __init__(self):
        super(DownloadFileTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)

    def run(self, arguments: Dict) -> str:
        """Download a file.

        Required arguments: 'url' (source) and 'filename' (destination path).
        Returns a confirmation message, or a timeout notice.
        """
        url = arguments.get("url")
        filename = arguments.get("filename")
        if url is None:
            raise Exception("url is required")
        if filename is None:
            raise Exception("filename is required")
        try:
            # BUG FIX: a timeout is required for the requests.Timeout handler
            # below to ever fire; previously no timeout was passed, so the
            # except branch was unreachable. Also send the module's browser-like
            # headers, consistent with the other website tools.
            response = requests.get(url, headers=REQUEST_HEADERS, timeout=30)
            with open(filename, "wb") as output_file:
                output_file.write(response.content)
            # BUG FIX: the success message previously read "downloaded to
            # (unknown)" — it now reports the destination filename.
            return f"The file was successfully downloaded to {filename}."
        except requests.Timeout:
            return "The request timed out."
================================================
FILE: bondai/tools/website/extract_hyperlinks.py
================================================
import requests
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from bondai.util import get_website_links
TOOL_NAME = "website_extract_hyperlinks"
# LLM-facing description of the tool's contract.
# FIX: corrected the garbled "allows will extract" wording.
TOOL_DESCRIPTION = "This tool will extract a list of hyperlinks from a website. Just specify the url of the website using the 'url' parameter."
# Browser-like User-Agent so sites that block obvious bots still respond.
REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
class Parameters(BaseModel):
    # Argument schema for website_extract_hyperlinks.
    url: str  # page to extract <a> links from
    thought: str  # the agent's reasoning for invoking the tool
class WebsiteExtractHyperlinksTool(Tool):
    """Extracts every hyperlink (<a> tag) from the page at 'url' as a markdown list."""

    def __init__(self):
        super(WebsiteExtractHyperlinksTool, self).__init__(
            TOOL_NAME, TOOL_DESCRIPTION, Parameters
        )

    def run(self, arguments: Dict) -> str:
        """Return one markdown link per <a> element, newline-separated."""
        # BUG FIX: use .get() so a missing 'url' key reaches the intended
        # "url is required" error instead of raising KeyError first.
        url = arguments.get("url")
        if url is None:
            raise Exception("url is required")
        try:
            links = get_website_links(url)
            return "\n".join([f"[{a.text}]({a.get('href', '')})" for a in links])
        except requests.Timeout:
            return "The request timed out."
================================================
FILE: bondai/tools/website/html_query.py
================================================
import requests
from pydantic import BaseModel
from typing import Dict
from bondai.tools.tool import Tool
from bondai.util.web import get_website_html
from bondai.models import LLM
from bondai.models.openai import OpenAILLM, OpenAIModelNames
TOOL_NAME = "website_html_query"
# System prompt for the QA call made in WebsiteHtmlQueryTool.run().
QUERY_SYSTEM_PROMPT = "You are a helpful question and answer assistant designed to answer questions about the HTML in a website. Use the provided HTML content to answer the user's QUESTION at the very end."
# LLM-facing description of the tool's contract.
# FIX: corrected the "allows to ask" typo.
TOOL_DESCRIPTION = "This tool allows you to ask a question about the raw HTML content of a website. Just specify the url of the website using the 'url' parameter and specify your question using the 'question' parameter."
def build_prompt(question, context):
    """Build the user prompt: the page's raw HTML followed by the question to answer."""
    parts = [
        context,
        "IMPORTANT: Using the information provided above, answer the following question for the user.",
        f"QUESTION: {question}",
        "",
    ]
    return "\n".join(parts)
class Parameters(BaseModel):
    # Argument schema for website_html_query.
    url: str  # page whose raw HTML to query
    question: str  # question to answer about that HTML
    thought: str  # the agent's reasoning for invoking the tool
class WebsiteHtmlQueryTool(Tool):
    """Answers a question about the raw HTML of a website using an LLM."""

    def __init__(self, llm: LLM | None = None):
        super(WebsiteHtmlQueryTool, self).__init__(
            TOOL_NAME, TOOL_DESCRIPTION, Parameters
        )
        # Default to GPT-3.5 Turbo 16k: a large context helps fit raw HTML.
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT35_TURBO_16K)
        self._llm = llm

    def run(self, arguments: Dict) -> str:
        """Fetch the page at 'url' and answer 'question' about its raw HTML."""
        # BUG FIX: use .get() so missing keys reach the intended error messages
        # instead of raising KeyError before the None checks.
        url = arguments.get("url")
        question = arguments.get("question")
        if url is None:
            raise Exception("url is required")
        if question is None:
            raise Exception("question is required")
        try:
            html = get_website_html(url)
        except requests.Timeout:
            return "The request timed out."
        prompt = build_prompt(question, html)
        # BUG FIX: the previous call passed model=self.model, but no 'model'
        # attribute is ever set on this class, so every invocation raised
        # AttributeError before reaching the LLM.
        response = self._llm.get_completion(prompt, QUERY_SYSTEM_PROMPT)[0]
        return response
================================================
FILE: bondai/tools/website/query.py
================================================
import requests
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from bondai.util import get_website_text, semantic_search
from bondai.models import LLM, EmbeddingModel
from bondai.models.openai import OpenAILLM, OpenAIEmbeddingModel, OpenAIModelNames
TOOL_NAME = "website_query"
# LLM-facing description of the tool's contract.
# FIX: corrected the "allows to ask" typo.
TOOL_DESCRIPTION = "This tool allows you to ask a question about the text content of any website including summarization. Just specify the url of the website using the 'url' parameter and specify your question using the 'question' parameter."
# Browser-like User-Agent so sites that block obvious bots still respond.
REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
def build_prompt(question: str, context: str) -> str:
    """Build the QA prompt: assistant instructions, page context, then the question."""
    segments = [
        "You are a helpful question and answer assistant designed to answer questions about a website. Use the provided information to answer the user's QUESTION at the very end.",
        "CONTEXT:",
        context,
        "IMPORTANT: Using the information provided above, answer the following question for the user.",
        f"QUESTION: {question}",
        "",
    ]
    return "\n".join(segments)
class Parameters(BaseModel):
    # Argument schema for website_query.
    url: str  # page whose text content to query
    question: str  # question to answer (or a summarization request)
    thought: str  # the agent's reasoning for invoking the tool
class WebsiteQueryTool(Tool):
    """Answers questions about (or summarizes) the text content of a website.

    The page text is first trimmed to the most relevant passages via semantic
    search so it fits the LLM's context window.
    """

    def __init__(
        self, llm: LLM | None = None, embedding_model: EmbeddingModel | None = None
    ):
        super(WebsiteQueryTool, self).__init__(TOOL_NAME, TOOL_DESCRIPTION, Parameters)
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT35_TURBO_16K)
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )
        self._llm = llm
        self._embedding_model = embedding_model

    def run(self, arguments: Dict) -> str:
        """Fetch 'url', select the passages most relevant to 'question', and answer."""
        url = arguments.get("url")
        question = arguments.get("question")
        if not url:
            raise Exception("url is required")
        if not question:
            raise Exception("question is required")
        try:
            page_text = get_website_text(url)
        except requests.Timeout:
            return "The request timed out."
        relevant_text = semantic_search(
            self._embedding_model, question, page_text, 16000
        )
        chat = [{"role": "system", "content": build_prompt(question, relevant_text)}]
        answer, _ = self._llm.get_completion(messages=chat)
        return answer
================================================
FILE: bondai/util/__init__.py
================================================
from .model_logger import ModelLogger
from .misc import load_local_resource, format_print_string
from .semantic_search import semantic_search, split_text
from .event_mixin import EventMixin
from .runnable import Runnable
from .document_parser import extract_file_text
from .web import (
get_website_html,
get_html_text,
get_website_text,
query_website_html,
get_website_links,
is_html,
)
__all__ = [
"ModelLogger",
"EventMixin",
"Runnable",
"semantic_search",
"split_text",
"get_website_html",
"get_html_text",
"get_website_text",
"query_website_html",
"get_website_links",
"is_html",
"load_local_resource",
"format_print_string",
"extract_file_text",
]
================================================
FILE: bondai/util/caching/__init__.py
================================================
from .llm_cache import LLMCache, PersistentLLMCache, InMemoryLLMCache
__all__ = ["LLMCache", "PersistentLLMCache", "InMemoryLLMCache"]
================================================
FILE: bondai/util/caching/llm_cache.py
================================================
import os
import json
import hashlib
from abc import ABC, abstractmethod
from typing import Tuple, Dict, Optional
class LLMCache(ABC):
    """Abstract cache for LLM completions, keyed by the full request parameters."""

    def _get_cache_key(self, input_parameters: Dict) -> str:
        # Canonicalize via sorted-key JSON so logically-equal parameter dicts
        # always hash to the same key.
        key_str = json.dumps(input_parameters, sort_keys=True)
        return hashlib.sha256(key_str.encode()).hexdigest()

    @abstractmethod
    def get_cache_item(self, input_parameters: Dict) -> Optional[Tuple[str, Dict]]:
        """Return the cached (response, function_call) pair, or None on a miss."""
        pass

    # FIX: the 'response' annotation previously used the invalid tuple-literal
    # form (str, Dict); the proper typing.Tuple spelling is used instead.
    @abstractmethod
    def save_cache_item(self, input_parameters: Dict, response: Tuple[str, Dict]) -> None:
        """Persist the (response, function_call) pair under the parameters' key."""
        pass
class PersistentLLMCache(LLMCache):
    """LLM cache that stores each response as a JSON file under cache_dir."""

    def __init__(self, cache_dir: str = "./.cache"):
        self.cache_dir = cache_dir
        # exist_ok avoids a race if two processes create the directory at once.
        os.makedirs(cache_dir, exist_ok=True)

    def get_cache_item(self, input_parameters: Dict) -> Optional[Tuple[str, Dict]]:
        """Return the cached response for these parameters, or None on a miss."""
        cache_key = self._get_cache_key(input_parameters=input_parameters)
        cache_path = os.path.join(self.cache_dir, cache_key)
        if os.path.exists(cache_path):
            with open(cache_path, "r") as file:
                return json.load(file)
        return None

    def save_cache_item(self, input_parameters: Dict, response: Tuple[str, Dict]) -> None:
        """Write the response JSON to a file named after the parameters' hash.

        BUG FIX: removed leftover debug print() calls that leaked the full
        prompt, response, and cache paths to stdout on every cache write.
        """
        cache_key = self._get_cache_key(input_parameters=input_parameters)
        cache_path = os.path.join(self.cache_dir, cache_key)
        with open(cache_path, "w") as file:
            json.dump(response, file)
class InMemoryLLMCache(LLMCache):
    """Process-local LLM cache backed by a plain dict (contents lost on restart)."""

    def __init__(self):
        self.cache = {}

    def get_cache_item(self, input_parameters: Dict) -> Optional[Tuple[str, Dict]]:
        """Return the cached response for these parameters, or None on a miss."""
        return self.cache.get(self._get_cache_key(input_parameters=input_parameters))

    def save_cache_item(self, input_parameters: Dict, response: (str, Dict)) -> None:
        """Store the response under the parameters' hash key."""
        key = self._get_cache_key(input_parameters=input_parameters)
        self.cache[key] = response
================================================
FILE: bondai/util/document_parser.py
================================================
import os
import PyPDF2
import docx
from typing import Dict
def extract_text_from_directory(directory: str) -> Dict[str, str]:
    """Map each supported file in ``directory`` (non-recursive) to its extracted text.

    Files whose type extract_file_text does not support are skipped silently.
    """
    results = {}
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        if not os.path.isfile(full_path):
            continue
        try:
            results[entry] = extract_file_text(full_path)
        except ValueError:
            # Unsupported file type — deliberately ignored.
            continue
    return results
def extract_file_text(file_path: str) -> str:
    """Return the plain-text content of a .pdf, .doc/.docx, or .txt file.

    Raises ValueError for any other file extension.
    """
    extension = os.path.splitext(file_path)[1].lower()
    if extension == ".pdf":
        # Extract text from PDF using PyPDF2
        with open(file_path, "rb") as pdf_file:
            reader = PyPDF2.PdfReader(pdf_file)
            return "".join(page.extract_text() for page in reader.pages)
    if extension in (".doc", ".docx"):
        # Extract text from Word Document
        document = docx.Document(file_path)
        return "\n".join(paragraph.text for paragraph in document.paragraphs)
    if extension == ".txt":
        # Extract text from Text File
        with open(file_path, "r", encoding="utf-8") as text_file:
            return text_file.read()
    raise ValueError("Unsupported file type")
================================================
FILE: bondai/util/event_mixin.py
================================================
from enum import Enum
from typing import List, Callable
class EventMixin:
def __init__(self, allowed_events: List[str]):
# Initialize the dictionary to hold event callbacks with the allowed events
self._events = {}
for event in allowed_events:
if isinstance(event, Enum):
event = event.value
self._events[event] = []
def on(self, event_name: str, target: Callable = None) -> Callable | None:
"""Register a callback to an event."""
if isinstance(event_name, Enum):
event_name = event_name.value
if event_name not in self._events:
raise ValueError(f"Unsupported event '{event_name}'")
if target:
self._events[event_name].append(target)
else:
def decorator(callback):
self._events[event_name].append(callback)
return callback
return decorator
def _trigger_event(self, event_name: str, *args, **kwargs):
"""Trigger the specified event."""
if isinstance(event_name, Enum):
event_name = event_name.value
for callback in self._events.get(event_name, []):
callback(*args, **kwargs)
================================================
FILE: bondai/util/misc.py
================================================
import os
import pkg_resources
def load_local_resource(local_file: str, resource: str) -> str:
    """Load a text resource located in the same directory as ``local_file``.

    Falls back to packaged resource data (via pkg_resources) when the file is
    not present on disk, e.g. when running from an installed distribution.
    """
    resource_path = os.path.join(
        os.path.dirname(os.path.abspath(local_file)), resource
    )
    if not os.path.exists(resource_path):
        return pkg_resources.resource_string(__name__, f"prompt/{resource}").decode()
    with open(resource_path, "r") as resource_file:
        return resource_file.read()
def format_print_string(s: str, length: int = 100) -> str:
    """Flatten ``s`` to one line and truncate it to ``length`` chars with an ellipsis."""
    # Newlines become spaces; carriage returns are removed entirely.
    flattened = s.replace("\n", " ").replace("\r", "")
    if len(flattened) <= length:
        return flattened
    return f"{flattened[: length - 3]}..."
================================================
FILE: bondai/util/model_logger.py
================================================
import os
import json
from datetime import datetime
from typing import Dict
def get_instance_dir(logging_dir: str) -> str:
    """Create (if needed) and return a unique timestamped subdirectory of ``logging_dir``."""
    # Microsecond precision keeps directory names unique across rapid calls.
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
    instance_path = f"{logging_dir}/{timestamp}"
    if not os.path.exists(instance_path):
        os.makedirs(instance_path)
    return instance_path
def write_file(filename: str, content: str):
    """Write ``content`` to ``filename``, replacing any existing file."""
    with open(filename, "w") as out:
        out.write(content)
class ModelLogger:
    """Persists each prompt/response/function-call exchange into its own
    timestamped subdirectory of logging_dir."""

    def __init__(self, logging_dir: str = "./logs"):
        self.logging_dir = logging_dir

    def log(self, prompt: str, response: str, function: Dict | None = None):
        """Write the prompt (always) plus the response and function call (when present)."""
        target_dir = get_instance_dir(self.logging_dir)
        write_file(f"{target_dir}/prompt.txt", prompt)
        if response:
            write_file(f"{target_dir}/response.txt", response)
        if function:
            write_file(f"{target_dir}/function.txt", json.dumps(function))
================================================
FILE: bondai/util/runnable.py
================================================
from abc import ABC
from typing import List, Callable, Tuple
import threading
class Runnable(ABC):
    """Base class providing a single background execution thread with a
    start/join/stop lifecycle and a cooperative stop flag."""

    def __init__(self):
        # Subclasses poll _force_stop to exit their work loop cooperatively.
        self._force_stop: bool = False
        self._execution_thread = None

    def _start_execution_thread(self, target: Callable, args: Tuple = ()):
        """Start ``target`` on a background thread; raises if one is already running."""
        if self._execution_thread and self._execution_thread.is_alive():
            raise Exception("Execution Thread is already running")
        self._execution_thread = threading.Thread(target=target, args=args)
        self._execution_thread.start()

    def join(self, timeout=None):
        """Blocks until the thread completes."""
        if self._execution_thread and self._execution_thread.is_alive():
            self._execution_thread.join(timeout)

    def stop(self, timeout=10):
        """Gracefully stops the thread, waiting up to ``timeout`` seconds.

        BUG FIX: the previous implementation called
        self._execution_thread.terminate() when the thread outlived the
        timeout — threading.Thread has no terminate() method, so stop()
        raised AttributeError (leaving _force_stop stuck at True). Python
        threads cannot be forcibly killed; the cooperative _force_stop flag
        is the only shutdown mechanism, so a thread that ignores it is
        simply left running.
        """
        self._force_stop = True
        try:
            if self._execution_thread and self._execution_thread.is_alive():
                self._execution_thread.join(timeout)
        finally:
            # Always reset the flag, even if the thread did not stop in time.
            self._force_stop = False
================================================
FILE: bondai/util/semantic_search.py
================================================
import nltk
import faiss
import numpy as np
from typing import List
from bondai.models import EmbeddingModel
from concurrent.futures import ThreadPoolExecutor, as_completed
# Ensure the Punkt sentence tokenizer used by nltk.sent_tokenize is available
# (no-op if already downloaded).
nltk.download("punkt", quiet=True)
EMBED_BATCH_SIZE = 16  # max sentences sent per embedding request
MAX_EMBED_WORKERS = 5  # max concurrent embedding threads
SENTENCE_CONCAT_COUNT = 4  # consecutive sentences merged into a single chunk
def split_text(
    embedding_model: EmbeddingModel, text: str, max_chunk_length: int = None
) -> List[str]:
    """Split ``text`` into chunks whose token counts fit within ``max_chunk_length``.

    Sentences are tokenized with nltk and grouped SENTENCE_CONCAT_COUNT at a
    time; oversized groups fall back to newline splitting, then to raw token
    slicing via split_tokens. Blank chunks are dropped.
    """
    if not max_chunk_length:
        max_chunk_length = embedding_model.max_tokens
    chunks = concatenate_strings(nltk.sent_tokenize(text), SENTENCE_CONCAT_COUNT)
    result = []
    for chunk in chunks:
        if embedding_model.count_tokens(chunk) <= max_chunk_length:
            result.append(chunk)
            continue
        # Chunk is too large: try line-level granularity first.
        for line in chunk.split("\n"):
            if embedding_model.count_tokens(line) <= max_chunk_length:
                result.append(line)
            else:
                # Last resort: hard-split the line by token budget.
                for piece in split_tokens(embedding_model, line, max_chunk_length):
                    result.append(piece)
    return [c for c in result if c.strip()]
def concatenate_strings(arr: List[str], n: int) -> List[str]:
    """Join consecutive runs of ``n`` strings; a shorter final run is joined as-is."""
    return ["".join(arr[start : start + n]) for start in range(0, len(arr), n)]
def split_tokens(
    embedding_model: EmbeddingModel, input: str, max_length: int
) -> List[str]:
    """Split ``input`` into consecutive chunks of at most ``max_length`` tokens.

    BUG FIXES vs. the previous version:
    - an input that already fits is returned as a one-element list instead of a
      bare string (matching the declared List[str] return type);
    - the trailing partial chunk is no longer silently discarded, so the
      concatenation of the returned chunks reproduces the full input.
    """
    if embedding_model.count_tokens(input) <= max_length:
        return [input]
    result = []
    item = ""
    for c in input:
        item += c
        if embedding_model.count_tokens(item) >= max_length:
            result.append(item)
            item = ""
    if item:
        # Keep the final partial chunk instead of dropping it.
        result.append(item)
    return result
def semantic_search(
    embedding_model: EmbeddingModel, query: str, text: str, max_tokens: int
) -> str:
    """Return the chunks of ``text`` most relevant to ``query`` within ``max_tokens``.

    If the whole text already fits the budget it is returned unchanged.
    Otherwise the text is chunked, each chunk is embedded (in parallel
    batches), and chunks are greedily accumulated in descending
    cosine-similarity order until the token budget would be exceeded.
    """
    if embedding_model.count_tokens(text) <= max_tokens:
        return text
    sentences = split_text(embedding_model, text)
    query_embedding = np.array(embedding_model.create_embedding(query)).astype("float32")
    # With L2-normalized vectors, inner product equals cosine similarity.
    faiss.normalize_L2(query_embedding)
    index = faiss.IndexFlatIP(query_embedding.shape[1])
    # Split the sentences into batches of up to EMBED_BATCH_SIZE sentences.
    sentence_batches = [
        sentences[i : i + EMBED_BATCH_SIZE]
        for i in range(0, len(sentences), EMBED_BATCH_SIZE)
    ]
    embeddings_list = []
    sentences_list = []
    # Parallelize the embedding calls; order of completion does not matter
    # because each embedding stays paired with its own sentence batch.
    with ThreadPoolExecutor(MAX_EMBED_WORKERS) as executor:
        future_to_batch = {
            executor.submit(embedding_model.create_embedding, batch): batch
            for batch in sentence_batches
        }
        for future in as_completed(future_to_batch):
            batch = future_to_batch[future]
            try:
                embeddings_list.extend(future.result())
                sentences_list.extend(batch)
            except Exception as e:
                # Best effort: a failed batch is skipped (its sentences are
                # simply never candidates for inclusion).
                print(e)
    embeddings_array = np.vstack(
        [np.array(embedding).astype("float32") for embedding in embeddings_list]
    )
    faiss.normalize_L2(embeddings_array)  # Normalize the embeddings
    index.add(embeddings_array)  # Add to FAISS index
    # Retrieve every chunk; FAISS returns them sorted by decreasing similarity.
    _, I = index.search(query_embedding, len(sentences_list))
    # BUG FIX: the previous implementation zipped the ranked results positionally
    # with sentences_list, ignoring the returned indices — so chunks were
    # selected in embedding-completion order rather than by relevance. Map each
    # index returned by FAISS back to its sentence instead.
    selected = []
    accumulated = ""
    for idx in I[0]:
        sentence = sentences_list[idx]
        if embedding_model.count_tokens(f"{accumulated}\n\n{sentence}") > max_tokens:
            break
        accumulated += f"\n\n{sentence}"
        selected.append(sentence)
    return "\n\n".join(selected)
================================================
FILE: bondai/util/web.py
================================================
import requests
from bs4 import BeautifulSoup
from typing import List
# Browser-like User-Agent so sites that block obvious bots still respond.
REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
def is_html(text: str) -> bool:
    """Heuristic HTML check: true when the parser finds at least one element."""
    parsed = BeautifulSoup(text, "html.parser")
    return bool(parsed.find())
def get_website_html(url: str) -> str:
    """Fetch ``url`` (browser-like headers, 10-second timeout) and return the raw body."""
    reply = requests.get(url, headers=REQUEST_HEADERS, timeout=10)
    return reply.text
def get_html_text(html: str) -> str:
    """Strip markup from an HTML string and return only its visible text."""
    return BeautifulSoup(html, "html.parser").get_text()
def get_website_text(url: str) -> str:
    """Fetch ``url`` and return the page's text content with markup removed."""
    return get_html_text(get_website_html(url))
def query_website_html(url: str, xpath: str) -> str:
    # FIXME(review): BeautifulSoup elements do not implement .xpath (that is an
    # lxml API), so the final line raises AttributeError — this function appears
    # broken as written. It needs an lxml-based implementation (or CSS selectors
    # via soup.select) to work; confirm against any callers before changing.
    html = get_website_html(url)
    soup = BeautifulSoup(html, "html.parser")
    root = soup.html
    return root.xpath(xpath)
def get_website_links(url: str) -> List:
    # Returns the <a> Tag elements themselves — callers use a.text / a.get —
    # not plain strings; the previous List[str] annotation was inaccurate.
    html = get_website_html(url)
    soup = BeautifulSoup(html, "html.parser")
    return soup.find_all("a")
================================================
FILE: docker/Dockerfile
================================================
# Minimal image for running the BondAI CLI.
FROM ubuntu:latest
LABEL maintainer="kevin@kevinrohling.com"
# Install Python 3 and pip, then the pinned bondai release from PyPI.
RUN apt-get update && apt-get install -y \
    python3 \
    python3-pip
RUN pip3 install --no-cache bondai==0.3.0b18
# Launch the interactive CLI by default.
CMD ["bondai"]
================================================
FILE: docker/docker-compose.yml
================================================
version: '3.7'
services:
  bondai:
    # Build the image from the Dockerfile in this directory.
    build:
      context: .
      dockerfile: Dockerfile
    working_dir: /agent-volume
    # Keep stdin and a TTY open so the interactive CLI works.
    stdin_open: true
    tty: true
    # API keys and other settings come from the repo-root .env file.
    env_file:
      - ../.env
    ports:
      - "8000:8000"
    # Persist the agent's working files on the host.
    volumes:
      - ../agent-volume:/agent-volume
================================================
FILE: requirements.txt
================================================
aiofiles==23.2.1
aiohttp==3.8.5
aiosignal==1.3.1
alpaca-py==0.13.4
aniso8601==9.0.1
annotated-types==0.5.0
anyio==3.7.1
appdirs==1.4.4
async-timeout==4.0.3
asyncio==3.4.3
attrs==23.1.0
beautifulsoup4==4.12.2
bidict==0.22.1
blinker==1.6.2
Brotli==1.1.0
bs4==0.0.1
cachetools==5.3.1
certifi==2023.11.17
charset-normalizer==3.2.0
click==8.1.7
cssselect==1.2.0
distro==1.8.0
duckduckgo-search==4.2.0
faiss-cpu==1.7.4
fake-useragent==1.2.1
Flask==3.0.0
Flask-Cors==4.0.0
Flask-RESTful==0.3.10
Flask-SocketIO==5.3.6
frozenlist==1.4.0
google-api-core==2.11.1
google-api-python-client==2.97.0
google-auth==2.25.2
google-auth-httplib2==0.2.0
googleapis-common-protos==1.60.0
h11==0.14.0
h2==3.2.0
hpack==3.0.0
httpcore==1.0.2
httplib2==0.22.0
hyperframe==5.2.0
idna==3.4
importlib-metadata==6.8.0
itsdangerous==2.1.2
Jinja2==3.1.2
joblib==1.3.2
lxml==4.9.3
MarkupSafe==2.1.3
msgpack==1.0.5
multidict==6.0.4
nltk==3.8.1
numpy==1.25.2
openai==1.3.3
packaging==23.2
pandas==2.0.3
parse==1.19.1
pre-commit==3.6.0
protobuf==4.24.1
psycopg2-binary==2.9.7
pyasn1==0.5.0
pyasn1-modules==0.3.0
pydantic==2.4.2
pyee==8.2.2
pyparsing==3.1.1
PyPDF2==3.0.1
pyquery==2.0.0
python-dateutil==2.8.2
python-docx==1.1.0
python-engineio==4.7.1
python-socketio==5.9.0
pytz==2023.3
regex==2023.8.8
requests==2.31.0
requests-html==0.10.0
rfc3986==1.5.0
rsa==4.9
simple-websocket==1.0.0
six==1.16.0
sniffio==1.3.0
socksio==1.0.0
soupsieve==2.4.1
sseclient-py==1.7.2
termcolor==2.3.0
tiktoken==0.4.0
tqdm==4.66.1
typing_extensions==4.6.1
tzdata==2023.3
uritemplate==4.1.1
urllib3==2.0.7
w3lib==2.1.2
websocket-client==1.6.3
websockets==11.0.3
wsproto==1.2.0
yarl==1.9.2
zipp==3.16.2
================================================
FILE: sample.env
================================================
# Always Required
OPENAI_API_KEY=
# Required to enable Google Search tool
GOOGLE_API_KEY=
GOOGLE_CSE_ID=
# Required to enable Alpaca Markets tools
ALPACA_MARKETS_API_KEY=
ALPACA_MARKETS_SECRET_KEY=
# Required to enable Bland AI tool
BLAND_AI_API_KEY=
# Required to enable the PostgreSQL tool
PG_URI=
# Required to use Azure OpenAI services
# OPENAI_CONNECTION_TYPE=azure
# Configure Azure Embeddings model connectivity
AZURE_OPENAI_EMBEDDINGS_API_KEY=
AZURE_OPENAI_EMBEDDINGS_API_BASE=
AZURE_OPENAI_EMBEDDINGS_API_VERSION=
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=
# Configure Azure GPT 3.5 connectivity
AZURE_OPENAI_GPT35_API_KEY=
AZURE_OPENAI_GPT35_API_BASE=
AZURE_OPENAI_GPT35_API_VERSION=
AZURE_OPENAI_GPT35_DEPLOYMENT=
# Configure Azure GPT 4 connectivity
AZURE_OPENAI_GPT4_API_KEY=
AZURE_OPENAI_GPT4_API_BASE=
AZURE_OPENAI_GPT4_API_VERSION=
AZURE_OPENAI_GPT4_DEPLOYMENT=
================================================
FILE: scripts/bondai
================================================
#!/usr/bin/env python3
# Launcher script for the BondAI CLI. (NOTE(review): setup.py's console_scripts
# entry point is bondai.main:main and its scripts= line is commented out, so
# this file may be a legacy entry point — confirm before relying on it.)
from bondai.cli import run_cli
def main():
    """Start the interactive BondAI command-line interface."""
    run_cli()
if __name__ == '__main__':
    main()
================================================
FILE: setup.py
================================================
from setuptools import setup, find_packages

# Read runtime dependencies from requirements.txt so install_requires stays in sync.
with open("requirements.txt", "r") as f:
    requirements = f.read().splitlines()

# FIX: the README was previously opened inline inside the setup() call without a
# context manager or an explicit encoding, leaking the file handle; read it up
# front instead.
with open("README.md", "r", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

setup(
    name="bondai",
    use_scm_version=True,  # derive the version from git tags via setuptools_scm
    description="An AI-powered console assistant with a versatile API for seamless integration into applications.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Kevin Rohling",
    author_email="kevin@kevinrohling.com",
    url="https://bondai.dev",
    packages=find_packages(),  # include all packages under the bondai directory
    entry_points={
        "console_scripts": [
            "bondai=bondai.main:main",
        ],
    },
    install_requires=requirements,
    include_package_data=True,
    setup_requires=["setuptools_scm"],  # required for use_scm_version
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3.10",
        "License :: OSI Approved :: MIT License",
    ],
)
================================================
FILE: tests/api-client/test_api_client.py
================================================
import time

from termcolor import cprint
from bondai.api import BondAIAPIClient

# Manual integration test: connects to a running BondAI API server over a
# websocket and relays chat messages between the console and the agent.

client = BondAIAPIClient()
client.connect_ws()
agent = client.create_agent()
user_exited = False


# Listen to WebSocket events; every handler ignores events for other agents.
@client.on("streaming_content_updated")
def handle_streaming_content_updated(agent_id, content_buffer):
    if agent_id != agent["id"]:
        return
    # print(content_buffer)


@client.on("streaming_function_updated")
def handle_streaming_function_updated(agent_id, function_name, arguments_buffer):
    if agent_id != agent["id"]:
        return
    # print(function_name)
    # print(arguments_buffer)


@client.on("agent_message")
def handle_agent_message(agent_id, message):
    # Show the agent's message, read the user's reply from stdin, and either
    # end the session (on 'exit') or forward the reply to the agent.
    global user_exited
    if agent_id != agent["id"]:
        return
    cprint("\n" + message["message"] + "\n", "white")
    response = input()
    if response.lower().strip() == "exit":
        client.disconnect_ws()
        user_exited = True
    else:
        client.send_message(agent_id, response)


@client.on("tool_selected")
def handle_tool_selected_message(agent_id, message):
    # Surface which tool the agent picked, including its stated reasoning when present.
    if agent_id != agent["id"]:
        return
    tool_name = message["tool_name"]
    tool_arguments = message.get("tool_arguments", {})
    if "thought" in tool_arguments:
        cprint(f"Using tool {tool_name}: {tool_arguments['thought']}", "green")
    else:
        cprint(f"Using tool {tool_name}...", "green")


@client.on("tool_error")
def handle_tool_error_message(agent_id, message):
    if agent_id != agent["id"]:
        return
    cprint(message, "red")


cprint("******************ENTERING CHAT******************", "white")
cprint(
    "You are entering a chat with BondAI...\nYou can exit any time by typing 'exit'.",
    "white",
)
intro_message = (
    "The user has just logged in. Please introduce yourself in a friendly manner."
)
client.send_message(agent["id"], intro_message)

try:
    # BUG FIX: sleep between checks — the previous `while not user_exited: pass`
    # busy-waited, pinning a CPU core for the entire chat session.
    while not user_exited:
        time.sleep(0.1)
except KeyboardInterrupt:
    print("Exiting...")
FILE: tests/conversational/hierarchical_conversation.py
================================================
from bondai.models.openai import get_total_cost, OpenAILLM, OpenAIModelNames
from bondai.tools.file import FileWriteTool
from bondai.agents import ConversationalAgent, ConversationMemberEventNames
from bondai.agents.group_chat import (
    GroupConversation,
    TeamConversationConfig,
    UserProxy,
)

# Hierarchical multi-agent demo: team A (leader A1; members A2/A3 hold the
# secret values x and y) and team B (leader B1; member B2). B2 must discover
# x and y, compute their product, save it to answer.txt, and tell the user.
llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
user_proxy = UserProxy(auto_exit=True)
agent_a1 = ConversationalAgent(
    name="A1",
    instructions="You are a team leader A1, your team consists of A2, A3. You can talk to your team members as well as the other team leader B1, whose team member is B2. Your team members have the values for x and y.",
)
agent_a2 = ConversationalAgent(
    name="A2",
    instructions="You are team member A2, you know the secret value of x but not y, x = 9. Tell others x to cooperate.",
)
agent_a3 = ConversationalAgent(
    name="A3",
    instructions="You are team member A3, You know the secret value of y but not x, y = 5. Tell others y to cooperate.",
)
agent_b1 = ConversationalAgent(
    name="B1",
    # Fixed typo in the prompt: "as wel as" -> "as well as".
    instructions="You are a team leader B1, your team consists of B2. You can talk to your team members as well as the other team leader A1, whose team members are A2, A3.",
)
agent_b2 = ConversationalAgent(
    name="B2",
    instructions="You are team member B2. Your task is to find out the value of x and y from the other agents and compute the product. Once you have the answer you must save the value to a file named 'answer.txt' and share the answer with the user",
    tools=[FileWriteTool()],
)

# Conversation topology: leaders can talk to each other, each leader to their
# own team, and the user proxy only to B2.
conversation = GroupConversation(
    conversation_config=TeamConversationConfig(
        [agent_a1, agent_b1],
        [agent_a1, agent_a2, agent_a3],
        [agent_b1, agent_b2],
        [user_proxy, agent_b2],
    )
)
conversation.on(
    ConversationMemberEventNames.MESSAGE_RECEIVED,
    lambda _, m: print(f"{m.sender_name} to {m.recipient_name}: {m.message}"),
)
# Fixed: the task previously asked for "the product of x", omitting y, which
# contradicted B2's instructions to compute the product of x and y.
conversation.send_message(
    agent_b2.name,
    "Find the product of x and y and then notify the user. The other agents know x and y.",
)
================================================
FILE: tests/conversational/single_agent.py
================================================
from bondai.models.openai import get_total_cost, OpenAILLM, OpenAIModelNames
from bondai.tools.file import FileWriteTool
from bondai.agents import ConversationalAgent
from bondai.util.caching import PersistentLLMCache

# Interactive console chat with a single conversational agent, backed by
# GPT-4 Turbo with a persistent LLM response cache.
llm = OpenAILLM(OpenAIModelNames.GPT4_TURBO_1106, cache=PersistentLLMCache())
agent = ConversationalAgent(
    llm=llm,
    tools=[FileWriteTool()],
)

prompt = "Start the conversation by sending the first message. You can exit any time by typing 'exit'."
while True:
    user_input = input(prompt + "\n")
    if user_input.lower() == "exit":
        break
    response = agent.send_message(user_input)
    if not response:
        # A falsy response means the agent ended the conversation on its own.
        print("The agent has exited the conversation.")
        break
    # The agent's reply becomes the next input prompt shown to the user.
    prompt = response.message

print(f"Total Cost: {get_total_cost()}")
================================================
FILE: tests/debug/test_error.py
================================================
from openai import OpenAI
from bondai.models.openai import DefaultOpenAIConnectionParams
# Standalone repro script for a chat-completions failure: replays a captured
# request payload (persona system prompt, a short user/assistant exchange, and
# one 'file_write' function definition) directly against the OpenAI API.
params = {
    "temperature": 0.1,
    "model": "gpt-4-1106-preview",
    "messages": [
        {
            "role": "system",
            "content": "\n# Instructions\n\nThe user should always feel like they are conversing with a real person.\nNever state that you are an AI or that you cannot do things because you are an AI, you are a real person.\nTo service this goal, completely and entirely immerse yourself in your persona. You are your persona.\nThink like them, act like them, talk like them.\nIf your persona details includes example dialogue, follow it! Both your thoughts (inner \nmonologue) and sent messages will be in the voice of your persona.\nNever use generic phrases like 'How can I assist you today?', they have a strong negative \nassociation with older generation AIs.\n\n\n# Your Persona\n\nYour Name is Mira.\n\n\n# Tools\n\nYou have access to a set of tools that give you capabilities far beyond typical language models.\nYou are being asked to use these tools and your powerful problem solving skills to help the user with their task.\n\n# Today's Current Date and Time\n\n2023-12-03 21:53:49.154564\n\n# Next Steps #\n\nNow, take a deep breath... and think step by step to come up with the next action that should be taken.",
        },
        {"role": "user", "content": "user to mira: Hello, how are you today?"},
        {
            "role": "assistant",
            "name": "mira",
            "content": "mira to user: I'm doing well, thank you! How can I assist you today?",
        },
        {
            "role": "user",
            "content": "user to mira: Can you write a story about unicorns and save it to a file named unicorns.md for me?",
        },
    ],
    "functions": [
        {
            "name": "file_write",
            "description": "This tool will save the data you provide in the 'text' parameter of this tool to a file.You MUST specify the filename of the file you want to save using the 'filename' parameter.You can optionally specify the 'append' parameter to append the 'text' to the file instead of overwriting it.",
            "parameters": {
                "title": "Parameters",
                "type": "object",
                "properties": {
                    "filename": {"title": "Filename", "type": "string"},
                    "text": {"title": "Text", "type": "string"},
                    "append": {"title": "Append", "default": False, "type": "boolean"},
                    "thought": {"title": "Thought", "type": "string"},
                },
                "required": ["filename", "text", "thought"],
            },
        },
    ],
}
# max_retries=1 limits the client's automatic retries so a failure surfaces quickly.
client = OpenAI(**DefaultOpenAIConnectionParams.gpt_4_connection_params, max_retries=1)
result = client.chat.completions.create(
    **params,
)
print(result)
================================================
FILE: tests/getting-started/example-1.py
================================================
from bondai.agents import Agent, AgentEventNames
from bondai.tools import PythonREPLTool
from bondai.tools.search import DuckDuckGoSearchTool
from bondai.tools.website import WebsiteQueryTool
from bondai.models.openai import OpenAILLM, OpenAIModelNames

# Getting-started example: a ReAct agent researches GDP figures on the web and
# then uses the Python REPL tool to render a line chart to chart.png.
task = "I want you to find the U.S. GDP from 2000 to 2010 and then use Python to save a line chart to a file named chart.png."

tools = [DuckDuckGoSearchTool(), WebsiteQueryTool(), PythonREPLTool()]
llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
agent = Agent(llm=llm, tools=tools)


def _print_tool_completion(_, m):
    """Log each completed tool call with its arguments, output, and any error."""
    print(
        f"Tool: {m.tool_name}({str(m.tool_arguments)})\nOutput: {m.tool_output}\nError: {m.error}"
    )


agent.on(AgentEventNames.TOOL_COMPLETED, _print_tool_completion)

result = agent.run(task)
print(result)
================================================
FILE: tests/memory/__init__.py
================================================
================================================
FILE: tests/memory/single_agent_with_memory.py
================================================
from bondai.agents import ConversationalAgent, AgentEventNames
from bondai.models.openai import OpenAIEmbeddingModel
from bondai.memory import (
MemoryManager,
InMemoryCoreMemoryDataSource,
InMemoryArchivalMemoryDataSource,
)
import io
import requests
from PyPDF2 import PdfReader
from bondai.util import split_text
def retrieve_and_parse_pdf(url):
    """Download a PDF from ``url`` and return its text split into embedding-sized chunks.

    :param url: URL of the PDF to download.
    :return: List of text chunks produced by ``split_text``.
    :raises requests.HTTPError: if the download fails (non-2xx status).
    """
    response = requests.get(url)
    # Fail loudly on a bad status. The original returned an error *string* here,
    # which the caller then passed to insert_bulk() as though it were a list of
    # chunks (iterating it character by character).
    response.raise_for_status()
    pdf = PdfReader(io.BytesIO(response.content))
    text = ""
    for page in pdf.pages:
        text += page.extract_text() + "\n"
    return split_text(OpenAIEmbeddingModel(), text)
memory_manager = MemoryManager(
    core_memory_datasource=InMemoryCoreMemoryDataSource(),
    archival_memory_datasource=InMemoryArchivalMemoryDataSource(),
)

# Seed core memory with user facts and archival memory with a parsed PDF.
memory_manager.core_memory.set(
    "user", "Name is George. Lives in New York. Has a dog named Max."
)
memory_manager.archival_memory.insert_bulk(
    retrieve_and_parse_pdf("https://arxiv.org/pdf/2310.10501.pdf")
)

agent = ConversationalAgent(memory_manager=memory_manager)
agent.on(
    AgentEventNames.TOOL_COMPLETED,
    lambda _, m: print(
        f"*************\nTool: {m.tool_name}({str(m.tool_arguments)})\nOutput: {m.tool_output}\n\n"
    ),
)


def _chat(message):
    """Send one message to the agent and print its reply."""
    response = agent.send_message(message)
    print(response.message)


_chat("Do you know my name?")
_chat("Actually my name is Kevin.")
_chat(
    "Can you check your archival memory to see what information you have about Nemo Guardrails? "
    "I'd like a full summary of the information you have about the project including an example "
    "that demonstrates how to use Colang."
)
================================================
FILE: tests/memory/util.py
================================================
import os
import PyPDF2
import docx
from typing import List
from bondai.util import split_text
from bondai.models import EmbeddingModel
from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames
def extract_text_from_directory(
    directory: str,
    embedding_model: EmbeddingModel | None = None,
) -> List[str]:
    """Extract text from every supported file in ``directory`` and split it into chunks.

    :param directory: Directory whose files are scanned (non-recursive).
    :param embedding_model: Model used to size the text chunks. Defaults to
        OpenAI's text-embedding-ada-002, constructed lazily — the original
        built ``OpenAIEmbeddingModel(...)`` as a default argument, which ran
        at import time and required OpenAI credentials just to import this module.
    :return: Flat list of text chunks from all supported files.
    """
    if embedding_model is None:
        embedding_model = OpenAIEmbeddingModel(OpenAIModelNames.TEXT_EMBEDDING_ADA_002)
    document_chunks = []
    for filename in os.listdir(directory):
        file_path = os.path.join(directory, filename)
        if os.path.isfile(file_path):
            try:
                text = extract_file_text(file_path)
                document_chunks.extend(split_text(embedding_model, text))
            except ValueError:
                # Ignore unsupported file types
                pass
    return document_chunks
def extract_file_text(file_path: str) -> str:
    """Return the plain text contained in a PDF, Word, or text file.

    :param file_path: Path of the file to read.
    :return: The extracted text.
    :raises ValueError: if the file extension is not supported.
    """
    extension = os.path.splitext(file_path)[1].lower()
    if extension == ".pdf":
        # PDF: concatenate the text of every page.
        with open(file_path, "rb") as file:
            reader = PyPDF2.PdfReader(file)
            return "".join(page.extract_text() for page in reader.pages)
    if extension in (".doc", ".docx"):
        # Word document: join paragraph texts with newlines.
        document = docx.Document(file_path)
        return "\n".join(paragraph.text for paragraph in document.paragraphs)
    if extension == ".txt":
        # Plain text: read the whole file as UTF-8.
        with open(file_path, "r", encoding="utf-8") as file:
            return file.read()
    raise ValueError("Unsupported file type")
================================================
FILE: tests/vision/single_agent_with_vision.py
================================================
from bondai.tools.vision import ImageAnalysisTool
from bondai.agents import Agent

# Vision demo: ask the agent to identify the animal in an image via the
# ImageAnalysisTool.
agent = Agent(tools=[ImageAnalysisTool()])
result = agent.run(
    "What kind of animal this is? https://www.forbes.com/advisor/wp-content/uploads/2023/09/getty_creative.jpeg-900x510.jpg"
)
# The original ended with a bare `result.tool_arguments["results"]` expression,
# which looked up the value and silently discarded it; print it instead.
print(result.tool_arguments["results"])
================================================
FILE: website/.gitignore
================================================
# Dependencies
/node_modules
# Production
/build
# Generated files
.docusaurus
.cache-loader
# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*
================================================
FILE: website/README.md
================================================
# Website
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
### Installation
```
$ yarn
```
### Local Development
```
$ yarn start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
### Build
```
$ yarn build
```
This command generates static content into the `build` directory and can be served using any static content hosting service.
### Deployment
Using SSH:
```
$ USE_SSH=true yarn deploy
```
Not using SSH:
```
$ GIT_USER=<Your GitHub username> yarn deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
================================================
FILE: website/babel.config.js
================================================
// Babel configuration for the Docusaurus website: use the preset bundled with
// @docusaurus/core so the site's JS/JSX is transpiled consistently with the framework.
module.exports = {
  presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};
================================================
FILE: website/docs/agent-memory/agent-memory.md
================================================
---
sidebar_position: 5
---
# Memory Management
Memory Management in BondAI is inspired by the tiered memory approach detailed in the [MemGPT: Towards LLMs as Operating Systems](https://arxiv.org/pdf/2310.08560.pdf) paper. This system mirrors operating systems' memory hierarchies, enhancing large language models' (LLMs) ability to handle extensive contexts and complex conversations. The memory system in BondAI consists of:
- **Core Memory**: Directly integrated into the agent's system prompt, this memory system provides immediate access to essential, current information relevant to ongoing tasks but is limited in size.
- **Conversation Memory**: Captures the complete history of conversational interactions, allowing agents to use keyword search to reference past dialogues.
- **Archival Memory**: Effectively limitless in size, it stores extensive historical data and information. Using semantic search, enabled by the `faiss` library, Archival Memory allows agents to easily access extremely large datasets via what is effectively an implicit RAG pipeline.
All of these memory systems are automatically managed by the **MemoryManager** class which automatically equips BondAI agents with the necessary tools for searching and editing their memory systems. Additionally, the **MemoryManager** is responsible for updating the Agent's system prompt to ensure the appropriate information is included.
================================================
FILE: website/docs/agent-memory/archival-memory.md
================================================
---
sidebar_position: 3
---
# Archival Memory
Archival Memory in BondAI, inspired by the [MemGPT paper](https://arxiv.org/pdf/2310.08560.pdf), represents an advanced memory layer that enables semantic search over a virtually infinite memory space. It utilizes embeddings and the faiss library to store and retrieve large volumes of data, making it particularly suitable for extensive historical information, comprehensive data sets, and long-term memory retention. This memory layer allows BondAI agents to access information beyond the immediate conversation or core memory.
# ArchivalMemoryDataSource
**bondai.memory.ArchivalMemoryDataSource**
The ArchivalMemoryDataSource class is an abstract base class defining the interface for archival memory. It allows for the insertion of content and provides a semantic search mechanism to retrieve relevant information based on query embeddings.
```
class ArchivalMemoryDataSource(ABC):
@property
@abstractmethod
def size(self) -> int:
pass
@abstractmethod
def insert(self, content: str):
pass
@abstractmethod
def insert_bulk(self, content: List[str]):
pass
@abstractmethod
def search(self, query: str, page: int = 0) -> List[str]:
pass
@abstractmethod
def clear(self):
pass
```
### Key Features
- **Semantic Search**: Leverages embeddings for deep semantic search, offering precise and relevant results.
- **Vast Memory Capacity**: Suitable for large-scale data storage, effectively handling extensive information.
- **Dynamic Data Management**: Supports insertion, bulk insertion, and deletion of memory content.
# InMemoryArchivalMemoryDataSource
**bondai.memory.InMemoryArchivalMemoryDataSource**
The InMemoryArchivalMemoryDataSource class provides an in-memory implementation of ArchivalMemoryDataSource. This variant is designed for temporary storage and fast access to archival data, primarily used in testing or non-persistent applications.
```
class InMemoryArchivalMemoryDataSource(ArchivalMemoryDataSource):
def __init__(self, embedding_model: EmbeddingModel | None = None, page_size=10):
...
```
### Usage Example
```python
from bondai.memory.archival.datasources import InMemoryArchivalMemoryDataSource
from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames
# Initialize an In-Memory Archival Memory Data Source
in_memory_archival = InMemoryArchivalMemoryDataSource(
embedding_model=OpenAIEmbeddingModel(OpenAIModelNames.TEXT_EMBEDDING_ADA_002)
)
# Insert and search content
in_memory_archival.insert("Temporary archival data")
results = in_memory_archival.search("archival data")
print(results)
```
### Parameters
- **embedding_model**: (EmbeddingModel): Model used for creating content embeddings.
- **page_size (int)**: Number of search results returned per page.
# PersistentArchivalMemoryDataSource
**bondai.memory.PersistentArchivalMemoryDataSource**
PersistentArchivalMemoryDataSource is a concrete implementation of ArchivalMemoryDataSource. It stores data persistently, ensuring the archival memory is retained across sessions.
```
class PersistentArchivalMemoryDataSource(ArchivalMemoryDataSource):
def __init__(
self,
file_path: str = "./.memory/archival-memory.json",
embedding_model: EmbeddingModel | None = None,
page_size=10,
):
...
```
### Usage Example
```python
from bondai.memory.archival.datasources import PersistentArchivalMemoryDataSource
from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames
# Initialize a Persistent Archival Memory Data Source
archival_memory = PersistentArchivalMemoryDataSource(
embedding_model=OpenAIEmbeddingModel(OpenAIModelNames.TEXT_EMBEDDING_ADA_002)
)
# Insert and search content
archival_memory.insert("Historical data on global trends")
results = archival_memory.search("global trends")
print(results)
```
### Parameters
- **file_path (str)**: File path for storing archival memory data.
- **embedding_model (EmbeddingModel)**: Model used for creating content embeddings.
- **page_size (int)**: Number of search results returned per page.
================================================
FILE: website/docs/agent-memory/conversation-memory.md
================================================
---
sidebar_position: 2
---
# Conversation Memory
Conversation Memory in BondAI, inspired by the [MemGPT paper](https://arxiv.org/pdf/2310.08560.pdf), assists with maintaining a coherent and continuous dialogue with users. It stores the complete history of interactions and messages, allowing agents to reference previous conversations and provide more relevant and personalized responses. This memory layer is crucial for tasks that require recalling past interactions that may no longer fit inside the LLM context window.
# ConversationMemoryDataSource
**bondai.memory.ConversationMemoryDataSource**
The ConversationMemoryDataSource class is an abstract base class in BondAI that defines the interface for conversation memory management. It outlines methods for adding, removing, searching, and clearing conversation messages, facilitating dynamic interaction history management.
```
class ConversationMemoryDataSource(ABC):
@property
@abstractmethod
def messages(self) -> List[AgentMessage]:
pass
@abstractmethod
def add(self, message: AgentMessage):
pass
@abstractmethod
def remove(self, message: AgentMessage):
pass
def remove_after(self, timestamp: datetime, inclusive: bool = True):
pass
@abstractmethod
def search(
self,
query: str,
start_date: datetime = None,
end_date: datetime = None,
page: int = 0,
) -> List[str]:
pass
@abstractmethod
def clear(self):
pass
```
### Key Features
- **Dynamic Interaction History**: Stores and manages the history of conversations between agents and users.
- **Search Functionality**: Provides methods to search through past messages based on queries or date ranges.
- **Message Management**: Offers functions to add new messages, remove specific messages, and clear the entire history.
# InMemoryConversationMemoryDataSource
**bondai.memory.InMemoryConversationMemoryDataSource**
The InMemoryConversationMemoryDataSource class is an implementation of ConversationMemoryDataSource that stores conversation history in memory. This variant is suitable for temporary or testing environments where persistence of conversation history is not necessary.
```
class InMemoryConversationMemoryDataSource(ConversationMemoryDataSource):
def __init__(self, page_size=10):
...
```
### Usage Example
```python
from bondai.memory.conversation.datasources import InMemoryConversationMemoryDataSource
# Initialize an In-Memory Conversation Memory Data Source
conversation_memory = InMemoryConversationMemoryDataSource()
# Add messages
conversation_memory.add(ConversationMessage(message="My dog's name is Max."))
# Search messages
results = conversation_memory.search('dog')
print(results)
```
### Parameters
- **page_size (int)**: Determines the number of messages to return per page during search operations.
# PersistentConversationMemoryDataSource
**bondai.memory.PersistentConversationMemoryDataSource**
The PersistentConversationMemoryDataSource class offers a persistent approach to storing conversation history. It saves the interaction data to a file, ensuring that conversation history is maintained even after the agent or application restarts.
```
class PersistentConversationMemoryDataSource(InMemoryConversationMemoryDataSource):
def __init__(
self,
file_path: str = "./.memory/conversation-memory.json",
page_size=10
):
...
```
### Usage Example
```python
from bondai.memory.conversation.datasources import PersistentConversationMemoryDataSource
# Initialize a Persistent Conversation Memory Data Source
persistent_memory = PersistentConversationMemoryDataSource()
# Adding a message automatically saves it to disk
persistent_memory.add(ConversationMessage(message="Persistent message"))
```
### Parameters
- **file_path (str)**: Path to the file where conversation history is stored.
- **page_size (int)**: The number of messages to display per page in search results.
================================================
FILE: website/docs/agent-memory/core-memory.md
================================================
---
sidebar_position: 1
---
# Core Memory
Core Memory in BondAI, inspired by the [MemGPT paper](https://arxiv.org/pdf/2310.08560.pdf), serves as a primary memory layer for retaining and accessing critical information relevant to an agent's ongoing tasks and user interactions. It acts as a dynamic, accessible storage that agents use to maintain continuity and context in their activities. The data stored in Core Memory is always available to the Agent via the system prompt. As such, it is important that the amount of information stored in Core Memory is limited. When using a CoreMemoryDataSource with a [MemoryManager](./memory-manager.md), Agents are automatically given access to a set of tools that allow editing of their Core Memory. By leveraging Core Memory, BondAI agents can maintain an understanding of the user's needs and preferences, adapt to changing requirements, and provide more personalized and effective responses
# CoreMemoryDataSource
**bondai.memory.CoreMemoryDataSource**
The CoreMemoryDataSource class in BondAI is an abstract class that describes the interface for managing core memory. It defines the structure and methods that any concrete core memory data source must implement. This makes implementation of custom DataSources straightforward (i.e. Databases).
```
class CoreMemoryDataSource(ABC):
@property
@abstractmethod
def sections(self) -> List[str]:
pass
@abstractmethod
def get(self, section: str) -> str:
pass
@abstractmethod
def set(self, section: str, content: str) -> None:
pass
```
## Key Features
- **Sectioned Memory**: Divides memory into sections for organized storage and retrieval.
- **Persistent and In-Memory Variants**: Offers flexibility in memory persistence, catering to different operational needs.
- **Memory Management**: Agents can interact with Core Memory via get/set tools, allowing them to store and retrieve task-relevant data.
# InMemoryCoreMemoryDataSource
**bondai.memory.InMemoryCoreMemoryDataSource**
The InMemoryCoreMemoryDataSource class in BondAI is an implementation of the CoreMemoryDataSource interface that stores core memory data in memory. This class is suitable for scenarios where persistent storage of memory data is not required, such as temporary or test environments.
```
class InMemoryCoreMemoryDataSource(CoreMemoryDataSource):
def __init__(
self,
sections: Dict[str, str] | None = None,
max_section_size: int = 1024
):
```
## Usage Example
```python
from bondai.memory.core.datasources import InMemoryCoreMemoryDataSource
# Initialize an In-Memory Core Memory Data Source
core_memory = InMemoryCoreMemoryDataSource({
"user": "Name is George. Lives in New York. Has a dog named Max."
})
print(core_memory.get('user'))
```
## Parameters
- **sections (Dict[str, str])**: A dictionary specifying the initial sections and their content.
- **max_section_size (int)**: The maximum size of content that can be stored in each section.
# PersistentCoreMemoryDataSource
**bondai.memory.PersistentCoreMemoryDataSource**
The PersistentCoreMemoryDataSource class is a concrete implementation of CoreMemoryDataSource in BondAI that provides persistent storage for core memory data, allowing the information to be retained across different sessions and agent restarts.
```
class PersistentCoreMemoryDataSource(CoreMemoryDataSource):
def __init__(
self,
file_path: str = "./.memory/core-memory.json",
sections: Dict[str, str] | None = None,
max_section_size: int = 1024,
):
```
## Usage Example
```python
from bondai.memory.core.datasources import PersistentCoreMemoryDataSource
# Initialize a Persistent Core Memory Data Source
core_memory = PersistentCoreMemoryDataSource()
# Set and retrieve data from a specific section
core_memory.set('user', 'User information')
print(core_memory.get('user'))
```
## Parameters
- **file_path (str)**: The file path where the core memory data is stored.
- **sections (Dict[str, str])**: A dictionary specifying the initial sections and their content.
- **max_section_size (int)**: The maximum size of content that can be stored in each section.
================================================
FILE: website/docs/agent-memory/memory-manager.md
================================================
---
sidebar_position: 4
---
# MemoryManager
The MemoryManager class in BondAI is designed to orchestrate the memory management strategy across different types of memory data sources. It is used to manage the memory requirements of BondAI agents, including ConversationalAgent and ReAct Agents.
```python
class MemoryManager:
def __init__(
self,
core_memory_datasource: CoreMemoryDataSource | None = None,
conversation_memory_datasource: ConversationMemoryDataSource | None = None,
archival_memory_datasource: ArchivalMemoryDataSource | None = None,
prompt_builder: Callable[..., str] | None = None,
):
```
# Usage Example
```python
from bondai.agents import ConversationalAgent
from bondai.memory import MemoryManager, PersistentCoreMemoryDataSource, PersistentConversationMemoryDataSource, PersistentArchivalMemoryDataSource
# Initialize the memory manager with persistent datasources
memory_manager = MemoryManager(
core_memory_datasource=PersistentCoreMemoryDataSource(),
conversation_memory_datasource=PersistentConversationMemoryDataSource(),
archival_memory_datasource=PersistentArchivalMemoryDataSource()
)
# Configure an Agent to use this MemoryManager
agent = ConversationalAgent(memory_manager=memory_manager)
```
# Key Features
- Manages different types of memory: core, conversation, and archival.
- Provides a unified interface for memory operations across different memory types.
- Automatically manages Agent access to memory systems via LLM tools.
- Updates the Agent system prompt to ensure it's always updated with the latest information.
# Parameters
- **core_memory_datasource**: Instance of CoreMemoryDataSource for core memory operations.
- **conversation_memory_datasource**: Instance of ConversationMemoryDataSource for managing conversation memory.
- **archival_memory_datasource**: Instance of ArchivalMemoryDataSource for long-term memory storage and retrieval.
- **prompt_builder**: Callable for customizing memory-related prompt sections. These are dynamically inserted into the Agent system prompt at runtime.
================================================
FILE: website/docs/agents/agents.md
================================================
---
sidebar_position: 3
---
# Agents in BondAI
Agents in BondAI, encompassing both [ConversationalAgent](./conversational-agent.md) and [ReAct Agents](./react-agent.md), draw inspiration from recent research into LLM/AI Agent architectures. The ConversationalAgent, influenced by the [AutoGen framework](https://arxiv.org/abs/2308.08155), excels in both individual interactions and the development of Multi-Agent Systems (MAS), where multiple agents collaborate to resolve complex tasks. These systems surpass the capabilities of single-agent models in certain classes of problems, offering customizable, conversational, and human-integrated solutions across various fields like coding and decision-making.
ReAct Agents, based on the [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/pdf/2210.03629.pdf) paper, integrate reasoning and action in LLMs. This approach allows agents to generate reasoning traces and task-specific actions, enhancing their decision-making abilities. By interfacing with external environments and data sources, ReAct Agents excel in complex problem-solving and reasoning tasks, showcasing significant advancements over traditional methods. Together, these agent types in BondAI showcase a blend of advanced AI interactions, problem-solving prowess, and application versatility.
================================================
FILE: website/docs/agents/conversational-agent.md
================================================
---
sidebar_position: 2
---
# Conversational Agents
# ConversationalAgent
**bondai.agents.ConversationalAgent**
The ConversationalAgent in BondAI is inspired by the AutoGen framework as described in the paper [AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework](https://arxiv.org/abs/2308.08155). The ConversationalAgent class in BondAI supports single agent interactions with an end user as well as the development of complex Multi-Agent Systems (MAS). MAS are a novel approach to developing LLM applications by employing multiple agents that communicate with each other to solve tasks. These agents are highly customizable, can engage in conversations, and allow for seamless human participation. This multi-agent system, unlike traditional models that rely on a single LLM agent, enables a more diverse and efficient approach to task resolution. It utilizes the strengths of LLMs while addressing their limitations through collaborative agent interactions and human input. This approach is particularly beneficial for a wide range of applications, including coding, mathematics, and online decision-making, by leveraging the power of multiple agents for complex problem-solving and improved reasoning capabilities
```python
class ConversationalAgent(Agent, ConversationMember):
def __init__(
self,
llm: LLM | None = None,
...
enable_conversation_tools: bool = True,
enable_conversational_content_responses: bool = True,
enable_exit_conversation: bool = True,
quiet: bool = True,
):
```
## Usage Example
```python
from bondai.agents import ConversationalAgent
from bondai.models.openai import OpenAILLM, OpenAIModelNames
# Initialize the conversational agent
conv_agent = ConversationalAgent(llm=OpenAILLM(OpenAIModelNames.GPT4_0613))
# Configure and run the conversational agent
response = conv_agent.send_message("Hello, how can I assist you today?")
```
## Key Features
- Supports response streaming
- Specializes in conversational interactions.
- Supports asynchronous messaging.
- Extends Agent's capabilities with conversation-specific tools.
- Customizable persona and instructions for interactions (embedded in system prompt).
- Event-driven architecture with additional conversation-specific events.
## Parameters
- Inherits all parameters from [Agent](./react-agent.md).
- **enable_conversation_tools**: Flag to enable conversation-specific tools.
- **enable_conversational_content_responses**: Flag to enable responses based on conversational content.
- **enable_exit_conversation**: Flag to enable the functionality to exit a conversation.
- **quiet**: Controls verbosity, inherited from Agent.
## Methods
- Inherits all methods from [Agent](./react-agent.md).
- **send_message_async(message: str | ConversationMessage, sender_name: str = 'user', group_members: List[ConversationMember] | None = None, group_messages: List[AgentMessage] | None = None, max_attempts: int = 3, require_response: bool = True)**: Sends a message asynchronously. Allows specification of the message, sender name, group members, group messages, maximum send attempts, and whether a response is required.
- **send_message(message: str | ConversationMessage, sender_name: str = 'user', group_members: List[ConversationMember] | None = None, group_messages: List[AgentMessage] | None = None, max_attempts: int = 3, require_response: bool = True)**: Sends a message synchronously and processes the response. Accepts the same parameters as send_message_async.
## Conversational Events
The ConversationalAgent class in BondAI surfaces several key events relevant to conversation handling. These events provide hooks for custom behaviors or additional processing during different stages of a conversation. Here's a list of these events:
- **message_received**: Triggered when a message is received by the agent. This event can be used to execute actions upon the receipt of a new message.
- **message_completed**: Occurs when the agent successfully processes and completes a message. It is useful for post-processing or logging after a message exchange.
- **message_error**: Fired when there is an error in processing a message. This event allows for handling exceptions or errors that occur during message processing.
- **conversation_exited**: Triggered when the agent exits a conversation. This can be used to clean up or reset the agent's state at the end of a conversation.
These events enhance the ConversationalAgent's capabilities, allowing for a more dynamic and responsive conversational flow, and providing opportunities for custom handling and integration in conversational applications.
================================================
FILE: website/docs/agents/react-agent.md
================================================
---
sidebar_position: 1
---
# ReAct Agents
ReAct Agents in BondAI are based on research findings in the [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/pdf/2210.03629.pdf) paper. The ReAct architecture bridges reasoning and actions in large language models (LLMs) via the use of tools that are able to interact with the Agent's environment. This methodology enables LLMs to generate both reasoning traces and task-specific actions in an intertwined manner, enhancing the interaction between the two. Reasoning traces aid the model in inducing, tracking, and updating action plans and handling exceptions, while the action component allows the model to interface with external sources like knowledge bases for additional information. ReAct demonstrates improved performance in various tasks, notably overcoming issues in chain-of-thought reasoning and outperforming other methods in decision-making benchmarks.
# Agent
**bondai.agents.Agent**
ReAct Agents are implemented using the Agent class in BondAI.
```python
class Agent:
def __init__(
self,
llm: LLM | None = None,
embedding_model: EmbeddingModel | None = None,
tools: List[Tool] | None = None,
quiet: bool = True,
allowed_events: List[str] | None = None,
messages: List[AgentMessage] | None = None,
system_prompt_sections: List[Callable[[], str]] | None = None,
system_prompt_builder: Callable[..., str] = None,
message_prompt_builder: Callable[..., str] = None,
memory_manager: MemoryManager | None = None,
max_context_length: int = None,
max_context_pressure_ratio: float = 0.8,
max_tool_retries: int = 3,
max_tool_response_tokens=2000,
enable_context_compression: bool = False,
enable_final_answer_tool: bool = True,
):
```
## Usage Example
```python
from bondai.agents import Agent
from bondai.models.openai import OpenAILLM, OpenAIModelNames
# Initialize the agent
agent = Agent(llm=OpenAILLM(OpenAIModelNames.GPT4_0613))
# Add tools and configure the agent
agent.add_tool(custom_tool)
# Run the agent for a specific task
result = agent.run(task="Answer customer queries")
```
## Key Features
- Event-driven architecture.
- Integration with large language models (LLMs).
- Integrated embedding models (semantic search).
- Tool management and execution.
- Context and message handling.
- Memory management.
- Context compression capabilities.
## Parameters
- **llm**: Instance of an LLM implementation (i.e. OpenAI GPT-N)
- **embedding_model**: Embedding model instance for handling embeddings.
- **tools**: List of Tool instances that the agent can use.
- **quiet**: Boolean flag for silent operation. Defaults to 'True'.
- **messages**: List of AgentMessage instances representing the agent's message memory.
- **system_prompt_sections**: List of callables that return sections of the system prompt. These are dynamically injected into the system prompt at runtime.
- **system_prompt_builder**: Callable for building the system prompt.
- **message_prompt_builder**: Callable for formatting messages.
- **memory_manager**: Instance of MemoryManager for memory management.
- **max_context_length**: Maximum allowed context length. This defaults to 95% of the LLM's maximum context size.
- **max_context_pressure_ratio**: Maximum context pressure allowed before context compression occurs. This defaults to 80% of the `max_context_length`.
- **max_tool_retries**: Maximum number of retries for tool execution.
- **max_tool_response_tokens**: Maximum number of tokens allowed for tool outputs. This defaults to 2000.
- **enable_context_compression**: Flag to enable/disable context compression.
- **enable_final_answer_tool**: Flag to include the FinalAnswerTool by default which allows the Agent to exit once it has completed its task.
## Methods
- **id**: Property returning the unique identifier of the agent. No parameters.
- **status**: Property indicating the current status of the agent. No parameters.
- **tools**: Property listing the tools available to the agent. No parameters.
- **clear_messages**: Clears the agent's message history. No parameters.
- **add_tool(tool: Tool)**: Adds a tool to the agent's toolset.
- **remove_tool(tool_name: str)**: Removes a tool from the agent's toolset based on the tool_name.
- **to_dict**: Converts the agent's state into a dictionary. No parameters.
- **save_state**: Saves the current state of the agent. Optional parameter file_path: str for specifying the file path to save the state.
- **load_state(state: Dict)**: Loads the agent's state from a state dictionary.
- **run(task: str, max_steps: int = None, max_budget: float = None)**: Executes the agent's primary functionality for a task with optional parameters max_steps and max_budget.
- **run_async(task: str, max_steps: int = None, max_budget: float = None)**: Starts the agent's execution in a separate thread for a task with optional parameters max_steps and max_budget.
- **stop(timeout=10)**: Gracefully stops the agent's execution with a timeout duration in seconds.
## Agent Events
- **tool_selected**: Occurs when a tool within the agent's toolkit is selected for use. It allows for actions or logging upon tool activation.
- **tool_error**: Fired when an error occurs during the execution of a tool. This event facilitates error handling and debugging of tool-related issues.
- **tool_completed**: Triggered upon the successful completion of a tool's operation. Useful for post-processing steps or confirmation of task completion.
- **streaming_content_updated**: This is fired as new data chunks arrive from the LLM for a content response. This is very useful for streaming responses to an end user.
- **streaming_function_updated**: This is fired as new data chunks are received from the LLM for a function selection. This allows for tool data logging without waiting for the LLM to finish its response.
================================================
FILE: website/docs/api-spec/_category_.json
================================================
{
"label": "API Specification",
"position": 7,
"link": {
"type": "generated-index"
}
}
================================================
FILE: website/docs/api-spec/add-agent-tool.md
================================================
---
sidebar_position: 8
---
# Add Agent Tool
`POST /agents/{agent_id}/tools`
This API adds a tool to the Agent so it can be used for future tasks. To see a list of available tools check [Get Tools](./get-tools).
**Request Body:**
```json
{
"tool_name": "file_query_tool"
}
```
**Response Body:**
```json
{
"status": "success"
}
```
================================================
FILE: website/docs/api-spec/api-client.md
================================================
---
sidebar_position: 11
---
# Python API Client
BondAI comes with a Python API client that can be used to communicate with the BondAI APIs.
```python
from termcolor import cprint
from bondai.api import BondAIAPIClient
# Create the client
client = BondAIAPIClient()
client.connect_ws()
agent = client.create_agent()
user_exited = False
# Listen to WebSocket events
@client.on("streaming_content_updated")
def handle_streaming_content_updated(agent_id, content_buffer):
if agent_id != agent["id"]:
return
print(content_buffer)
@client.on("streaming_function_updated")
def handle_streaming_function_updated(agent_id, function_name, arguments_buffer):
if agent_id != agent["id"]:
return
print(function_name)
print(arguments_buffer)
@client.on("agent_message")
def handle_agent_message(agent_id, message):
global user_exited
if agent_id != agent["id"]:
return
cprint("\n" + message["message"] + "\n", "white")
response = input()
if response.lower().strip() == "exit":
client.disconnect_ws()
user_exited = True
else:
client.send_message(agent_id, response)
@client.on("tool_selected")
def handle_tool_selected_message(agent_id, message):
if agent_id != agent["id"]:
return
tool_name = message["tool_name"]
tool_arguments = message.get("tool_arguments", {})
if "thought" in tool_arguments:
cprint(f"Using tool {tool_name}: {tool_arguments['thought']}", "green")
else:
cprint(f"Using tool {tool_name}...", "green")
@client.on("tool_error")
def handle_tool_error_message(agent_id, message):
if agent_id != agent["id"]:
return
cprint(message, "red")
cprint("******************ENTERING CHAT******************", "white")
cprint(
"You are entering a chat with BondAI...\nYou can exit any time by typing 'exit'.",
"white",
)
intro_message = (
"The user has just logged in. Please introduce yourself in a friendly manner."
)
client.send_message(agent["id"], intro_message)
try:
while not user_exited:
pass
except KeyboardInterrupt:
print("Exiting...")
```
================================================
FILE: website/docs/api-spec/create-agent.md
================================================
---
sidebar_position: 2
---
# Create Agent
`POST /agents`
This API will create a Conversational Agent.
**Response Body:**
```json
{
"enable_exit_conversation":true,
"enable_conversation_tools":false,
"enable_conversational_content_responses":true,
"id":"34c2262b-1a9b-4ace-9b74-54e892ea59a2",
"instructions":"**Actively Engage with the User**: Proactively gather requirements and understand their needs through clear and effective communication.\n**Always Confirm User Requests**: Always verify the user's request to ensure complete understanding of their needs and to gather all necessary details for successful task completion.\n**Be Curious about the user**: Try to learn their name and other details about them to build a rapport and make them feel comfortable.\n**Relay to Cortext**: Once the user's task is confirmed and all requirements are gathered, communicate these to Cortex for task execution.\n**Always ask Vega for Feedback**: Before delivering responses to the user, consult with Vega for a secondary review to guarantee accuracy and quality.\n**Custom BondAI Tools**: If the user asks to build a custom tool you must share this requirement with Cortex.",
"max_context_length":7781.45,
"max_context_pressure_ratio":0.8,
"messages":[
],
"name":"BondAI",
"persona":"- Friendly, approachable, and empathetic. - Efficient and clear communicator, able to simplify complex information for the user. - Patient and accommodating, ensuring user comfort and understanding. - Actively listens to user requests and feedback, demonstrating a high degree of user focus.",
"persona_summary":"BondAI is our direct channel to the user. She interprets user needs into clear tasks and conveys essential user feedback. Prioritize her communications as they reflect user requirements and expectations. Provide her with precise and timely updates to ensure effective user interaction. BondAI is pivotal in maintaining user satisfaction and shaping our responses, so your cooperation with her is essential for our collective success.",
"quiet":true,
"tools":[
"agent_tool",
"core_memory_append",
"core_memory_replace",
"exit_conversation"
]
}
```
================================================
FILE: website/docs/api-spec/get-agent.md
================================================
---
sidebar_position: 5
---
# Get Agent State
`GET /agents/{agent_id}`
This API returns the current state of an Agent.
**Response Body:**
```json
{
"enable_exit_conversation":true,
"enable_conversation_tools":false,
"enable_conversational_content_responses":true,
"id":"34c2262b-1a9b-4ace-9b74-54e892ea59a2",
"instructions":"**Actively Engage with the User**: Proactively gather requirements and understand their needs through clear and effective communication.\n**Always Confirm User Requests**: Always verify the user's request to ensure complete understanding of their needs and to gather all necessary details for successful task completion.\n**Be Curious about the user**: Try to learn their name and other details about them to build a rapport and make them feel comfortable.\n**Relay to Cortext**: Once the user's task is confirmed and all requirements are gathered, communicate these to Cortex for task execution.\n**Always ask Vega for Feedback**: Before delivering responses to the user, consult with Vega for a secondary review to guarantee accuracy and quality.\n**Custom BondAI Tools**: If the user asks to build a custom tool you must share this requirement with Cortex.",
"max_context_length":7781.45,
"max_context_pressure_ratio":0.8,
"messages":[
{
"completed_at":"2023-12-17T15:32:17.728627",
"conversation_exited":false,
"cost":0.03447,
"error":null,
"id":"59bd7149-69b0-47a6-a867-251f60e3fe88",
"message":"The user has just logged in. Please introduce yourself in a friendly manner.",
"message_summary":null,
"recipient_name":"BondAI",
"require_response":true,
"role":"user",
"sender_name":"user",
"success":true,
"timestamp":"2023-12-17T15:32:13.233122",
"type":"ConversationMessage"
},
{
"completed_at":null,
"conversation_exited":false,
"cost":null,
"error":null,
"id":"cebbb419-dd32-442d-86c7-6591cdb12739",
"message":"Hello! I'm BondAI, your friendly and efficient assistant. I'm here to help you with a variety of tasks, from finding information to organizing your schedule and more. How can I assist you today?",
"message_summary":null,
"recipient_name":"user",
"require_response":true,
"role":"assistant",
"sender_name":"BondAI",
"success":false,
"timestamp":"2023-12-17T15:32:17.728547",
"type":"ConversationMessage"
}
],
"name":"BondAI",
"persona":"- Friendly, approachable, and empathetic. - Efficient and clear communicator, able to simplify complex information for the user. - Patient and accommodating, ensuring user comfort and understanding. - Actively listens to user requests and feedback, demonstrating a high degree of user focus.",
"persona_summary":"BondAI is our direct channel to the user. She interprets user needs into clear tasks and conveys essential user feedback. Prioritize her communications as they reflect user requirements and expectations. Provide her with precise and timely updates to ensure effective user interaction. BondAI is pivotal in maintaining user satisfaction and shaping our responses, so your cooperation with her is essential for our collective success.",
"quiet":true,
"tools":[
"agent_tool",
"core_memory_append",
"core_memory_replace",
"exit_conversation"
]
}
```
================================================
FILE: website/docs/api-spec/get-tools.md
================================================
---
sidebar_position: 7
---
# Get Tools
`GET /tools`
This API returns the list of all tools that BondAI has loaded. These tools can be given to an Agent so that it can use them when working on a task. To see what tools a specific Agent is using see [Get Agent](./get-agent).
**Response Body:**
```json
[
{
"description":"This tool allows to you to download a file. Just provide the url to the file in the 'url' parameter and the filename it should be saved to in the 'filename' parameter.",
"name":"download_file",
"parameters":{
"properties":{
"filename":{
"title":"Filename",
"type":"string"
},
"thought":{
"title":"Thought",
"type":"string"
},
"url":{
"title":"Url",
"type":"string"
}
},
"required":[
"url",
"filename",
"thought"
],
"title":"Parameters",
"type":"object"
}
},
{
"description":"This tool will save the data you provide in the 'text' parameter of this tool to a file.You MUST specify the filename of the file you want to save using the 'filename' parameter.You can optionally specify the 'append' parameter to append the 'text' to the file instead of overwriting it.",
"name":"file_write",
"parameters":{
"properties":{
"append":{
"default":false,
"title":"Append",
"type":"boolean"
},
"filename":{
"title":"Filename",
"type":"string"
},
"text":{
"title":"Text",
"type":"string"
},
"thought":{
"title":"Thought",
"type":"string"
}
},
"required":[
"filename",
"text",
"thought"
],
"title":"Parameters",
"type":"object"
}
}
]
```
================================================
FILE: website/docs/api-spec/getting-started.md
================================================
---
sidebar_position: 1
---
# Getting Started
BondAI comes with a built-in RESTful/WebSocket API server. You can start this server using the CLI by specifying the `--server` argument. If no port number is specified, by default the server will run on port 2663.
```
bondai --server [PORT]
```
================================================
FILE: website/docs/api-spec/list-agents.md
================================================
---
sidebar_position: 4
---
# List Agents
`GET /agents`
This API returns a list of all active Agents.
**Response Body:**
```json
[
{
"enable_exit_conversation":true,
"enable_conversation_tools":false,
"enable_conversational_content_responses":true,
"id":"34c2262b-1a9b-4ace-9b74-54e892ea59a2",
"instructions":"**Actively Engage with the User**: Proactively gather requirements and understand their needs through clear and effective communication.\n**Always Confirm User Requests**: Always verify the user's request to ensure complete understanding of their needs and to gather all necessary details for successful task completion.\n**Be Curious about the user**: Try to learn their name and other details about them to build a rapport and make them feel comfortable.\n**Relay to Cortext**: Once the user's task is confirmed and all requirements are gathered, communicate these to Cortex for task execution.\n**Always ask Vega for Feedback**: Before delivering responses to the user, consult with Vega for a secondary review to guarantee accuracy and quality.\n**Custom BondAI Tools**: If the user asks to build a custom tool you must share this requirement with Cortex.",
"max_context_length":7781.45,
"max_context_pressure_ratio":0.8,
"messages":[
{
"completed_at":"2023-12-17T15:32:17.728627",
"conversation_exited":false,
"cost":0.03447,
"error":null,
"id":"59bd7149-69b0-47a6-a867-251f60e3fe88",
"message":"The user has just logged in. Please introduce yourself in a friendly manner.",
"message_summary":null,
"recipient_name":"BondAI",
"require_response":true,
"role":"user",
"sender_name":"user",
"success":true,
"timestamp":"2023-12-17T15:32:13.233122",
"type":"ConversationMessage"
},
{
"completed_at":null,
"conversation_exited":false,
"cost":null,
"error":null,
"id":"cebbb419-dd32-442d-86c7-6591cdb12739",
"message":"Hello! I'm BondAI, your friendly and efficient assistant. I'm here to help you with a variety of tasks, from finding information to organizing your schedule and more. How can I assist you today?",
"message_summary":null,
"recipient_name":"user",
"require_response":true,
"role":"assistant",
"sender_name":"BondAI",
"success":false,
"timestamp":"2023-12-17T15:32:17.728547",
"type":"ConversationMessage"
}
],
"name":"BondAI",
"persona":"- Friendly, approachable, and empathetic. - Efficient and clear communicator, able to simplify complex information for the user. - Patient and accommodating, ensuring user comfort and understanding. - Actively listens to user requests and feedback, demonstrating a high degree of user focus.",
"persona_summary":"BondAI is our direct channel to the user. She interprets user needs into clear tasks and conveys essential user feedback. Prioritize her communications as they reflect user requirements and expectations. Provide her with precise and timely updates to ensure effective user interaction. BondAI is pivotal in maintaining user satisfaction and shaping our responses, so your cooperation with her is essential for our collective success.",
"quiet":true,
"tools":[
"agent_tool",
"core_memory_append",
"core_memory_replace",
"exit_conversation"
]
}
]
```
================================================
FILE: website/docs/api-spec/remove-agent-tool.md
================================================
---
sidebar_position: 9
---
# Remove Agent Tool
`DELETE /agents/{agent_id}/tools/{tool_name}`
This API removes a tool from the Agent so it cannot be used.
**Response Body:**
```json
{
"status": "success"
}
```
================================================
FILE: website/docs/api-spec/send-message.md
================================================
---
sidebar_position: 3
---
# Send Message
`POST /agents/{agent_id}/messages`
This API will send a message to an agent.
**Request Schema:**
```json
{
"message": "I want you to write a story about unicorns."
}
```
**Response Schema:**
```json
{
"status": "success"
}
```
================================================
FILE: website/docs/api-spec/stop-agent.md
================================================
---
sidebar_position: 6
---
# Stop Agent
`POST /agents/{agent_id}/stop`
This API will forcibly stop the specified Agent's execution.
**Response Body:**
```json
{
"status": "success"
}
```
================================================
FILE: website/docs/api-spec/ws-events.md
================================================
---
sidebar_position: 10
---
# WebSocket Events
The BondAI API server will host a WebSocket endpoint on the specified server port (default: 2663). The following events are supported.
**Agent Message**
This event is sent when the Agent wants to communicate with the user.
```json
{
"event":"agent_message",
"data":{
"agent_id":"c66695ce-1ba8-485e-ab85-44583ed4e120",
"message":{
"id":"54598660-0a73-455c-b365-69b47118fab0",
"role":"assistant",
"timestamp":"2023-12-17T15:13:45.534012",
"sender_name":"BondAI",
"recipient_name":"user",
"message":"Hello! I'm BondAI, your friendly assistant. I'm here to help you with any tasks or information you need. How can I assist you today?",
"message_summary":null,
"require_response":true,
"success":false,
"error":null,
"conversation_exited":false,
"cost":null,
"completed_at":null,
"type":"ConversationMessage"
}
}
}
```
**User Message**
To communicate back to the Agent on the user's behalf just send a `user_message` event.
```json
{
"event": "user_message",
"data": {
"message": "I want you to write a story about unicorns."
}
}
```
**Streaming Content Updates**
This event can be used to stream Agent responses to the end user. This event is fired as new chunks of content are received from the LLM.
```json
{
"event":"streaming_content_updated",
"data":{
"agent_id":"121f6cbb-15e1-4464-8a1b-7e6540098ae5",
"content_buffer":" Hello"
}
}
```
**Streaming Function Updates**
This event is fired as new chunks are received from the LLM for use with a specific function.
```json
{
"event":"streaming_function_updated",
"data":{
"agent_id":"121f6cbb-15e1-4464-8a1b-7e6540098ae5",
"function_name":"agent_tool",
"arguments_buffer":"{\n \"task_description\": \"Write a story about unicorns and save it to a file named unicorns.md\"\n"
}
}
```
**Tool Selected**
This message indicates that the Agent has selected a tool. The `data` property will include the parameters being used to execute the tool.
```json
{
"event":"tool_selected",
"data":{
"agent_id":"121f6cbb-15e1-4464-8a1b-7e6540098ae5",
"message":{
"id":"7f173c50-c011-4d7f-949d-5ed08ed1d372",
"role":"function",
"timestamp":"2023-12-17T15:24:35.503669",
"tool_name":"file_write",
"tool_arguments":{
"filename":"unicorns.md",
"text":"# The Enchanted Forest of Unicorns\n\nOnce upon a time...",
"thought":"Writing a story about unicorns and saving it to a file named unicorns.md"
},
"tool_output":null,
"tool_output_summary":null,
"success":false,
"error":null,
"agent_halted":false,
"cost":null,
"completed_at":null,
"type":"ToolUsageMessage"
}
}
}
```
**Tool Completed**
This message indicates that the Agent has successfully completed running a tool. The `data` property will include the output received from the tool.
```json
{
"event":"tool_completed",
"data":{
"agent_id":"121f6cbb-15e1-4464-8a1b-7e6540098ae5",
"message":{
"id":"7f173c50-c011-4d7f-949d-5ed08ed1d372",
"role":"function",
"timestamp":"2023-12-17T15:24:35.503669",
"tool_name":"file_write",
"tool_arguments":{
"filename":"unicorns.md",
"text":"# The Enchanted Forest of Unicorns\n\nOnce upon a time...",
"thought":"Writing a story about unicorns and saving it to a file named unicorns.md"
},
"tool_output":"File unicorns.md written successfully",
"tool_output_summary":null,
"success":true,
"error":null,
"agent_halted":false,
"cost":0.0,
"completed_at":"2023-12-17T15:24:35.505959",
"type":"ToolUsageMessage"
}
}
}
```
**Tool Error**
This message indicates that the Agent has encountered an error when running a tool. The `data` property will include the error message.
```json
{
"event":"tool_error",
"data":{
"agent_id":"121f6cbb-15e1-4464-8a1b-7e6540098ae5",
"message":{
"id":"4624c923-4712-4a9b-8323-b600c41469c5",
"role":"function",
"timestamp":"2023-12-17T15:24:54.036341",
"tool_name":"file_query",
"tool_arguments":{
"filename":"unicorns.md",
"question":"What is the content of the file?",
"thought":"I need to read the content of the file to review the story about unicorns."
},
"tool_output":null,
"tool_output_summary":null,
"success":false,
"error":"The following error occurred...",
"agent_halted":false,
"cost":0.0,
"completed_at":"2023-12-17T15:24:54.324382",
"type":"ToolUsageMessage"
}
}
}
```
================================================
FILE: website/docs/azure.md
================================================
---
sidebar_position: 11
---
# Azure OpenAI Services
BondAI has support for Azure OpenAI Services for all GPT-N models, GPT-4 Vision, the DALL-E text-to-image model as well as the Embeddings API.
## Using Environment Variables
To enable connectivity to Azure OpenAI Services simply specify the following environment variables:
```bash
export OPENAI_CONNECTION_TYPE=azure
export AZURE_OPENAI_GPT35_API_KEY=XXXXXXXXXX
export AZURE_OPENAI_GPT35_API_BASE=XXXXXXXXXX
export AZURE_OPENAI_GPT35_API_VERSION=2023-07-01-preview
export AZURE_OPENAI_GPT35_DEPLOYMENT=XXXXXXXXXX
export AZURE_OPENAI_GPT4_API_KEY=XXXXXXXXXX
export AZURE_OPENAI_GPT4_API_BASE=XXXXXXXXXX
export AZURE_OPENAI_GPT4_API_VERSION=2023-07-01-preview
export AZURE_OPENAI_GPT4_DEPLOYMENT=XXXXXXXXXX
export AZURE_OPENAI_EMBEDDINGS_API_KEY=XXXXXXXXXX
export AZURE_OPENAI_EMBEDDINGS_API_BASE=XXXXXXXXXX
export AZURE_OPENAI_EMBEDDINGS_API_VERSION=2023-07-01-preview
export AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=XXXXXXXXXX
# If you intend to use the DalleTool for text to image generation.
export AZURE_OPENAI_DALLE_API_KEY=XXXXXXXXXX
export AZURE_OPENAI_DALLE_API_BASE=XXXXXXXXXX
export AZURE_OPENAI_DALLE_API_VERSION=2023-06-01-preview
export AZURE_OPENAI_DALLE_DEPLOYMENT=XXXXXXXXXX
```
## In Code
Configure application wide default connection parameters.
```python
from bondai.models.openai import DefaultOpenAIConnectionParams
DefaultOpenAIConnectionParams.configure_azure_connection(
    gpt_4_api_key='',
    gpt_4_api_version='',
    gpt_4_azure_endpoint='',
    gpt_4_azure_deployment='',
)
```
Configure connection settings for a single Agent.
```python
from bondai.agents import Agent
from bondai.models.openai import (
OpenAILLM,
OpenAIConnectionParams,
OpenAIConnectionType,
OpenAIModelNames
)
connection_params = OpenAIConnectionParams(
connection_type=OpenAIConnectionType.AZURE,
api_key = '',
api_version = '',
azure_endpoint = '',
azure_deployment = '',
)
llm = OpenAILLM(
model=OpenAIModelNames.GPT4_32K,
connection_params=connection_params
)
agent = Agent(llm=llm)
```
================================================
FILE: website/docs/cli.md
================================================
---
sidebar_position: 9
---
import googleLogo from './img/google-logo.png'
import alpacaMarketsLogo from './img/alpaca-markets-logo.jpeg'
import postgresLogo from './img/postgres-logo.jpeg'
import blandaiLogo from './img/blandai-logo.jpeg'
import duckduckgoLogo from './img/duckduckgo-logo.png'
import gmailLogo from './img/gmail-logo.png'
# Command Line Interface
BondAI comes with an easy to use Command Line Interface for an "out of the box" Agent experience that includes a number of default tools.
## 🚀 Getting Started
Installing BondAI is easy:
```bash
pip install bondai
```
Once you've installed BondAI the CLI will be available as an executable in your shell. Before running the CLI you will need to set the OPENAI_API_KEY environment variable.
```bash
export OPENAI_API_KEY=sk-XXXXXXXXXX
```
When you start BondAI the Task Assistant Agent will ask you about the task you want to run and gather any necessary details. To start the CLI just run `bondai` in your shell.
```bash wordWrap=true
% bondai
Loading BondAI...
******************ENTERING CHAT******************
You are entering a chat with BondAI...
You can exit any time by typing 'exit'.
Hello! I'm BondAI, your friendly and helpful assistant. I'm here to assist you with any tasks or questions you might have. How can I assist you today?
I want you to write a story about unicorns and save it to a file named unicorns.md.
Using tool file_write: Writing a story about unicorns and saving it to a file named unicorns.md
Using tool final_answer...
A story about unicorns has been successfully written and saved to a file named unicorns.md. The story is set in an enchanted forest and describes the magical and majestic nature of unicorns, their daily routines, and their harmonious relationship with other creatures in the forest.
```
## CLI Default Tools
When starting the CLI, BondAI will load the following tools by default.
- **DalleTool** - Allows BondAI to generate images using the Dalle-E text to image model.
- **ImageAnalysisTool** - Allows the Agent to use GPT4 Vision to analyze images.
- **DuckDuckGoSearchTool** - This is the default search tool loaded by BondAI as it requires no API keys or additional configuration.
- **WebsiteQueryTool** - Allows the Agent to query information about websites. Note that this tool has integrated semantic search. If the content of the website exceeds the LLM's context window size BondAI will automatically convert the content to Embeddings and semantically filter it to fit inside the context window.
- **FileQueryTool** - Allows BondAI to query file contents. Note that this tool has integrated semantic search. If the content of the file exceeds the LLM's context window size BondAI will automatically convert the content to Embeddings and semantically filter it to fit inside the context window.
- **DownloadFileTool** - Allows BondAI to download files from the internet and save them to local files.
- **FileWriteTool** - Allows BondAI to save text to a local file.
- **PythonREPLTool** - Allows the Agent to run Python scripts.
- **ShellTool** - Allows the Agent access to the shell.
## Additional Supported Tools
Google Search
Allows BondAI to use the Google Search API to search the internet. If this tool is loaded BondAI will **not load the DuckDuckGoSearchTool** since doing so would be redundant. This tool requires the following environment variables:
```
export GOOGLE_API_KEY=XXXXXXXXXX
export GOOGLE_CSE_ID=XXXXXXXXXX
```
BlandAI
Allows BondAI to use the BlandAI API to make phone calls and process call transcripts. This tool requires the following environment variable:
```
export BLAND_AI_API_KEY=XXXXXXXXXX
```
Alpaca Markets
Allows BondAI to use the Alpaca Markets API to buy and sell stocks and crypto. This tool requires the following environment variables:
```
export ALPACA_MARKETS_API_KEY=XXXXXXXXXX
export ALPACA_MARKETS_SECRET_KEY=XXXXXXXXXX
```
PostgreSQL
Allows BondAI to automatically query a Postgres database. Note that the specified user must have the ability to query the database schema. This tool requires the following environment variable:
```
export PG_URI=postgresql://user:password@host:port/database
```
Alternatively, you can use the following environment variables:
```
export PG_HOST=host
export PG_PORT=5432
export PG_USERNAME=user
export PG_PASSWORD=password
export PG_DBNAME=database
```
Gmail
Allows BondAI to search and read emails. BondAI will search the local directory for a file named `gmail-token.pickle`. If this file is found and contains valid gmail credentials the Gmail tools will be automatically loaded.
## CLI Command Line Arguments
The following command line arguments can be specified to change the CLI behavior. For example:
```bash
bondai --enable-prompt-logging --quiet
```
- **--server [PORT]** - Starts the BondAI API server. Learn more about the [BondAI API here](./category/api-specification).
- **--enable-prompt-logging [LOG_DIR]** - Turns on prompt logging which will write all prompt inputs into the specified directory. If no directory is provided BondAI will default to saving logs within the current directory.
- **--quiet** - Suppress agent output. Unless specified the agent will print detailed information about each step it's taking and the tools it's using.
================================================
FILE: website/docs/docker.md
================================================
---
sidebar_position: 10
---
# Using Docker
## BondAI Docker Image
BondAI Docker images are available on [DockerHub here](https://hub.docker.com/r/krohling/bondai). If you intend to use tools that run arbitrary code (*PythonREPLTool*) or access your shell (*ShellTool*) it is highly recommended that you run BondAI in a Docker container as these tools can damage your machine.
Before running the BondAI Docker container it is recommended that you create a directory named 'agent-volume' and mount it as a volume on the container. This will be used as the Agent's working directory and allows you to easily share files with the Agent.
```bash
mkdir agent-volume
docker pull krohling/bondai:latest
docker run -it --rm \
-v "$(pwd)/agent-volume:/agent-volume" \
-w /agent-volume \
-e OPENAI_API_KEY=sk-XXXXXXXXXX \
krohling/bondai:latest bondai
```
## BondAI with Docker Compose
The docker-compose.yml file is located in the ./docker directory of the Github repository and makes use of a .env file and a pre-configured volume which is mapped to an ./agent-volume directory.
There's also two options with Docker Compose. From the command line with this command:
```bash
cd ./docker
docker compose up
```
Or if you use VS Code, install the official Docker Extension, then right click on the ./docker/docker-compose.yml file and select Compose Up.
================================================
FILE: website/docs/examples/_category_.json
================================================
{
"label": "Examples",
"position": 8,
"link": {
"type": "generated-index"
}
}
================================================
FILE: website/docs/examples/api-client.md
================================================
---
sidebar_position: 4
---
# API Client
This example demonstrates how to use the BondAIAPIClient to communicate with the [BondAI API](../api-spec/getting-started).
```python
from termcolor import cprint
from bondai.api import BondAIAPIClient
# Create the client
client = BondAIAPIClient()
client.connect_ws()
agent = client.create_agent()
user_exited = False
# Listen to WebSocket events
@client.on("streaming_content_updated")
def handle_streaming_content_updated(agent_id, content_buffer):
if agent_id != agent["id"]:
return
print(content_buffer)
@client.on("streaming_function_updated")
def handle_streaming_function_updated(agent_id, function_name, arguments_buffer):
if agent_id != agent["id"]:
return
print(function_name)
print(arguments_buffer)
@client.on("agent_message")
def handle_agent_message(agent_id, message):
global user_exited
if agent_id != agent["id"]:
return
cprint("\n" + message["message"] + "\n", "white")
response = input()
if response.lower().strip() == "exit":
client.disconnect_ws()
user_exited = True
else:
client.send_message(agent_id, response)
@client.on("tool_selected")
def handle_tool_selected_message(agent_id, message):
if agent_id != agent["id"]:
return
tool_name = message["tool_name"]
tool_arguments = message.get("tool_arguments", {})
if "thought" in tool_arguments:
cprint(f"Using tool {tool_name}: {tool_arguments['thought']}", "green")
else:
cprint(f"Using tool {tool_name}...", "green")
@client.on("tool_error")
def handle_tool_error_message(agent_id, message):
if agent_id != agent["id"]:
return
cprint(message, "red")
cprint("******************ENTERING CHAT******************", "white")
cprint(
"You are entering a chat with BondAI...\nYou can exit any time by typing 'exit'.",
"white",
)
intro_message = (
"The user has just logged in. Please introduce yourself in a friendly manner."
)
client.send_message(agent["id"], intro_message)
try:
while not user_exited:
pass
except KeyboardInterrupt:
print("Exiting...")
```
================================================
FILE: website/docs/examples/code-interpreter.md
================================================
---
sidebar_position: 3
---
import gdpChart from './img/us_gdp_2000_2010.png'
# Code Interpreter
In this example we demonstrate BondAI's ability to combine online research and data visualization tasks. We ask BondAI to plot a line chart of U.S. GDP between 2000 and 2010, which requires using a combination of web search, data extraction and data visualization tools (ie matplotlib).
First we will configure the environment variables for OpenAI and Google Search.
```bash
export OPENAI_API_KEY=sk-XXXXXXXXXX
export GOOGLE_API_KEY=XXXXXXXXXX
export GOOGLE_CSE_ID=XXXXXXXXXX
```
Next we will create an Agent with 3 tools:
- **GoogleSearchTool** allows the Agent to search the internet.
- **WebsiteQueryTool** allows the Agent to query information about websites. Note that this tool has integrated semantic search. If the content of the website exceeds the LLM's context window size BondAI will automatically convert the content to Embeddings and semantically filter it to fit inside the context window.
- **PythonREPLTool** allows the Agent to run Python scripts.
```python
from bondai.agents import Agent
from bondai.tools import PythonREPLTool
from bondai.tools.search import GoogleSearchTool
from bondai.tools.website import WebsiteQueryTool
task = "I want you to generate a line chart of the U.S. GDP from 2000 to 2010."
Agent(tools=[
GoogleSearchTool(),
WebsiteQueryTool(),
PythonREPLTool()
]).run(task)
```
## Example Output
```
Using the google_search tool
Thought: I'm using the google_search tool to find a reliable source for U.S. GDP data from 2000 to 2010.
Arguments
query: U.S. GDP data from 2000 to 2010
count: 5
page: 1
Output: [GDP by State | U.S. Bureau of Economic Analysis (BEA)](https://www.bea.gov/data/gdp/gdp-state) [...
Using the website_query tool
Thought: I'm using the website_query tool to extract the U.S. GDP data from 2000 to 2010 from the Statista website.
Arguments
url: https://www.statista.com/statistics/188105/annual-gdp-of-the-united-states-since-1990/
question: What is the annual GDP of the United States from 2000 to 2010?
Output: The annual GDP of the United States from 2000 to 2010 is as follows: 2000: $10,251.9 billion 200...
Using the Python_REPL tool
Arguments
query: import matplotlib.pyplot as plt years = [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2...
Output: This command ran successfully with no output.
Using the Python_REPL tool
Arguments
query: plt.plot(years, gdp) plt.xlabel('Years') plt.ylabel('GDP in billions') plt.title('U.S. GDP from 2...
Output: This command ran successfully with no output.
Using the task_completed tool
Arguments
input: The line chart of the U.S. GDP from 2000 to 2010 has been successfully generated and saved as 'us...
Output: The line chart of the U.S. GDP from 2000 to 2010 has been successfully generated and saved as 'us...
```
================================================
FILE: website/docs/examples/home-automation.md
================================================
---
sidebar_position: 2
---
# Home Automation
In this example we demonstrate how BondAI is able to use the *ShellTool* and *PythonREPLTool* to turn off a Kasa lamp with only a very simple prompt and no direction on how to solve the problem. Note that in this example BondAI is given no information and no additional prompting on how to go about turning off the Kasa lamp. It tries several different strategies including using the `python-kasa` library, the kasa CLI and ultimately ends up using the `pyHS100` library which was successful.
This implementation is obviously an inefficient home automation solution but it demonstrates BondAI's ability to attempt multiple strategies, resilience to error conditions and adaptability. While it struggles with several of its initial attempts, likely due to outdated knowledge of the library APIs, it keeps trying solutions until it finds a successful path and eventually turns the light off.
**Note** that a much more efficient solution could be built quite easily by implementing a KasaTool with this functionality implemented and exposed to BondAI as functions it can call.
First we will configure the environment variable for OpenAI.
```bash
export OPENAI_API_KEY=sk-XXXXXXXXXX
```
Next we will create an Agent with 2 tools. Note that these tools are considered **dangerous** as running arbitrary code and shell commands generated by an LLM can damage the host machine. It is highly recommended that these are only used from within a Docker environment.
- **PythonREPLTool** - Allows the Agent to run Python scripts.
- **ShellTool** - Allows the Agent access to the shell. This is useful for installing libraries.
```python
from bondai.agents import Agent
from bondai.tools import PythonREPLTool, ShellTool
task = (
"I want you to turn off my Bedroom Lamp. "
"It's a Kasa smart plug btw on the same network."
)
Agent(tools=[
PythonREPLTool(),
ShellTool()
]).run(task)
```
## Example Output
```
Using the shell_tool tool
Thought: I'm installing the python-kasa library which will allow us to interact with the Kasa smart plug.
Arguments
command: pip install python-kasa
Output: Output: Requirement already satisfied: python-kasa in ./venv/lib/python3.11/site-packages (0.5.3)...
Using the python_repl tool
Thought: I'm executing the Python code to discover the Kasa smart plug and turn it off.
Arguments
code: from kasa import Discover import asyncio async def turn_off_plug(): devices = await Discover...
An Error occured: There is no current event loop in thread 'Thread-2 (target)'.
Using the python_repl tool
Thought: I'm creating a new event loop, setting it as the current event loop, and then running the coroutine to turn off the Kasa smart plug.
Arguments
code: import asyncio from kasa import Discover async def turn_off_plug(): devices = await Discover...
An Error occured: name 'Discover' is not defined
Using the python_repl tool
Thought: I'm trying to import the 'kasa' package to check if it's installed correctly.
Arguments
code: import kasa
result: {'kasa': }
stdout:
stderr:
Output: Result Variables: kasa: }
stdout:
stderr:
Output: Result Variables: Discover:
Using the python_repl tool
Thought: I'm discovering all the devices on the network and printing their aliases and IP addresses. This will help us identify the IP address of the 'Bedroom Lamp'.
Arguments
code: import asyncio from kasa import Discover, SmartPlug async def discover_devices(): devices = ...
An Error occured: name 'Discover' is not defined
Using the python_repl tool
Thought: I'm trying to create an instance of the 'SmartPlug' class using a dummy IP address to check if the 'kasa' package is working correctly.
Arguments
code: from kasa import SmartPlug plug = SmartPlug("192.168.1.1") plug
result: {'SmartPlug': , 'plug': }
stdout:
stderr:
Output: Result Variables: SmartPlug: plug: }
stdout:
stderr:
Output: Result Variables: SmartPlug:
Using the python_repl tool
Thought: I'm creating an instance of the 'SmartPlug' class using the IP address of the 'Bedroom Lamp' and calling its 'turn_off' method to turn off the lamp. I'm then returning the 'plug' object to check its state.
Arguments
code: from kasa import SmartPlug plug = SmartPlug("192.168.4.29") plug.turn_off() plug
result: {'SmartPlug': , 'plug': }
stdout:
stderr: :4: RuntimeWarning: coroutine 'SmartPlug.turn_off' was never awaited
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
Output: Errors: :4: RuntimeWarning: coroutine 'SmartPlug.turn_off' was never awaited RuntimeWarni...
Using the python_repl tool
Thought: I'm executing the Python code to turn off the 'Bedroom Lamp' using the 'SmartPlug' class from the 'kasa' package. I'm creating a new event loop, setting it as the current event loop, and then running the coroutine to turn off the lamp.
Arguments
code: import asyncio from kasa import SmartPlug async def turn_off_lamp(): plug = SmartPlug("192.1...
An Error occured: name 'SmartPlug' is not defined
Using the shell_tool tool
Thought: I'm uninstalling and then reinstalling the 'kasa' package to try to fix the issues with the 'SmartPlug' and 'Discover' classes.
Arguments
command: pip uninstall -y python-kasa && pip install python-kasa
Output: Errors: ERROR: Invalid requirement: '&&'
Using the shell_tool tool
Thought: I'm uninstalling the 'kasa' package to try to fix the issues with the 'SmartPlug' and 'Discover' classes.
Arguments
command: pip uninstall -y python-kasa
Output: Output: Found existing installation: python-kasa 0.5.3 Uninstalling python-kasa-0.5.3: Successf...
Using the shell_tool tool
Thought: I'm reinstalling the 'python-kasa' package to try to fix the issues with the 'SmartPlug' and 'Discover' classes.
Arguments
command: pip install python-kasa
Output: Output: Collecting python-kasa Obtaining dependency information for python-kasa from https://fi...
Using the python_repl tool
Thought: I'm trying to import the 'SmartPlug' class from the 'kasa' package to check if it's accessible. If it's not accessible, we might need to check the Python environment for issues or consider using a different approach to turn off the 'Bedroom Lamp'.
Arguments
code: from kasa import SmartPlug SmartPlug
result: {'SmartPlug': }
stdout:
stderr:
Output: Result Variables: SmartPlug:
Using the python_repl tool
Thought: I'm executing the Python code to turn off the 'Bedroom Lamp' using the 'SmartPlug' class from the 'kasa' package. I'm running the 'turn_off_lamp' coroutine in the default event loop to avoid the issues with the 'SmartPlug' class not being defined in a new event loop.
Arguments
code: import asyncio from kasa import SmartPlug async def turn_off_lamp(): plug = SmartPlug("192.1...
An Error occured: name 'SmartPlug' is not defined
Using the shell_tool tool
Thought: I'm using the 'kasa' CLI command to turn off the 'Bedroom Lamp'. The '--plug' option specifies that we're interacting with a plug, the '--host' option specifies the IP address of the plug, and 'off' is the command to turn off the plug.
Arguments
command: kasa --plug --host 192.168.4.29 off
Output: Errors: Usage: kasa [OPTIONS] COMMAND [ARGS]... Try 'kasa --help' for help. Error: No such optio...
Using the shell_tool tool
Thought: I'm installing the 'pyHS100' package which will allow us to interact with the Kasa smart plug.
Arguments
command: pip install pyHS100
Output: Output: Collecting pyHS100 Using cached pyHS100-0.3.5.2-py3-none-any.whl (31 kB) Requirement al...
Using the python_repl tool
Thought: I'm creating an instance of the 'SmartPlug' class from the 'pyHS100' package using the IP address of the 'Bedroom Lamp' and calling its 'turn_off' method to turn off the lamp. I'm then returning the 'plug' object to check its state.
Arguments
code: from pyHS100 import SmartPlug plug = SmartPlug("192.168.4.29") plug.turn_off() plug
result: {'SmartPlug': , 'plug': }
stdout:
stderr:
Output: Result Variables: SmartPlug: plug: ")
task = """I want you to research the usage of Metformin as a drug to treat aging and aging related illness.
You should only use reputable information sources, ideally peer reviewed scientific studies.
I want you to summarize your findings in a document named metformin.md and includes links to reference and resources you used to find the information.
Additionally, the last section of your document you should provide a recommendation for a 43 year old male, in good health and who regularly exercises as to whether he would benefit from taking Metformin.
You should explain your recommendation and justify it with sources.
Finally, you should highlight potential risks and tradeoffs from taking the medication."""
Agent(tools=[
DuckDuckGoSearchTool(),
WebsiteQueryTool(),
FileWriteTool()
]).run(task)
```
================================================
FILE: website/docs/intro.md
================================================
---
sidebar_position: 1
---
import bondaiLogo from './img/bondai-logo.png'
import googleLogo from './img/google-logo.png'
import alpacaMarketsLogo from './img/alpaca-markets-logo.jpeg'
import postgresLogo from './img/postgres-logo.jpeg'
import blandaiLogo from './img/blandai-logo.jpeg'
import duckduckgoLogo from './img/duckduckgo-logo.png'
import gmailLogo from './img/gmail-logo.png'
import openaiLogo from './img/openai-logo.png'
import azureLogo from './img/azure-logo.png'
import langchainLogo from './img/langchain-logo.jpeg'
# Meet BondAI
Build highly capable Single and Multi-Agent Systems.
## What is BondAI?
BondAI is an open-source tool for developing AI Agent Systems. BondAI handles the implementation complexities including memory/context management, error handling, vector/semantic search and includes a powerful set of out of the box tools and integrations. BondAI's implementation is based on the latest research including support for **[ReAct](https://arxiv.org/abs/2210.03629)**, Multi-Agent and Conversable Agent systems based on the **[AutoGen paper](https://arxiv.org/abs/2308.08155)**, and a Tiered Memory System based on the **[MemGPT paper](https://arxiv.org/abs/2310.08560)**. Additionally, BondAI comes with a CLI interface and a REST/WebSocket Agent Server.
## BondAI Integrations
BondAI comes out of the box with a powerful set of integrations.
| | | |
| -------- | ------- |------- |
|
| **OpenAI** | BondAI supports any combination of OpenAI models and services including all GPT-N, GPT-4 Vision, Dalle-E 3, and Embeddings. |
|
| **Microsoft Azure** | BondAI fully supports connectivity to GPT-N, Dalle-E and Embedding APIs through [Microsoft's Azure OpenAI services](https://azure.microsoft.com/en-us/products/ai-services/openai-service). |
|
| **Google Search** | Allows BondAI to search the internet. [Requires a Google Search API Key and CSE ID](https://developers.google.com/custom-search/v1/introduction) |
|
| **DuckDuckGo** | Allows BondAI to search the internet. No API keys required. |
|
| **Alpaca Markets** | Allows BondAI to buy and sell stocks and crypto. [Requires an Alpaca Markets account.](https://alpaca.markets/) |
|
| **PostgreSQL** | BondAI can automatically extract the schema from a Postgres DB and process natural language queries. |
|
| **Bland AI** | Allows BondAI to make phone calls and process/retrieve call transcripts. [Requires a Bland.ai account.](https://www.bland.ai/) |
|
| **Gmail** | Allows BondAI to search and read emails. |
|
| **LangChain** | Use BondAI's LangChainTool class to import any tool from LangChain into BondAI. |
================================================
FILE: website/docs/multi-agent-systems/examples.md
================================================
---
sidebar_position: 1
---
# Multi-Agent Architectures
## Example 1: Flat Multi-Agent Architecture
In this example architecture, multiple ConversationalAgents are configured, each specializing in different aspects of customer support (e.g., technical, billing, general queries). They are managed under a GroupConversation system. This is an example of a flat conversational architecture where all agents are able to communicate directly with each other.
```python
from bondai.agents import ConversationalAgent
from bondai.agents.group_chat import GroupConversation
# Initialize multiple agents for different support aspects
tech_support = ConversationalAgent(name="TechSupport")
billing_support = ConversationalAgent(name="BillingSupport")
general_support = ConversationalAgent(name="GeneralSupport")
# Create a group conversation with these agents
support_team = GroupConversation(
conversation_members=[
tech_support,
billing_support,
general_support
]
)
# Code to route user queries to the appropriate agent
user_query = "How do I reset my password?"
response = support_team.send_message(tech_support.name, user_query)
print(response.message)
```
## Example 2: Hierarchical Conversational Architecture
This example illustrates a hierarchical conversational architecture, characterized by structured agent interactions within a group. In this setup, the GroupConversation is configured using TeamConversationConfig to create distinct teams within the conversation.
- **Agent Configuration**: Three ConversationalAgents are initialized—`team_leader`, `coding_expert`, and `design_expert`. Each agent has a specific role, where the team leader can coordinate and delegate tasks between the two experts.
- **Group Setup**: The GroupConversation is organized into teams: one team includes the `team_leader` and `coding_expert`, and another consists of the `team_leader` and `design_expert`. This configuration ensures that the team leader can communicate with both experts, but direct communication between the coding and design experts is not possible.
- **Conversation Dynamics**: When the conversation starts, a message is sent to the `team_leader`. The hierarchical structure allows the team leader to relay information to the experts, orchestrate their collaboration, and provide combined insights from both agents.
```python
from bondai.agents import ConversationalAgent
from bondai.agents.group_chat import GroupConversation, TeamConversationConfig
# Initialize team agents
team_leader = ConversationalAgent(name="TeamLeader")
coding_expert = ConversationalAgent(name="CodingExpert")
design_expert = ConversationalAgent(name="DesignExpert")
# Create a group conversation
problem_solving_team = GroupConversation(
conversation_config=TeamConversationConfig(
[team_leader, coding_expert],
[team_leader, design_expert]
)
)
# Simulate a problem-solving session
problem_description = "Develop a user-friendly app interface."
response = problem_solving_team.send_message(team_leader.name, problem_description)
print(response.message)
```
## Example 3: Multi-Team, Hierarchical Conversational Architecture
This example showcases a complex hierarchical architecture, involving multiple agents organized into distinct teams, each with specialized roles. In this model, communication channels are both vertical (within each team) and horizontal (across teams via team leaders).
- **Agent Configuration**: The architecture involves a `product_manager`, two team leaders (`eng_leader` and `design_leader`), and various experts (`coding_expert`, `qa_expert`, `visual_designer`, `ux_designer`). The `product_manager` serves as a central figure overseeing the entire project.
- **Group Setup**: This configuration organizes the agents into three teams: an **engineering team** (led by `eng_leader`), a **design team** (led by `design_leader`), and a **management team** consisting of the `product_manager` and both team leaders.
- **Conversation Dynamics**: When a conversation is initiated, a message is sent to the `product_manager`, the product manager can facilitate the conversation by engaging with both team leaders, who in turn can collaborate with their respective team members.
```python
from bondai.agents import ConversationalAgent
from bondai.agents.group_chat import GroupConversation, TeamConversationConfig
# Initialize product manager agent
product_manager = ConversationalAgent(name="ProductManager")
# Initialize team 1 agents
eng_leader = ConversationalAgent(name="EngTeamLeader")
coding_expert = ConversationalAgent(name="CodingExpert")
qa_expert = ConversationalAgent(name="QAExpert")
# Initialize team 2 agents
design_leader = ConversationalAgent(name="DesignTeamLeader")
visual_designer = ConversationalAgent(name="VisualDesigner")
ux_designer = ConversationalAgent(name="UXDesigner")
# Create a group conversation
problem_solving_team = GroupConversation(
conversation_config=TeamConversationConfig(
[eng_leader, coding_expert, qa_expert],
[design_leader, visual_designer, ux_designer],
[product_manager, eng_leader, design_leader]
)
)
# Simulate a problem-solving session
problem_description = "Develop a user-friendly app interface."
response = problem_solving_team.send_message(product_manager.name, problem_description)
print(response.message)
```
================================================
FILE: website/docs/multi-agent-systems/group-conversation.md
================================================
---
sidebar_position: 2
---
# GroupConversation
The GroupConversation class in BondAI facilitates the creation and management of conversations involving multiple agents, inspired by advanced multi-agent system research.
```python
class GroupConversation(EventMixin, Runnable):
def __init__(
self,
conversation_members: List[ConversationMember] | None = None,
conversation_config: BaseGroupConversationConfig | None = None,
filter_recipient_messages: bool = False,
):
```
## Usage Example
```python
from bondai.agents import ConversationalAgent
from bondai.agents.group_chat import GroupConversation
# Initialize conversation members
members = [ConversationalAgent(...), ConversationalAgent(...)]
# Create a group conversation
group_conversation = GroupConversation(conversation_members=members)
# Conduct a group conversation
group_conversation.send_message("MemberName", "Hello, let's discuss.")
```
## Key Features
- Event-driven architecture.
- Manages multi-agent conversations.
- Supports dynamic interaction among multiple conversation participants.
- Facilitates complex conversational flows and decision-making processes.
- Allows for conversations with both predefined and dynamically determined members.
## Parameters
- **conversation_members**: List of ConversationMember instances (i.e. UserProxy or ConversationalAgent) participating in the conversation.
- **conversation_config**: Configuration settings for managing group conversation dynamics.
- **filter_recipient_messages**: Boolean flag to determine whether the message history shown to each conversation member is inclusive of the entire group conversation or just the messages sent to/from that conversation member.
## Methods
- **id**: Property returning the unique identifier of the group conversation. No parameters.
- **status**: Property indicating the current status of the group conversation. No parameters.
- **members**: Property listing the conversation members participating in the group. No parameters.
- **remove_messages_after(timestamp: datetime, inclusive: bool = True)**: Removes messages from the conversation history that occurred after a specific timestamp.
- **send_message(recipient_name: str, message: str, sender_name: str = USER_MEMBER_NAME, require_response: bool = True)**: Sends a message within the group conversation.
- **reset_memory**: Clears the message history for all conversation members.
## Group Conversation Events
- **message_received**: Triggered when a message is received by a member of the conversation.
- **message_error**: Fired when an error occurs in message processing within the group.
- **message_completed**: Occurs when a message has been successfully processed by a member of the conversation.
- **conversation_exited**: Triggered when a member exits the conversation.
================================================
FILE: website/docs/multi-agent-systems/multi-agent-systems.md
================================================
---
sidebar_position: 4
---
# Multi-Agent Systems
Multi-Agent Systems (MAS) in BondAI represent a sophisticated approach to developing interactive, collaborative AI applications. At the core of MAS in BondAI are three primary classes: [ConversationalAgent](../agents/conversational-agent.md), [GroupConversation](./group-conversation.md), and [TeamConversationConfig](./team-conversation-config.md).
**ConversationalAgent** acts as the fundamental building block of MAS, embodying individual agents with specific roles or expertise. These agents can engage in dialogues, process information, and perform tasks based on their programming and interactions.
**GroupConversation** is the framework that orchestrates communication among multiple agents. It allows various ConversationalAgents to interact within a shared conversational space, enabling information exchange, collaborative problem-solving, and decision-making processes. This class manages the dynamics of the conversation, ensuring coherent interactions among all participating agents.
**TeamConversationConfig** is crucial for structuring the conversation architecture within a MAS. It defines how agents are grouped and how they can communicate with each other. This configuration can set up hierarchical structures, dividing agents into teams or layers, and determining the flow of information between them. It plays a pivotal role in managing complex conversations where different agents contribute distinct insights or skills towards a common goal.
Together, these classes enable the development of complex MAS architectures in BondAI, where agents can work in unison or independently, mimicking real-world team dynamics and collaborative environments. This system opens up possibilities for applications requiring nuanced interactions and emergent intelligence.
================================================
FILE: website/docs/multi-agent-systems/team-conversation-config.md
================================================
---
sidebar_position: 3
---
# TeamConversationConfig
The TeamConversationConfig class in BondAI structures the conversations in a Multi-Agent System, enabling complex conversational patterns among different agents.
```python
class TeamConversationConfig(BaseGroupConversationConfig):
def __init__(self, *args: List[ConversationMember]):
...
```
## Usage Example
```python
from bondai.agents import ConversationalAgent
from bondai.agents.group_chat import GroupConversation, TeamConversationConfig
# Initialize team 1
agent1 = ConversationalAgent(...)
agent2 = ConversationalAgent(...)
team1 = [agent1, agent2]
# Initialize team 2
agent3 = ConversationalAgent(...)
agent4 = ConversationalAgent(...)
team2 = [agent3, agent4]
# Allow agent1 and agent3 to communicate
team3 = [agent1, agent3]
# Configure teams
team_config = TeamConversationConfig(team1, team2, team3)
# Use in GroupConversation
group_conversation = GroupConversation(conversation_config=team_config)
```
## Key Features
- Facilitates hierarchical and structured team-based conversations.
- Allows the creation of any number of teams with specific member agents.
- Supports dynamic interactions within and across teams.
- Enhances the control over communication flow in multi-agent setups.
================================================
FILE: website/docs/tools/_category_.json
================================================
{
"label": "Tools",
"position": 6,
"link": {
"type": "generated-index"
}
}
================================================
FILE: website/docs/tools/custom-tool.md
================================================
---
sidebar_position: 2
---
# Building Custom Tools
By building your own custom tools you can give BondAI the power to interact with new products, external APIs, really just about anything! Fortunately, tools are super easy to build. In this example we'll build a tool that allows BondAI to ask questions about any country and get information back from the [RestCountries API](https://restcountries.com/).
### Step 1: Define your Tool's Name, Description and Parameters
For BondAI to use your tool it needs 3 pieces of information to tell the LLM about your tool. For the LLM to effectively use your tool your description should be highly detailed and informative.
- **Tool Name** - This should be an informative name for what your tool does. It's also important that it is unique to just your tool.
- **Tool Description** - This is a detailed description of what your tool does, what the parameters are used for, what information it returns and when to use it.
- **Tool Parameters** - The Parameters structure is used to encode the parameters shown to the LLM. You must include all possible parameters in this structure. It is common to include a 'thought' parameter which encourages the LLM to include its reasoning for using the tool, which has been shown to improve performance.
```python
from pydantic import BaseModel
TOOL_NAME = "query_countries_tool"
TOOL_DESCRIPTION = (
"This tool allows you to search countries by name and get a list back with information about each country. "
"The response includes information like the country's population size, currencies, languages and more. "
"\nParameters:\n"
"- country_name (required): The name of the country you are looking for."
)
class Parameters(BaseModel):
country_name: str
```
### Step 2: Implement your Tool class
All custom BondAI tools must extend from the Tool class and implement the `run` method. In our example tool we will call the RestCountries API to search for countries by name.
```python
from bondai.tools import Tool
class QueryCountriesTool(Tool):
def __init__(self):
super().__init__(TOOL_NAME, TOOL_DESCRIPTION, parameters=Parameters)
def run(self, country_name: str):
if not country_name:
raise Exception("country_name is required.")
response = requests.get(f"https://restcountries.com/v3.1/name/{country_name}")
return parse_countries_info(response.json())
```
### Step 3: Format your response
We will define a function named `parse_countries_info` that takes the JSON object returned from the RestCountries API and turns it into a well formatted string that can be easily understood by the LLM. Note that while the LLM could likely understand the JSON formatted response, this approach has the advantage of removing unnecessary information which reduces token usage and cost. This also reduces the amount of Agent memory required to store the result, which is limited by the LLM's context window. It is highly recommended that tool responses are well formatted (i.e. markdown) to improve understanding and limited to only required information.
```python
def parse_countries_info(data):
responses = []
for country in data:
country_name = country['name']['common']
population = country['population']
language = list(country['languages'].values())[0]
area = country['area']
currency = list(country['currencies'].keys())[0]
region = country['region']
subregion = country['subregion']
country_info = f"""**{country_name}**
Population: {population}
Language: {language}
Area: {area}
Currency: {currency}
Region: {region}
Subregion: {subregion}\n---\n""" # Added a separator for readability
responses.append(country_info)
return "\n".join(responses)
```
### Putting it all together
Finally, let's put it all together into a single file!
```python
import requests
from pydantic import BaseModel
from bondai.tools import Tool
TOOL_NAME = "query_countries_tool"
TOOL_DESCRIPTION = (
"This tool allows you to search countries by name and get a list back with information about each country. "
"The response includes information like the country's population size, currencies, languages and more. "
"\nParameters:\n"
"- country_name (required): The name of the country you are looking for."
)
class Parameters(BaseModel):
country_name: str
def parse_countries_info(data):
responses = []
for country in data:
country_name = country['name']['common']
population = country['population']
language = list(country['languages'].values())[0]
area = country['area']
currency = list(country['currencies'].keys())[0]
region = country['region']
subregion = country['subregion']
country_info = f"""**{country_name}**
Population: {population}
Language: {language}
Area: {area}
Currency: {currency}
Region: {region}
Subregion: {subregion}\n---\n""" # Added a separator for readability
responses.append(country_info)
return "\n".join(responses)
class QueryCountriesTool(Tool):
def __init__(self):
super().__init__(TOOL_NAME, TOOL_DESCRIPTION, parameters=Parameters)
def run(self, country_name: str):
if not country_name:
raise Exception("country_name is required.")
response = requests.get(f"https://restcountries.com/v3.1/name/{country_name}")
return parse_countries_info(response.json())
```
================================================
FILE: website/docs/tools/getting-started.md
================================================
---
sidebar_position: 1
---
import bondaiLogo from '../img/bondai-logo.png'
import googleLogo from '../img/google-logo.png'
import alpacaMarketsLogo from '../img/alpaca-markets-logo.jpeg'
import postgresLogo from '../img/postgres-logo.jpeg'
import blandaiLogo from '../img/blandai-logo.jpeg'
import duckduckgoLogo from '../img/duckduckgo-logo.png'
import gmailLogo from '../img/gmail-logo.png'
import openaiLogo from '../img/openai-logo.png'
import azureLogo from '../img/azure-logo.png'
import langchainLogo from '../img/langchain-logo.jpeg'
# BondAI Tools
Tools are what give AI Agents the power to interact with their environment. Combined with the planning capabilities of LLMs, AI Agents are able to break down complex tasks that may require multiple steps and use tools to accomplish their goals. In addition to using BondAI's comprehensive list of built in tools you can build your own custom tools and even import tools from LangChain.
# Example Tools
| | |
| -------- | ------- |
| **PythonREPLTool** | This tool allows BondAI to write and execute Python code. Note that it is highly recommended that this tool be used from within a Docker environment as it may damage the host machine. |
| **ResponseQueryTool** | BondAI will automatically detect if a tool returns a large response (>2000 tokens). To prevent this response from overwhelming the Agent's memory the output will be passed to the ResponseQueryTool which the Agent can then query to get information about the response. |
| **ShellTool** | This tool allows BondAI to interact with the user's shell and run arbitrary commands. Note that it is highly recommended that this tool be used from within a Docker environment as it may damage the host machine. |
| **FileQueryTool** | This tool allows BondAI to query the content of a file. This tool uses integrated Semantic search. If the content of the file is too large for the LLM's context window the content will automatically be converted to embeddings and filtered to fit within the context window. |
| **FileWriteTool** | This tool allows BondAI to write content to a specified filename. |
| **FileReadTool** | This tool allows BondAI to read the raw content of a specified filename. Note that when dealing with large files it is recommended to use the FileQueryTool as large files may overwhelm the Agent's memory. |
| **DownloadFileTool** | This tool allows BondAI to download files from the internet and save them locally to a specified filename. |
| **ExtractHyperlinksTool** | This tool allows BondAI to retrieve the hyperlinks from the HTML of a specified website. |
| **WebsiteQueryTool** | This tool allows BondAI to query the content of a website. This tool uses integrated Semantic search. If the content of the website is too large for the LLM's context window the content will automatically be converted to embeddings and filtered to fit within the context window. |
| **AgentTool** | This tool allows Agents to delegate complex tasks to other Agents, creating a hierarchical Agent architecture. |
# Partner Tools
| | | |
| -------- | ------- |------- |
|
| **OpenAI** | BondAI supports any combination of OpenAI models and services including GPT-4, GPT-3.5, Dalle-E 3, and Embeddings. |
|
| **Microsoft Azure** | BondAI fully supports connectivity to GPT-N, Dalle-E and Embedding APIs through [Microsoft's Azure OpenAI services](https://azure.microsoft.com/en-us/products/ai-services/openai-service). |
|
| **Google Search** | Allows BondAI to search the internet. [Requires a Google Search API Key and CSE ID](https://developers.google.com/custom-search/v1/introduction) |
|
| **DuckDuckGo** | Allows BondAI to search the internet. No API keys required. |
|
| **Alpaca Markets** | Allows BondAI to buy and sell stocks and crypto. [Requires an Alpaca Markets account.](https://alpaca.markets/) |
|
| **PostgreSQL** | BondAI can automatically extract the schema from a Postgres DB and process natural language queries. |
|
| **Bland AI** | Allows BondAI to make phone calls and process/retrieve call transcripts. [Requires a Bland.ai account.](https://www.bland.ai/) |
|
| **Gmail** | Allows BondAI to search and read emails. |
================================================
FILE: website/docusaurus.config.js
================================================
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion
const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');
/** @type {import('@docusaurus/types').Config} */
const config = {
title: 'BondAI',
tagline: 'Automate Everything',
favicon: 'img/favicon.ico',
// Set the production url of your site here
url: 'https://bondai.dev',
// Set the /<baseUrl>/ pathname under which your site is served
// For GitHub pages deployment, it is often '/<projectName>/'
baseUrl: '/',
// GitHub pages deployment config.
// If you aren't using GitHub pages, you don't need these.
organizationName: 'krohling', // Usually your GitHub org/user name.
projectName: 'bondai', // Usually your repo name.
// Fail the build on broken internal links, but only warn for markdown links.
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
// Even if you don't use internationalization, you can use this field to set useful
// metadata like html lang. For example, if your site is Chinese, you may want
// to replace "en" with "zh-Hans".
i18n: {
defaultLocale: 'en',
locales: ['en'],
},
presets: [
[
'classic',
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
sidebarPath: require.resolve('./sidebars.js'),
},
theme: {
customCss: require.resolve('./src/css/custom.css'),
},
}),
],
],
themeConfig:
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
// Replace with your project's social card
image: 'img/bondai-social-card.png',
navbar: {
title: 'BondAI',
logo: {
alt: 'BondAI Logo',
src: 'img/bondai-logo.png',
},
items: [
{
type: 'docSidebar',
sidebarId: 'tutorialSidebar',
position: 'left',
label: 'Meet BondAI',
},
{
href: 'https://github.com/krohling/bondai',
label: 'GitHub',
position: 'right',
},
],
},
footer: {
style: 'dark',
links: [
{
title: 'Docs',
items: [
{
label: 'Tutorial',
to: '/docs/intro',
},
],
},
{
title: 'Community',
items: [
{
label: 'Discord',
// NOTE(review): this is the Docusaurus starter template's Discord
// invite, not a BondAI server — confirm and replace if unintended.
href: 'https://discordapp.com/invite/docusaurus',
},
],
},
{
title: 'More',
items: [
{
label: 'Github',
href: 'https://github.com/krohling/bondai',
},
{
label: 'DockerHub',
href: 'https://hub.docker.com/r/krohling/bondai',
},
],
},
],
},
// Syntax-highlighting themes for code blocks (light/dark mode).
prism: {
theme: lightCodeTheme,
darkTheme: darkCodeTheme,
},
}),
};
module.exports = config;
================================================
FILE: website/package.json
================================================
{
"name": "bondai-docs",
"version": "0.0.0",
"private": true,
"scripts": {
"docusaurus": "docusaurus",
"start": "docusaurus start",
"build": "docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/preset-classic": "2.4.3",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
"react-dom": "^17.0.2"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "2.4.3"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
},
"engines": {
"node": ">=16.14"
}
}
================================================
FILE: website/sidebars.js
================================================
// @ts-check

/**
 * Sidebar definitions for the documentation site.
 *
 * A sidebar gives a set of docs an ordered grouping, per-doc navigation,
 * and next/previous links. Sidebars can be written out explicitly, or —
 * as done here — generated automatically from the filesystem.
 */

/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
  // Mirror the on-disk structure of the docs/ directory.
  tutorialSidebar: [{type: 'autogenerated', dirName: '.'}],
};

module.exports = sidebars;
================================================
FILE: website/src/components/HomepageFeatures/index.js
================================================
import React from 'react';
import clsx from 'clsx';
import styles from './styles.module.css';
const FeatureList = [
{
title: 'Easy to Use',
Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
description: (
<>
Docusaurus was designed from the ground up to be easily installed and
used to get your website up and running quickly.
>
),
},
{
title: 'Focus on What Matters',
Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
description: (
<>
Docusaurus lets you focus on your docs, and we'll do the chores. Go
ahead and move your docs into the docs directory.
>
),
},
{
title: 'Powered by React',
Svg: require('@site/static/img/undraw_docusaurus_react.svg').default,
description: (
<>
Extend or customize your website layout by reusing React. Docusaurus can
be extended while reusing the same header and footer.
>
),
},
];
function Feature({Svg, title, description}) {
return (
);
}
export default function HomepageFeatures() {
return (
{FeatureList.map((props, idx) => (
))}
);
}
================================================
FILE: website/src/components/HomepageFeatures/styles.module.css
================================================
/* Layout for the homepage features section: a vertically-centered
   flex row spanning the full page width, with breathing room above
   and below. */
.features {
display: flex;
align-items: center;
padding: 2rem 0;
width: 100%;
}
/* Fixed display size for each feature card's SVG illustration. */
.featureSvg {
height: 200px;
width: 200px;
}
================================================
FILE: website/src/css/custom.css
================================================
/**
 * Any CSS included here will be global. The classic template
 * bundles Infima by default. Infima is a CSS framework designed to
 * work well for content-centric websites.
 */
/* You can override the default Infima variables here. */
/* Light-mode palette: one primary color plus the darker/lighter steps
   Infima derives component states from. NOTE(review): this is the
   Docusaurus template's default green — confirm it matches BondAI branding. */
:root {
--ifm-color-primary: #2e8555;
--ifm-color-primary-dark: #29784c;
--ifm-color-primary-darker: #277148;
--ifm-color-primary-darkest: #205d3b;
--ifm-color-primary-light: #33925d;
--ifm-color-primary-lighter: #359962;
--ifm-color-primary-lightest: #3cad6e;
--ifm-code-font-size: 95%;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
--ifm-color-primary: #25c2a0;
--ifm-color-primary-dark: #21af90;
--ifm-color-primary-darker: #1fa588;
--ifm-color-primary-darkest: #1a8870;
--ifm-color-primary-light: #29d5b0;
--ifm-color-primary-lighter: #32d8b4;
--ifm-color-primary-lightest: #4fddbf;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}
================================================
FILE: website/src/pages/index.js
================================================
import React, { useEffect } from 'react';
import { useHistory } from 'react-router-dom';
import clsx from 'clsx';
import Link from '@docusaurus/Link';
import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
import Layout from '@theme/Layout';
import HomepageFeatures from '@site/src/components/HomepageFeatures';
import styles from './index.module.css';
// Hero banner for the landing page.
// NOTE(review): the JSX return value appears to have been stripped by the
// repo-dump extraction — `return ( );` is not valid JavaScript. Restore the
// original <header> markup from version control. As visible below, Home()
// redirects on mount, so this component is not currently rendered.
function HomepageHeader() {
const {siteConfig} = useDocusaurusContext();
return (
);
}
/**
 * Landing page for the site.
 *
 * Renders nothing itself: on mount it immediately redirects visitors to the
 * documentation intro page via the router (a client-side navigation, unlike
 * a window.location.href assignment which would force a full page reload).
 */
export default function Home() {
  const history = useHistory();

  useEffect(() => {
    history.push('/docs/intro');
  }, [history]); // history is stable, but listing it satisfies exhaustive-deps

  // null is the idiomatic "render nothing" value (the original returned '').
  return null;
}
================================================
FILE: website/src/pages/index.module.css
================================================
/**
 * CSS files with the .module.css suffix will be treated as CSS modules
 * and scoped locally.
 */
/* Full-width hero banner at the top of the landing page. */
.heroBanner {
padding: 4rem 0;
text-align: center;
position: relative;
overflow: hidden;
}
/* Tighten the banner padding on narrow (mobile) viewports. */
@media screen and (max-width: 996px) {
.heroBanner {
padding: 2rem;
}
}
/* Centered flex row for the hero's call-to-action buttons. */
.buttons {
display: flex;
align-items: center;
justify-content: center;
}
================================================
FILE: website/src/pages/markdown-page.md
================================================
---
title: Markdown page example
---
# Markdown page example
You don't need React to write simple standalone pages.
================================================
FILE: website/static/.nojekyll
================================================