Full Code of krohling/bondai for AI

Repository: krohling/bondai
Branch: main
Commit: b16cc11c06b2
Files: 186
Total size: 410.8 KB

Directory structure:
gitextract_tr4w9k_j/

├── .github/
│   └── workflows/
│       ├── deploy-website.yaml
│       └── deploy.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── bondai/
│   ├── __init__.py
│   ├── agents/
│   │   ├── __init__.py
│   │   ├── agent.py
│   │   ├── compression/
│   │   │   ├── __init__.py
│   │   │   ├── conversation_summarizer.py
│   │   │   ├── message_summarizer.py
│   │   │   └── prompts/
│   │   │       ├── conversation_summarizer_prompt_template.md
│   │   │       └── message_summarizer_prompt_template.md
│   │   ├── conversation_member.py
│   │   ├── conversational_agent.py
│   │   ├── group_chat/
│   │   │   ├── __init__.py
│   │   │   ├── group_conversation.py
│   │   │   ├── group_conversation_config.py
│   │   │   └── user_proxy.py
│   │   ├── messages.py
│   │   ├── prompts/
│   │   │   ├── __init__.py
│   │   │   ├── agent_message_prompt_template.md
│   │   │   ├── conversational_agent_system_prompt_template.md
│   │   │   ├── default_persona.py
│   │   │   └── react_agent_system_prompt_template.md
│   │   └── util.py
│   ├── api/
│   │   ├── __init__.py
│   │   ├── agent_wrapper.py
│   │   ├── api_error.py
│   │   ├── api_user_proxy.py
│   │   ├── client.py
│   │   ├── routes.py
│   │   ├── server.py
│   │   └── settings.py
│   ├── cli/
│   │   ├── __init__.py
│   │   ├── cli.py
│   │   ├── default_tools.py
│   │   └── personas/
│   │       ├── __init__.py
│   │       └── user_liaison_agent.py
│   ├── main.py
│   ├── memory/
│   │   ├── __init__.py
│   │   ├── archival/
│   │   │   ├── __init__.py
│   │   │   ├── datasources.py
│   │   │   └── tools.py
│   │   ├── conversation/
│   │   │   ├── __init__.py
│   │   │   ├── datasources.py
│   │   │   └── tools.py
│   │   ├── core/
│   │   │   ├── __init__.py
│   │   │   ├── datasources.py
│   │   │   └── tools.py
│   │   ├── memory_manager.py
│   │   └── prompts/
│   │       └── default_prompt_template.md
│   ├── models/
│   │   ├── __init__.py
│   │   ├── embedding_model.py
│   │   ├── llm.py
│   │   └── openai/
│   │       ├── __init__.py
│   │       ├── default_openai_connection_params.py
│   │       ├── env_vars.py
│   │       ├── openai_connection_params.py
│   │       ├── openai_embedding_model.py
│   │       ├── openai_llm.py
│   │       ├── openai_models.py
│   │       └── openai_wrapper.py
│   ├── prompt/
│   │   ├── __init__.py
│   │   ├── default_prompt_builder.py
│   │   ├── default_prompt_template.md
│   │   ├── jinja_prompt_builder.py
│   │   └── prompt_builder.py
│   ├── tools/
│   │   ├── __init__.py
│   │   ├── agent_tool.py
│   │   ├── alpaca_markets/
│   │   │   ├── __init__.py
│   │   │   ├── create_order.py
│   │   │   ├── env_vars.py
│   │   │   ├── get_account.py
│   │   │   ├── list_positions.py
│   │   │   └── response_formatter.py
│   │   ├── bland_ai/
│   │   │   ├── __init__.py
│   │   │   └── bland_ai_tools.py
│   │   ├── conversational/
│   │   │   ├── __init__.py
│   │   │   └── conversational_tools.py
│   │   ├── dalle_tool.py
│   │   ├── database/
│   │   │   ├── __init__.py
│   │   │   └── db_query.py
│   │   ├── file/
│   │   │   ├── __init__.py
│   │   │   ├── file_query.py
│   │   │   ├── file_read.py
│   │   │   └── file_write.py
│   │   ├── gmail/
│   │   │   ├── __init__.py
│   │   │   ├── list_emails.py
│   │   │   └── query_emails.py
│   │   ├── langchain_tool.py
│   │   ├── python_repl_tool.py
│   │   ├── response_query.py
│   │   ├── search/
│   │   │   ├── __init__.py
│   │   │   ├── duck_duck_go_search.py
│   │   │   └── google_search.py
│   │   ├── shell_tool.py
│   │   ├── task_completed_tool.py
│   │   ├── tool.py
│   │   ├── vision/
│   │   │   ├── __init__.py
│   │   │   └── image_analysis_tool.py
│   │   └── website/
│   │       ├── __init__.py
│   │       ├── download_file.py
│   │       ├── extract_hyperlinks.py
│   │       ├── html_query.py
│   │       └── query.py
│   └── util/
│       ├── __init__.py
│       ├── caching/
│       │   ├── __init__.py
│       │   └── llm_cache.py
│       ├── document_parser.py
│       ├── event_mixin.py
│       ├── misc.py
│       ├── model_logger.py
│       ├── runnable.py
│       ├── semantic_search.py
│       └── web.py
├── docker/
│   ├── Dockerfile
│   └── docker-compose.yml
├── requirements.txt
├── sample.env
├── scripts/
│   └── bondai
├── setup.py
├── tests/
│   ├── api-client/
│   │   └── test_api_client.py
│   ├── conversational/
│   │   ├── hierarchical_conversation.py
│   │   └── single_agent.py
│   ├── debug/
│   │   └── test_error.py
│   ├── getting-started/
│   │   └── example-1.py
│   ├── memory/
│   │   ├── __init__.py
│   │   ├── single_agent_with_memory.py
│   │   └── util.py
│   └── vision/
│       └── single_agent_with_vision.py
└── website/
    ├── .gitignore
    ├── README.md
    ├── babel.config.js
    ├── docs/
    │   ├── agent-memory/
    │   │   ├── agent-memory.md
    │   │   ├── archival-memory.md
    │   │   ├── conversation-memory.md
    │   │   ├── core-memory.md
    │   │   └── memory-manager.md
    │   ├── agents/
    │   │   ├── agents.md
    │   │   ├── conversational-agent.md
    │   │   └── react-agent.md
    │   ├── api-spec/
    │   │   ├── _category_.json
    │   │   ├── add-agent-tool.md
    │   │   ├── api-client.md
    │   │   ├── create-agent.md
    │   │   ├── get-agent.md
    │   │   ├── get-tools.md
    │   │   ├── getting-started.md
    │   │   ├── list-agents.md
    │   │   ├── remove-agent-tool.md
    │   │   ├── send-message.md
    │   │   ├── stop-agent.md
    │   │   └── ws-events.md
    │   ├── azure.md
    │   ├── cli.md
    │   ├── docker.md
    │   ├── examples/
    │   │   ├── _category_.json
    │   │   ├── api-client.md
    │   │   ├── code-interpreter.md
    │   │   ├── home-automation.md
    │   │   ├── investor-agent.md
    │   │   └── online-research/
    │   │       ├── metformin-research.md
    │   │       └── online-research.md
    │   ├── getting-started.md
    │   ├── intro.md
    │   ├── multi-agent-systems/
    │   │   ├── examples.md
    │   │   ├── group-conversation.md
    │   │   ├── multi-agent-systems.md
    │   │   └── team-conversation-config.md
    │   └── tools/
    │       ├── _category_.json
    │       ├── custom-tool.md
    │       └── getting-started.md
    ├── docusaurus.config.js
    ├── package.json
    ├── sidebars.js
    ├── src/
    │   ├── components/
    │   │   └── HomepageFeatures/
    │   │       ├── index.js
    │   │       └── styles.module.css
    │   ├── css/
    │   │   └── custom.css
    │   └── pages/
    │       ├── index.js
    │       ├── index.module.css
    │       └── markdown-page.md
    └── static/
        └── .nojekyll

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/workflows/deploy-website.yaml
================================================
name: Deploy BondAI Website

on:
  push:
    branches:
      - main

jobs:
  deploy:
    runs-on: ubuntu-latest

    steps:
    - name: Checkout code
      uses: actions/checkout@v2

    - name: Set up Node.js
      uses: actions/setup-node@v2
      with:
        node-version: '19.7'

    - name: Install Docusaurus dependencies
      run: |
        cd website
        npm install
        npm run build

    - name: Deploy to S3
      uses: jakejarvis/s3-sync-action@master
      with:
        args: --acl public-read --follow-symlinks --delete
      env:
        AWS_S3_BUCKET: bondai-docs
        AWS_ACCESS_KEY_ID: ${{ secrets.DOCS_DEPLOY_AWS_ACCESS_KEY }}
        AWS_SECRET_ACCESS_KEY: ${{ secrets.DOCS_DEPLOY_AWS_SECRET_ACCESS_KEY }}
        AWS_REGION: 'us-west-2'
        SOURCE_DIR: 'website/build'

    - name: Invalidate CloudFront distribution
      uses: chetan/invalidate-cloudfront-action@v1.3
      env:
        DISTRIBUTION: E1JJN112WBIR8P
        PATHS: '/*'
        AWS_REGION: 'us-west-2'
        AWS_ACCESS_KEY_ID: ${{ secrets.DOCS_DEPLOY_AWS_ACCESS_KEY }}
        AWS_SECRET_ACCESS_KEY: ${{ secrets.DOCS_DEPLOY_AWS_SECRET_ACCESS_KEY }}


================================================
FILE: .github/workflows/deploy.yaml
================================================
name: Deploy to PyPI and DockerHub

on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+[a-zA-Z0-9]*'
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest

    steps:
    - name: Checkout code
      uses: actions/checkout@v2

    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: '3.x'

    - name: Install Python dependencies
      run: |
        python -m pip install --upgrade pip
        pip install setuptools wheel twine setuptools_scm

    - name: Build and deploy to PyPI
      run: |
        python setup.py sdist bdist_wheel
        twine upload dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN }}

    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v1

    - name: Login to DockerHub
      uses: docker/login-action@v1
      with:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_TOKEN }}

    - name: Determine if pre-release
      id: prerelease_check
      run: |
        if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo "This is a stable release version."
          echo "::set-output name=tag_list::krohling/bondai:${{ github.ref_name }},krohling/bondai:latest"
        else
          echo "This is a pre-release version."
          echo "::set-output name=tag_list::krohling/bondai:${{ github.ref_name }}"
        fi

    - name: Build and push Docker image
      uses: docker/build-push-action@v2
      with:
        context: ./docker
        push: true
        tags: ${{ steps.prerelease_check.outputs.tag_list }}
        platforms: linux/amd64,linux/arm64


================================================
FILE: .gitignore
================================================
*.DS_Store
response_query_storage
gmail-token.pickle
.debug
.memory
.cache
misc
ui/.next
ui/node_modules
ui/agent-volume
ui/data-volume
ui/app/backups
ui/tools
ui/app/_backup
ui/app/test/
docker/agent-volume/
.vscode
website/build
node_modules/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
!/ui/lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

notes.txt

================================================
FILE: .pre-commit-config.yaml
================================================
repos:
  - repo: https://github.com/psf/black
    rev: stable
    hooks:
      - id: black


================================================
FILE: CONTRIBUTING.md
================================================
# Contribution Instructions & Guidelines

Hello there! Any kind of contribution to **BondAI** is most welcome!

- If you have a question, please use GitHub
  [discussions](https://github.com/krohling/bondai/discussions).
- If you found a bug or have a feature request, please use GitHub
  [issues](https://github.com/krohling/bondai/issues).
- If you fixed a bug or implemented a new feature, please do a pull request. If it
  is a larger change or addition, it would be great to first discuss it through an
  [issue](https://github.com/krohling/bondai/issues).

## Development Setup

Warning: If you run **BondAI** on your own system, tools that interact with the file system will have full access to your local disk! I highly recommend running and testing inside a Docker container.

Always be careful when approving any code!
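
A minimal sketch of a containerized workflow, assuming the image built from `docker/Dockerfile` includes the CLI (the published image is covered in the README's Docker section; the `bondai-dev` tag is just a local name):

```bash
# Build a local image from the repository's docker/ context (the same context
# the deploy workflow uses) and run the CLI inside it.
docker build -t bondai-dev ./docker
docker run -it --rm \
    -e OPENAI_API_KEY=sk-XXXXXXXXXX \
    -v "$(pwd)/agent-volume:/agent-volume" \
    -w /agent-volume \
    bondai-dev bondai
```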

## Tools

When you contribute code, please use **black** for code formatting. 
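
For example, one way to apply it, given the `black` hook in `.pre-commit-config.yaml` (a minimal sketch, run from the repository root):

```bash
# Install pre-commit, register the repository's hooks, then format everything.
pip install pre-commit
pre-commit install
pre-commit run --all-files   # or: pip install black && black .
```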

## Branching & Release Strategy

The default branch is called main.
It contains the latest features, which should be ready for deployment.
It is not possible to push to it directly.
Instead, for every feature, a branch should be created, which will then be merged back into main with a pull request.


================================================
FILE: LICENSE
================================================
Copyright 2023 Kevin Rohling

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

================================================
FILE: MANIFEST.in
================================================
include bondai/prompt/default_prompt_template.md
include bondai/cli/onboarding_prompt_template.md

================================================
FILE: README.md
================================================
<a href="https://bondai.dev">
<p align="center">
<img src="assets/bondai-logo.png" alt="Description or Alt text" style="border-radius: 10px; width: 50%;"  alt="logo">
</p>
</a>

<p align="center">
    <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
    <a href="https://pypi.org/project/bondai/"><img src="https://img.shields.io/pypi/v/bondai" alt="PyPI"></a>
    <a href="https://hub.docker.com/r/krohling/bondai"><img src="https://img.shields.io/docker/v/krohling/bondai?logo=docker" alt="Docker"></a>
    <a href="https://colab.research.google.com/drive/1Rmzosq6LD_ZR3MkqQO1M1Af27VAaYNRE?usp=sharing" style={{marginLeft: '10px'}}>
        <img src="https://colab.research.google.com/assets/colab-badge.svg" style={{marginLeft: '10px'}} alt="PyPI"/>
    </a>
</p>
<p align="center"><em>Build highly capable Single and Multi-Agent Systems.</em></p>

# <a href="https://bondai.dev">BondAI Homepage</a>

Check out the BondAI Homepage ([https://bondai.dev](https://bondai.dev)) for in-depth documentation, examples, and the API specification.

# Getting Started

There are 3 ways to use BondAI:

1) 🛠️ **Command Line Interface (CLI)** - This is the easiest way to get up and running fast. Run BondAI on your command line with a pre-configured set of tools.

2) 🐋 **Docker** - Running BondAI in a Docker container is recommended if you plan on using tools that run code or directly access your shell.

3) 🏗️ **Start Coding with BondAI** - Integrate BondAI into your own codebase and start building your own agents.

## 🚀 Installation

Installing BondAI is easy:

```bash
pip install bondai
```

## 🛠️ Command Line Interface (CLI)

Once you've installed BondAI with `pip install bondai`, the CLI is available as an executable; simply run `bondai` in your shell. [Learn more about all of the tools and options available through the CLI](https://bondai.dev/docs/cli).

Before running `bondai` you will need to set the OPENAI_API_KEY environment variable.
```bash
export OPENAI_API_KEY=sk-XXXXXXXXXX
```

Once the environment variable has been set you can run `bondai` to start the CLI.

```
Loading BondAI...
******************ENTERING CHAT******************
You are entering a chat with BondAI...
You can exit any time by typing 'exit'.

Hello! I'm BondAI, your friendly and helpful assistant. I'm here to assist you with any tasks or questions you might have. How can I assist you today?

I want you to write a story about unicorns and save it to a file named unicorns.md.
Using tool file_write: Writing a story about unicorns and saving it to a file named unicorns.md
Using tool final_answer...

A story about unicorns has been successfully written and saved to a file named unicorns.md. The story is set in an enchanted forest and describes the magical and majestic nature of unicorns, their daily routines, and their harmonious relationship with other creatures in the forest.
```


## 🐋 Docker

BondAI Docker images are available on [DockerHub here](https://hub.docker.com/r/krohling/bondai). If you intend to use tools that run arbitrary code (*PythonREPLTool*) or access your shell (*ShellTool*) it is highly recommended that you run BondAI in a Docker container as these tools can damage your machine.

Before running the BondAI Docker container it is recommended that you create a directory named 'agent-volume' and mount it as a volume on the container. This will be used as the Agent's working directory and allows you to easily share files with the Agent.

```bash
mkdir agent-volume
docker pull krohling/bondai:latest
docker run -it --rm \
           -v ./agent-volume:/agent-volume \
           -w /agent-volume \
           -e OPENAI_API_KEY=sk-XXXXXXXXXX \
           krohling/bondai:latest bondai
```

## 🔥 Start Coding with BondAI

BondAI has a straightforward API for creating powerful AI Agents. Check out our [examples](https://bondai.dev/docs/category/examples) for ideas on how to get started.  Remember to set your *OPENAI_API_KEY* environment variable before running your BondAI Agent.

```python
from bondai.agents import Agent
from bondai.tools.search import DuckDuckGoSearchTool
from bondai.tools.website import WebsiteQueryTool
from bondai.tools.file import FileWriteTool

task = """I want you to research the usage of Metformin as a drug to treat aging and aging related illness. 
You should only use reputable information sources, ideally peer reviewed scientific studies. 
I want you to summarize your findings in a document named metformin.md and include links to the references and resources you used to find the information. 
Additionally, in the last section of your document you should provide a recommendation for a 43 year old male, in good health and who regularly exercises, as to whether he would benefit from taking Metformin. 
You should explain your recommendation and justify it with sources. 
Finally, you should highlight potential risks and tradeoffs from taking the medication."""

Agent(tools=[
  DuckDuckGoSearchTool(),
  WebsiteQueryTool(),
  FileWriteTool()
]).run(task)
```

## BondAI Integrations

BondAI comes out of the box with a powerful set of integrations.

| | Integration | Description |
| -------- | ------- | ------- |
| <img src="assets/logos/openai-logo.png" alt="openai logo" width="50"/> | **OpenAI** | BondAI supports any combination of OpenAI models and services, including GPT-4, GPT-3.5, DALL-E 3, and Embeddings. |
| <img src="assets/logos/azure-logo.png" alt="azure logo" width="50"/> | **Microsoft Azure** | BondAI fully supports connectivity to the GPT-N, DALL-E, and Embedding APIs through [Microsoft's Azure OpenAI services](https://azure.microsoft.com/en-us/products/ai-services/openai-service). |
| <img src="assets/logos/google-logo.png" alt="google logo" width="50"/>  | **Google Search**    | Allows BondAI to search the internet. [Requires a Google Search API Key and CSE ID](https://developers.google.com/custom-search/v1/introduction) |
| <img src="assets/logos/duckduckgo-logo.png" alt="duckduckgo logo" width="50"/> | **DuckDuckGo**     | Allows BondAI to search the internet. No API keys required. |
| <img src="assets/logos/alpaca-markets-logo.jpeg" alt="alpaca markets logo" width="50"/> | **Alpaca Markets**     | Allows BondAI to buy and sell stocks and crypto. [Requires an Alpaca Markets account.](https://alpaca.markets/)  |
| <img src="assets/logos/postgres-logo.jpeg" alt="postgres logo" width="75"/>    | **PostgreSQL**    | BondAI can automatically extract the schema from a Postgres DB and process natural language queries. |
| <img src="assets/logos/blandai-logo.jpeg" alt="bland.ai logo" width="50"/> | **Bland AI**     | Allows BondAI to make phone calls and process/retrieve call transcripts. [Requires a Bland.ai account.](https://www.bland.ai/)  |
| <img src="assets/logos/gmail-logo.png" alt="gmail logo" width="50"/> | **Gmail**     | Allows BondAI to search and read emails.  |


================================================
FILE: bondai/__init__.py
================================================


================================================
FILE: bondai/agents/__init__.py
================================================
from .conversational_agent import ConversationalAgent
from .agent import Agent, DEFAULT_MESSAGE_PROMPT_TEMPLATE
from .conversation_member import ConversationMember, ConversationMemberEventNames
from .messages import (
    AgentMessage,
    SystemMessage,
    ConversationMessage,
    ToolUsageMessage,
    AgentMessageList,
    message_to_dict,
    USER_MEMBER_NAME,
)
from .util import (
    AgentStatus,
    AgentEventNames,
    AgentException,
    BudgetExceededException,
    MaxStepsExceededException,
    parse_response_content_message,
)

__all__ = [
    "ConversationalAgent",
    "Agent",
    "parse_response_content_message",
    "DEFAULT_MESSAGE_PROMPT_TEMPLATE",
    "AgentStatus",
    "AgentEventNames",
    "AgentException",
    "BudgetExceededException",
    "MaxStepsExceededException",
    "ConversationMember",
    "ConversationMemberEventNames",
    "AgentMessage",
    "SystemMessage",
    "ConversationMessage",
    "ToolUsageMessage",
    "AgentMessageList",
    "message_to_dict",
    "USER_MEMBER_NAME",
]


================================================
FILE: bondai/agents/agent.py
================================================
import os
import uuid
import traceback
from pydantic import BaseModel
from datetime import datetime
from typing import Dict, List, Tuple, Callable
from bondai.util import EventMixin, Runnable, load_local_resource
from bondai.tools import Tool, ResponseQueryTool
from bondai.models import LLM, EmbeddingModel
from bondai.memory import MemoryManager
from bondai.prompt import JinjaPromptBuilder
from bondai.models.openai import (
    OpenAILLM,
    OpenAIEmbeddingModel,
    OpenAIModelNames,
    get_total_cost,
)
from .conversation_member import ConversationMember
from .messages import AgentMessage, AgentMessageList, SystemMessage, ToolUsageMessage
from .compression import summarize_conversation, summarize_messages
from .util import (
    AgentStatus,
    AgentEventNames,
    AgentException,
    BudgetExceededException,
    MaxStepsExceededException,
    ContextLengthExceededException,
    count_request_tokens,
    format_llm_messages,
    execute_tool,
)


DEFAULT_MAX_TOOL_RETRIES = 5
DEFAULT_MAX_TOOL_RESPONSE_TOKENS = 2000
DEFAULT_SYSTEM_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "react_agent_system_prompt_template.md")
)
DEFAULT_MESSAGE_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "agent_message_prompt_template.md")
)


class FinalAnswerParameters(BaseModel):
    results: str


class FinalAnswerTool(Tool):
    def __init__(self):
        super().__init__(
            "final_answer",
            "Use the final_answer tool once you have completed your TASK. Provide a highly detailed description of the results of your task in the 'results' parameter.",
            FinalAnswerParameters,
        )

    def run(self, results: str) -> Tuple[str, bool]:
        return results, True


class Agent(EventMixin, Runnable):
    def __init__(
        self,
        llm: LLM | None = None,
        embedding_model: EmbeddingModel | None = None,
        tools: List[Tool] | None = None,
        quiet: bool = True,
        allowed_events: List[str] | None = None,
        messages: List[AgentMessage] | None = None,
        system_prompt_sections: List[Callable[[], str]] | None = None,
        system_prompt_builder: Callable[..., str] = None,
        message_prompt_builder: Callable[..., str] = None,
        memory_manager: MemoryManager | None = None,
        max_context_length: int = None,
        max_context_pressure_ratio: float = 0.8,
        max_tool_retries: int = DEFAULT_MAX_TOOL_RETRIES,
        max_tool_response_tokens=DEFAULT_MAX_TOOL_RESPONSE_TOKENS,
        enable_context_compression: bool = False,
        enable_final_answer_tool: bool = True,
    ):
        Runnable.__init__(self)
        if allowed_events is None:
            allowed_events = [
                AgentEventNames.TOOL_SELECTED,
                AgentEventNames.TOOL_COMPLETED,
                AgentEventNames.TOOL_ERROR,
                AgentEventNames.STREAMING_CONTENT_UPDATED,
                AgentEventNames.STREAMING_FUNCTION_UPDATED,
                AgentEventNames.CONTEXT_COMPRESSION_REQUESTED,
            ]
        EventMixin.__init__(self, allowed_events=allowed_events)

        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )
        if tools is None:
            tools = []
        if system_prompt_sections is None:
            system_prompt_sections = []
        if messages is None:
            messages = []

        self._id: str = str(uuid.uuid4())
        self._status: AgentStatus = AgentStatus.IDLE
        self._messages = AgentMessageList(messages=messages)
        self._llm: LLM = llm
        self._embedding_model: EmbeddingModel = embedding_model
        self._tools: List[Tool] = tools
        self._quiet: bool = quiet
        self._system_prompt_sections: List[Callable[[], str]] = system_prompt_sections
        self._system_prompt_builder: Callable[..., str] = system_prompt_builder
        self._message_prompt_builder: Callable[..., str] = message_prompt_builder
        self._memory_manager = memory_manager
        self._max_context_length = (
            max_context_length if max_context_length else (self._llm.max_tokens * 0.95)
        )
        self._max_context_pressure_ratio = max_context_pressure_ratio
        self._max_tool_retries = max_tool_retries
        self._max_tool_response_tokens = max_tool_response_tokens
        self._enable_context_compression = enable_context_compression
        if self._memory_manager:
            self._tools.extend(self._memory_manager.tools)
            self._system_prompt_sections.append(self._memory_manager)
        if self._system_prompt_builder is None:
            self._system_prompt_builder = JinjaPromptBuilder(
                DEFAULT_SYSTEM_PROMPT_TEMPLATE
            )
        if self._message_prompt_builder is None:
            self._message_prompt_builder = JinjaPromptBuilder(
                DEFAULT_MESSAGE_PROMPT_TEMPLATE
            )
        if enable_final_answer_tool:
            self._tools.append(FinalAnswerTool())

    @property
    def id(self) -> str:
        return self._id

    @property
    def status(self) -> AgentStatus:
        return self._status

    @property
    def tools(self) -> List[Tool]:
        return self._tools

    def clear_messages(self):
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot reset memory while agent is in a running state."
            )
        self._messages.clear()

    def add_tool(self, tool: Tool):
        if not any([t.name == tool.name for t in self._tools]):
            self.tools.append(tool)

    def remove_tool(self, tool_name: str):
        self._tools = [t for t in self._tools if t.name != tool_name]

    def to_dict(self) -> Dict:
        return {"id": self.id, "tools": [t.name for t in self._tools]}

    def save_state(self) -> Dict:
        if self._status == AgentStatus.RUNNING:
            raise AgentException("Cannot save agent state while it is running.")

        state = {"tools": {}}

        for tool in self._tools:
            state["tools"][tool.name] = tool.save_state()

        return state

    def load_state(self, state: Dict):
        if self._status == AgentStatus.RUNNING:
            raise AgentException("Cannot load agent state while it is running.")

        for tool in self._tools:
            if tool.name in state["tools"]:
                tool.load_state(state["tools"][tool.name])

    def _is_context_pressure_too_high(
        self,
        llm_messages: List[Dict[str, str]],
        tools: List[Tool] | None = None,
    ) -> bool:
        if tools is None:
            tools = []
        context_pressure_ratio = float(
            count_request_tokens(self._llm, llm_messages, tools)
        ) / float(self._max_context_length)
        return context_pressure_ratio > self._max_context_pressure_ratio

    def _get_llm_response(
        self,
        messages: List[Dict] | None = None,
        tools: List[Tool] | None = None,
        content_stream_callback: Callable[[str], None] | None = None,
        function_stream_callback: Callable[[str], None] | None = None,
    ) -> Tuple[str | None, Dict | None]:
        if messages is None:
            messages = []
        if tools is None:
            tools = []

        request_tokens = count_request_tokens(
            llm=self._llm, messages=messages, tools=tools
        )
        if request_tokens > self._llm.max_tokens:
            raise ContextLengthExceededException(
                f"Context length ({request_tokens}) exceeds maximum tokens allowed by LLM: {self._llm.max_tokens}"
            )

        llm_functions = list(map(lambda t: t.get_tool_function(), tools))

        if (
            self._llm.supports_streaming
        ):  # and (any([t.supports_streaming for t in tools]) or content_stream_callback):

            def _function_stream_callback(function_name, arguments_buffer):
                streaming_tools: List[Tool] = [
                    t for t in tools if t.name == function_name and t.supports_streaming
                ]
                if len(streaming_tools) > 0:
                    tool: Tool = streaming_tools[0]
                    tool.handle_stream_update(arguments_buffer)
                if function_stream_callback:
                    function_stream_callback(function_name, arguments_buffer)
                self._trigger_event(
                    AgentEventNames.STREAMING_FUNCTION_UPDATED,
                    self,
                    function_name,
                    arguments_buffer,
                )

            def _content_stream_callback(content_buffer):
                if content_stream_callback:
                    content_stream_callback(content_buffer)
                self._trigger_event(
                    AgentEventNames.STREAMING_CONTENT_UPDATED, self, content_buffer
                )

            llm_response, llm_response_function = self._llm.get_streaming_completion(
                messages=messages,
                functions=llm_functions,
                function_stream_callback=_function_stream_callback,
                content_stream_callback=_content_stream_callback,
            )
        else:
            llm_response, llm_response_function = self._llm.get_completion(
                messages=messages,
                functions=llm_functions,
                # function_stream_callback=function_stream_callback,
                # content_stream_callback=content_stream_callback
            )

        return llm_response, llm_response_function

    def run(
        self,
        task: str,
        max_steps: int = None,
        max_budget: float = None,
    ) -> ToolUsageMessage | str:
        if self._status == AgentStatus.RUNNING:
            raise AgentException("Cannot start agent while it is in a running state.")
        self._status = AgentStatus.RUNNING
        try:
            return self._run_tool_loop(
                tools=self._tools,
                task=task,
                starting_cost=get_total_cost(),
                max_budget=max_budget,
                max_steps=max_steps,
            )
        finally:
            self._status = AgentStatus.IDLE

    def run_async(
        self,
        task: str,
        max_steps: int = None,
        max_budget: float = None,
    ):
        """Runs the agent's task in a separate thread."""
        if self._status == AgentStatus.RUNNING:
            raise AgentException("Cannot start agent while it is in a running state.")

        args = (task, max_steps, max_budget)
        self._start_execution_thread(target=self.run, args=args)

    def stop(self, timeout=10):
        """Gracefully stops the thread, with a timeout."""
        self._force_stop = True
        for tool in self._tools:
            tool.stop()

        super().stop(timeout=timeout)

    def _run_tool_loop(
        self,
        tools: List[Tool],
        starting_cost: float,
        max_budget: float = None,
        max_steps: int = None,
        max_tool_retries: int = None,
        task: str | None = None,
        prompt_vars: Dict | None = None,
        return_conversational_responses: bool = False,
        retain_tool_messages_in_context: bool = True,
        addition_context_messages: List[AgentMessage] | None = None,
        conversation_members: List[ConversationMember] | None = None,
        content_stream_callback: Callable[[str], None] | None = None,
        function_stream_callback: Callable[[str], None] | None = None,
    ) -> ToolUsageMessage | str:
        if addition_context_messages is None:
            addition_context_messages = []
        if conversation_members is None:
            conversation_members = []
        if max_tool_retries is None:
            max_tool_retries = self._max_tool_retries

        error_count = 0
        step_count = 0
        last_error_message = None
        local_messages = []
        self._force_stop = False
        response_query_tool = ResponseQueryTool(
            llm=self._llm, embedding_model=self._embedding_model
        )

        def append_message(message):
            if isinstance(message, SystemMessage):
                # Keep only the most recent SystemMessage in the local context.
                system_messages = [
                    m for m in local_messages if isinstance(m, SystemMessage)
                ]
                for m in system_messages:
                    local_messages.remove(m)

            local_messages.append(message)
            if retain_tool_messages_in_context:
                self._messages.add(message)
                if self._memory_manager and self._memory_manager.conversation_memory:
                    self._memory_manager.conversation_memory.add(message)

        while not self._force_stop:
            step_count += 1
            if max_budget and get_total_cost() - starting_cost > max_budget:
                raise BudgetExceededException()
            if max_steps and step_count > max_steps:
                raise MaxStepsExceededException()

            if (
                len(response_query_tool.responses) > 0
                and response_query_tool not in tools
            ):
                tools.append(response_query_tool)

            if self._enable_context_compression:
                self._compress_llm_context(
                    tools=tools,
                    last_error_message=last_error_message,
                    conversation_members=conversation_members,
                    additional_context_messages=addition_context_messages
                    + local_messages,
                    prompt_vars=prompt_vars,
                )

            llm_context = self._build_llm_context(
                messages=AgentMessageList(
                    self._messages + addition_context_messages + local_messages
                ),
                tools=tools,
                task=task,
                last_error_message=last_error_message,
                conversation_members=conversation_members,
                prompt_vars=prompt_vars,
            )

            llm_response_content, llm_response_function = self._get_llm_response(
                messages=llm_context,
                tools=tools,
                content_stream_callback=content_stream_callback,
                function_stream_callback=function_stream_callback,
            )
            # print(llm_response_content)

            last_error_message = None
            if llm_response_function and any(
                [
                    m.name == llm_response_function.get("tool_name")
                    for m in conversation_members
                ]
            ):
                message = f"""MessageSendFailure: You attempted to send a message to {llm_response_function.get('tool_name')} but this message failed.
                To send a message to {llm_response_function.get('tool_name')} you must use the 'send_message' tool or use this format in your response:

                ```
                {llm_response_function.get('tool_name')}: Include your message here.
                ```
                """
                append_message(SystemMessage(message=message))
            if llm_response_function:
                tool_message = ToolUsageMessage(
                    tool_name=llm_response_function["name"],
                    tool_arguments=llm_response_function.get("arguments") or {},
                )
                self._trigger_event(AgentEventNames.TOOL_SELECTED, self, tool_message)
                self._handle_llm_function(tool_message=tool_message, tools=tools)

                if (
                    isinstance(tool_message.tool_output, str)
                    and self._llm.count_tokens(tool_message.tool_output)
                    > self._max_tool_response_tokens
                ):
                    response_id = response_query_tool.add_response(
                        tool_message.tool_output
                    )
                    tool_message.tool_output = f"The result from this tool was greater than {self._max_tool_response_tokens} tokens and could not be displayed. However, you can use the response_query tool to ask questions about the content of this response. Just use response_id = {response_id}."

                append_message(tool_message)
                if tool_message.success:
                    error_count = 0
                    self._trigger_event(
                        AgentEventNames.TOOL_COMPLETED, self, tool_message
                    )
                    if tool_message.agent_halted:
                        return tool_message
                else:
                    error_count += 1
                    last_error_message = str(tool_message.error)
                    message = "ToolUsageError: Your last tool usage failed and MUST BE CORRECTED. If this error is not corrected you will not be able to proceed."
                    append_message(SystemMessage(message=message))
                    self._trigger_event(AgentEventNames.TOOL_ERROR, self, tool_message)
                    if error_count >= max_tool_retries:
                        return tool_message
            elif llm_response_content and return_conversational_responses:
                return llm_response_content
            else:
                error_count += 1
                message = "InvalidResponseError: The response does not conform to the required format. A function selection was expected, but none was provided."
                append_message(SystemMessage(message=message))
                if error_count >= max_tool_retries:
                    raise AgentException(message)

        if self._force_stop:
            self._force_stop = False
            raise AgentException("Agent was forcibly stopped.")

    def _build_llm_context(
        self,
        messages: AgentMessageList,
        tools: List[Tool] | None = None,
        task: str | None = None,
        last_error_message: str | None = None,
        conversation_members: List[ConversationMember] | None = None,
        truncate_context: bool = True,
        prompt_vars: Dict | None = None,
    ) -> List[Dict[str, str]]:
        if tools is None:
            tools = []
        if conversation_members is None:
            conversation_members = []
        if prompt_vars is None:
            prompt_vars = {}

        prompt_sections = []
        for s in self._system_prompt_sections:
            if callable(s):
                prompt_sections.append(s())
            else:
                prompt_sections.append(s)

        system_prompt: str = self._system_prompt_builder(
            conversation_members=conversation_members,
            tools=tools,
            task=task,
            prompt_sections=prompt_sections,
            error_message=last_error_message,
            **prompt_vars,
        )

        # print(system_prompt)
        llm_context = format_llm_messages(
            system_prompt, messages, self._message_prompt_builder
        )

        if truncate_context:
            reduced_messages = AgentMessageList(messages)
            while (
                self._is_context_pressure_too_high(llm_context, tools)
                and len(reduced_messages) > 0
            ):
                reduced_messages.remove(reduced_messages[0])
                llm_context = format_llm_messages(
                    system_prompt, reduced_messages, self._message_prompt_builder
                )

        return llm_context

    def _compress_llm_context(
        self,
        tools: List[Tool] | None = None,
        last_error_message: str | None = None,
        conversation_members: List[ConversationMember] | None = None,
        additional_context_messages: List[AgentMessage] | None = None,
        prompt_vars: Dict | None = None,
    ) -> None:
        if tools is None:
            tools = []
        if conversation_members is None:
            conversation_members = []
        if additional_context_messages is None:
            additional_context_messages = []

        all_context_messages = AgentMessageList(
            self._messages + additional_context_messages
        )

        llm_context = self._build_llm_context(
            messages=all_context_messages,
            tools=tools,
            last_error_message=last_error_message,
            conversation_members=conversation_members,
            truncate_context=False,
            prompt_vars=prompt_vars,
        )

        if self._is_context_pressure_too_high(llm_context, tools):
            # Try summarizing individual messages
            # TODO: Give the agent an opportunity to save information to Archival database

            summarize_messages(
                llm=self._llm,
                messages=self._messages[:-1],
                message_prompt_builder=self._message_prompt_builder,
            )

            llm_context = self._build_llm_context(
                messages=all_context_messages,
                tools=tools,
                last_error_message=last_error_message,
                conversation_members=conversation_members,
                truncate_context=False,
                prompt_vars=prompt_vars,
            )

            if self._is_context_pressure_too_high(llm_context, tools):
                # Try summarizing the entire conversation

                last_message = self._messages[-1]
                summary_message = summarize_conversation(
                    llm=self._llm,
                    messages=self._messages[:-1],
                    message_prompt_builder=self._message_prompt_builder,
                )
                self._messages.clear()
                self._messages.add(summary_message)
                self._messages.add(last_message)

                all_context_messages = AgentMessageList(
                    self._messages + additional_context_messages
                )
                llm_context = self._build_llm_context(
                    messages=all_context_messages,
                    tools=tools,
                    last_error_message=last_error_message,
                    conversation_members=conversation_members,
                    truncate_context=False,
                    prompt_vars=prompt_vars,
                )

                if self._is_context_pressure_too_high(llm_context, tools):
                    # Fire a message for group conversation compression
                    self._trigger_event(
                        AgentEventNames.CONTEXT_COMPRESSION_REQUESTED, self
                    )

                    llm_context = self._build_llm_context(
                        messages=all_context_messages,
                        tools=tools,
                        last_error_message=last_error_message,
                        conversation_members=conversation_members,
                        truncate_context=False,
                        prompt_vars=prompt_vars,
                    )

                    if self._is_context_pressure_too_high(llm_context, tools):
                        print(
                            "Warning: Context compression failed to relieve pressure."
                        )

    def _handle_llm_function(self, tool_message: ToolUsageMessage, tools: List[Tool]):
        tool_starting_cost = get_total_cost()

        try:
            tool_output = execute_tool(
                tool_name=tool_message.tool_name,
                tool_arguments=tool_message.tool_arguments,
                tools=tools,
            )

            agent_halted = False
            if isinstance(tool_output, tuple):
                tool_output, agent_halted = tool_output

            tool_message.agent_halted = agent_halted
            tool_message.tool_output = tool_output
            tool_message.success = True
        except Exception as e:
            # traceback.print_exc()
            tool_message.error = e

        tool_message.completed_at = datetime.now()
        tool_message.cost = get_total_cost() - tool_starting_cost


================================================
FILE: bondai/agents/compression/__init__.py
================================================
from .conversation_summarizer import summarize_conversation
from .message_summarizer import summarize_messages

__all__ = ["summarize_conversation", "summarize_messages"]


================================================
FILE: bondai/agents/compression/conversation_summarizer.py
================================================
import os
from typing import List
from bondai.models import LLM
from bondai.prompt import PromptBuilder, JinjaPromptBuilder
from bondai.util import load_local_resource
from bondai.agents.messages import (
    AgentMessage,
    SummaryMessage,
)

DEFAULT_SUMMARY_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "conversation_summarizer_prompt_template.md")
)


def summarize_conversation(
    llm: LLM,
    messages: List[AgentMessage],
    message_prompt_builder: PromptBuilder,
    summary_prompt_builder: PromptBuilder = JinjaPromptBuilder(
        DEFAULT_SUMMARY_PROMPT_TEMPLATE
    ),
) -> AgentMessage:
    if not messages:
        return []

    # Format the messages
    message_prompts = [
        message_prompt_builder.build_prompt(
            message=msg,
        )
        for msg in messages
    ]

    # Get the summary for the entire conversation
    prompt = summary_prompt_builder.build_prompt(messages=message_prompts)
    summary, _ = llm.get_completion(messages=[{"role": "system", "content": prompt}])

    # Return the summary wrapped in a SummaryMessage
    return SummaryMessage(
        message=summary,
        children=list(messages),
        timestamp=messages[-1].timestamp,
    )


================================================
FILE: bondai/agents/compression/message_summarizer.py
================================================
import os
from typing import List
from concurrent.futures import ThreadPoolExecutor, as_completed
from bondai.models import LLM
from bondai.prompt import PromptBuilder, JinjaPromptBuilder
from bondai.util import load_local_resource
from bondai.agents.messages import AgentMessage, ConversationMessage, ToolUsageMessage

MIN_SUMMARIZABLE_LENGTH = 250
DEFAULT_SUMMARY_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "message_summarizer_prompt_template.md")
)


def summarize_messages(
    llm: LLM,
    messages: List[AgentMessage],
    message_prompt_builder: PromptBuilder,
    summary_prompt_builder: PromptBuilder = JinjaPromptBuilder(
        DEFAULT_SUMMARY_PROMPT_TEMPLATE
    ),
    max_summary_words: int = 100,
) -> List[AgentMessage]:
    summarizable_messages = [
        m
        for m in messages
        if (
            isinstance(m, ConversationMessage)
            and not m.message_summary
            and len(m.message) > MIN_SUMMARIZABLE_LENGTH
        )
        or (
            isinstance(m, ToolUsageMessage)
            and not m.tool_output_summary
            and len(m.tool_output) > MIN_SUMMARIZABLE_LENGTH
        )
    ]

    print(f"Summarizing {len(summarizable_messages)} messages...")

    # Creating a thread pool executor to parallelize summary generation
    with ThreadPoolExecutor() as executor:
        futures = []
        for m in summarizable_messages:
            # Find all messages that occurred before the current message
            previous_messages = [msg for msg in messages if msg.timestamp < m.timestamp]
            # Submit the _summarize_message task to the executor
            future = executor.submit(
                _summarize_message,
                m,
                previous_messages[-5:],
                llm,
                summary_prompt_builder,
                message_prompt_builder,
                max_summary_words,
            )
            futures.append(future)

        for future in as_completed(futures):
            try:
                future.result()
            except Exception as exc:
                print(f"Message summary generation generated an exception: {exc}")

    return messages


def _summarize_message(
    message: AgentMessage,
    previous_messages: List[AgentMessage],
    llm: LLM,
    prompt_builder: PromptBuilder,
    message_prompt_builder: PromptBuilder,
    max_summary_words: int,
) -> str:
    message_prompt = message_prompt_builder.build_prompt(message=message)
    previous_message_prompts = [
        message_prompt_builder.build_prompt(
            message=msg,
        )
        for msg in previous_messages
    ]

    prompt = prompt_builder.build_prompt(
        message=message_prompt,
        previous_messages=previous_message_prompts,
        max_words=max_summary_words,
    )

    summary, _ = llm.get_completion(messages=[{"role": "system", "content": prompt}])

    # print("************")
    # print(f"Message: {message_prompt}")
    # print(f"Summary: {summary}")
    # print("************")
    if isinstance(message, ConversationMessage):
        print("Updating message summary...")
        message.message_summary = summary
    elif isinstance(message, ToolUsageMessage):
        message.tool_output_summary = summary
    # print(message)


================================================
FILE: bondai/agents/compression/prompts/conversation_summarizer_prompt_template.md
================================================
Read the entire conversation provided below and create a summary. Your task is to condense the key information and main points from the conversation into a concise summary. Focus on retaining critical details and insights from the dialogue.

# Conversation:
{% for msg in messages %}
- {{ msg }}
{% endfor %}

# Instructions:
- Aim to capture the essence and most significant aspects of the conversation in your summary.
- Ensure that all vital information, including key facts, decisions, and insights, are included in the summary.
- Produce a clear and coherent summary that reflects the main points of the conversation.
- Exclude any redundant or non-essential information from the summary.
- Present the summary in a concise and organized manner. 

# Conversation Summary:

================================================
FILE: bondai/agents/compression/prompts/message_summarizer_prompt_template.md
================================================
Read the following conversation and summarize the final message:

# Conversation
{% for msg in previous_messages %}
- {{ msg }}
{% endfor %}
- {{ message }}

# Important Rules
- Use the preceding conversation as context but summarize ONLY the following message.
- Your summary must be no longer than {{ max_words }} words.
- Output only the summary. Do NOT include anything else in your output.

Message To Summarize:
{{ message }}

Summary:

================================================
FILE: bondai/agents/conversation_member.py
================================================
import uuid
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Tuple, Callable
from .messages import (
    AgentMessage,
    ConversationMessage,
    AgentMessageList,
    USER_MEMBER_NAME,
)

DEFAULT_MAX_SEND_ATTEMPTS = 3


class ConversationMemberEventNames(Enum):
    MESSAGE_RECEIVED: str = "message_received"
    MESSAGE_COMPLETED: str = "message_completed"
    MESSAGE_ERROR: str = "message_error"
    CONVERSATION_EXITED: str = "agent_exited"


class ConversationMember(ABC):
    def __init__(
        self,
        name: str,
        persona: str | None = None,
        persona_summary: str | None = None,
    ):
        self._id: str = str(uuid.uuid4())
        self._name: str = name
        self._persona: str = persona
        self._persona_summary: str = persona_summary
        self._messages: AgentMessageList = AgentMessageList()

    @property
    def id(self) -> str:
        return self._id

    @property
    def name(self) -> str:
        return self._name

    @property
    def persona(self) -> str:
        return self._persona

    @property
    def persona_summary(self) -> str:
        return self._persona_summary

    @property
    def messages(self) -> AgentMessageList:
        return self._messages

    @abstractmethod
    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ) -> ConversationMessage | None:
        pass

    def send_message_async(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ):
        pass

    def clear_messages(self):
        pass
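
Since `send_message` is the only abstract method here, a concrete member just needs to accept a message and return a response. A minimal, hypothetical echo member (illustrative only, not part of the repository) could look like this:

```python
# Illustrative sketch only: a hypothetical member that echoes messages back.
from bondai.agents.conversation_member import (
    ConversationMember,
    DEFAULT_MAX_SEND_ATTEMPTS,
)
from bondai.agents.messages import ConversationMessage, USER_MEMBER_NAME


class EchoMember(ConversationMember):
    def send_message(
        self,
        message,
        sender_name=USER_MEMBER_NAME,
        group_members=None,
        group_messages=None,
        max_attempts=DEFAULT_MAX_SEND_ATTEMPTS,
        require_response=True,
    ):
        text = message.message if isinstance(message, ConversationMessage) else message
        reply = ConversationMessage(
            sender_name=self.name,
            recipient_name=sender_name,
            message=f"You said: {text}",
        )
        self._messages.add(reply)
        return reply


member = EchoMember(name="Echo")
print(member.send_message("hello", sender_name="user").message)  # "You said: hello"
```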


================================================
FILE: bondai/agents/conversational_agent.py
================================================
import os
import traceback
import json
from datetime import datetime
from typing import Dict, List, Callable
from bondai.util import load_local_resource
from bondai.tools import Tool
from bondai.memory import MemoryManager
from bondai.tools.conversational import (
    SEND_MESSAGE_TOOL_NAME,
    EXIT_CONVERSATION_TOOL_NAME,
    SendMessageTool,
    ExitConversationTool,
)
from bondai.prompt import JinjaPromptBuilder
from bondai.models import LLM, EmbeddingModel
from bondai.models.openai import OpenAILLM, OpenAIModelNames, get_total_cost
from .agent import (
    Agent,
    DEFAULT_MAX_TOOL_RETRIES,
    AgentStatus,
    AgentException,
)
from .util import (
    AgentException,
    AgentEventNames,
    parse_response_content_message,
)
from .prompts import DEFAULT_AGENT_NAME, DEFAULT_CONVERSATIONAL_INSTRUCTIONS
from .conversation_member import ConversationMember, ConversationMemberEventNames
from .messages import (
    AgentMessage,
    ConversationMessage,
    ToolUsageMessage,
    SystemMessage,
    AgentMessageList,
    USER_MEMBER_NAME,
)

DEFAULT_MAX_SEND_ATTEMPTS = 3
DEFAULT_SYSTEM_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "conversational_agent_system_prompt_template.md")
)


class ConversationalAgent(Agent, ConversationMember):
    def __init__(
        self,
        llm: LLM | None = None,
        embedding_model: EmbeddingModel | None = None,
        tools: List[Tool] | None = None,
        messages: List[AgentMessage] | None = None,
        name: str = DEFAULT_AGENT_NAME,
        persona: str | None = None,
        persona_summary: str | None = None,
        instructions: str | None = DEFAULT_CONVERSATIONAL_INSTRUCTIONS,
        system_prompt_sections: List[Callable[[], str]] | None = None,
        system_prompt_builder: Callable[..., str] = None,
        message_prompt_builder: Callable[..., str] = None,
        memory_manager: MemoryManager | None = None,
        max_tool_retries: int = DEFAULT_MAX_TOOL_RETRIES,
        max_context_length: int = None,
        max_context_pressure_ratio: float = 0.8,
        enable_context_compression: bool = False,
        enable_conversation_tools: bool = True,
        enable_conversational_content_responses: bool = True,
        enable_exit_conversation: bool = True,
        quiet: bool = True,
    ):
        if llm is None:
            llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
        if tools is None:
            tools = []
        if system_prompt_sections is None:
            system_prompt_sections = []

        ConversationMember.__init__(
            self,
            name=name,
            persona=persona,
            persona_summary=persona_summary,
        )
        Agent.__init__(
            self,
            llm=llm,
            embedding_model=embedding_model,
            quiet=quiet,
            tools=tools,
            messages=messages,
            system_prompt_sections=system_prompt_sections,
            system_prompt_builder=system_prompt_builder
            or JinjaPromptBuilder(DEFAULT_SYSTEM_PROMPT_TEMPLATE),
            message_prompt_builder=message_prompt_builder,
            memory_manager=memory_manager,
            max_context_length=max_context_length,
            max_context_pressure_ratio=max_context_pressure_ratio,
            max_tool_retries=max_tool_retries,
            enable_context_compression=enable_context_compression,
            enable_final_answer_tool=False,
            allowed_events=[
                AgentEventNames.CONTEXT_COMPRESSION_REQUESTED,
                AgentEventNames.TOOL_SELECTED,
                AgentEventNames.TOOL_ERROR,
                AgentEventNames.TOOL_COMPLETED,
                AgentEventNames.STREAMING_CONTENT_UPDATED,
                AgentEventNames.STREAMING_FUNCTION_UPDATED,
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        self._instructions: str = instructions
        self._enable_exit_conversation: bool = enable_exit_conversation
        self._enable_conversational_content_responses = (
            enable_conversational_content_responses
        )
        self._enable_conversation_tools = enable_conversation_tools
        if self._enable_conversation_tools:
            self.add_tool(SendMessageTool())
        if self._enable_exit_conversation:
            self.add_tool(ExitConversationTool())

    @property
    def instructions(self) -> str:
        return self._instructions

    def send_message_async(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List[ConversationMember] | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ):
        """Runs the agent's task in a separate thread."""
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot send message while agent is in a running state."
            )
        if not message:
            raise AgentException("'message' cannot be empty.")

        args = (
            message,
            sender_name,
            group_members,
            group_messages,
            max_attempts,
            require_response,
        )

        self._start_execution_thread(self.send_message, args=args)

    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List[ConversationMember] | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = DEFAULT_MAX_SEND_ATTEMPTS,
        require_response: bool = True,
    ) -> ConversationMessage | None:
        if group_members is None:
            group_members = []
        if group_messages is None:
            group_messages = []

        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot send message while agent is in a running state."
            )
        if not message:
            raise AgentException("'message' cannot be empty.")

        if isinstance(message, ConversationMessage):
            agent_message = message
        elif isinstance(message, str):
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            agent_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=self.name,
                message=message,
                require_response=require_response,
            )
        else:
            raise AgentException(
                "'message' must be an instance of ConversationMessage or a string."
            )

        attempts = 0
        starting_cost = get_total_cost()
        self._status = AgentStatus.RUNNING
        self._messages.add(agent_message)
        if self._memory_manager and self._memory_manager.conversation_memory:
            self._memory_manager.conversation_memory.add(agent_message)
        self._trigger_event(
            ConversationMemberEventNames.MESSAGE_RECEIVED, self, agent_message
        )

        def complete_agent_message(
            success=False, conversation_exited=False, error=None
        ):
            agent_message.success = success
            agent_message.conversation_exited = conversation_exited
            agent_message.error = error
            agent_message.cost = get_total_cost() - starting_cost
            agent_message.completed_at = datetime.now()
            if success:
                self._trigger_event(
                    ConversationMemberEventNames.MESSAGE_COMPLETED, self, agent_message
                )
                if conversation_exited:
                    self._trigger_event(
                        ConversationMemberEventNames.CONVERSATION_EXITED,
                        self,
                        agent_message,
                    )
            else:
                self._trigger_event(
                    ConversationMemberEventNames.MESSAGE_ERROR, self, agent_message
                )

        def validate_recipient(recipient_name: str):
            if not recipient_name:
                return "recipient_name cannot be empty."
            if len(group_members) > 0 and not any(
                [
                    member.name.lower() == recipient_name.lower()
                    for member in group_members
                ]
            ):
                return f"InvalidResponseError: The response does not conform to the required format. You do not have the ability to send messages to '{recipient_name}'. Try sending a message to someone else."

        if not agent_message.require_response:
            complete_agent_message(success=True)
            return

        while not self._force_stop:
            try:
                attempts += 1
                if attempts > max_attempts:
                    raise AgentException(
                        "The maximum number of attempts has been exceeded."
                    )

                prompt_vars = {
                    "name": self.name,
                    "persona": self.persona,
                    "instructions": self.instructions,
                    "conversation_enabled": self._enable_conversation_tools
                    or self._enable_conversational_content_responses,
                    "enable_exit_conversation": self._enable_exit_conversation,
                }

                tool_result = self._run_tool_loop(
                    addition_context_messages=group_messages,
                    tools=self._tools,
                    conversation_members=group_members,
                    starting_cost=starting_cost,
                    return_conversational_responses=True,
                    prompt_vars=prompt_vars,
                )

                response_message: ConversationMessage | None = None
                if isinstance(tool_result, ToolUsageMessage):
                    if not tool_result.success:
                        complete_agent_message(success=False, error=tool_result.error)
                        raise tool_result.error
                    elif tool_result.tool_name == EXIT_CONVERSATION_TOOL_NAME:
                        complete_agent_message(success=True, conversation_exited=True)
                        return tool_result.tool_output
                    elif tool_result.tool_name == SEND_MESSAGE_TOOL_NAME:
                        response_message = tool_result.tool_output

                if (
                    isinstance(tool_result, str)
                    and self._enable_conversational_content_responses
                ):
                    recipient_name, message = parse_response_content_message(
                        tool_result
                    )
                    if not recipient_name or not message:
                        recipient_name = agent_message.sender_name
                        message = tool_result
                    response_message = ConversationMessage(
                        role="assistant", recipient_name=recipient_name, message=message
                    )

                if response_message:
                    response_message.sender_name = self.name
                    error = validate_recipient(response_message.recipient_name)
                    if not error:
                        complete_agent_message(success=True)
                        self._messages.add(response_message)
                        if (
                            self._memory_manager
                            and self._memory_manager.conversation_memory
                        ):
                            self._memory_manager.conversation_memory.add(
                                response_message
                            )

                        return response_message
                    else:
                        self._messages.add(SystemMessage(message=error))
                else:
                    self._messages.add(
                        SystemMessage(
                            message="InvalidResponseError: The response does not conform to the required format. A function selection was expected, but none was provided.\nYour must correct this error."
                        )
                    )
            finally:
                self._status = AgentStatus.IDLE

    def to_dict(self) -> Dict:
        data = super().to_dict()
        data["name"] = self._name
        data["persona"] = self._persona
        data["persona_summary"] = self._persona_summary
        data["instructions"] = self.instructions
        data["quiet"] = self._quiet
        data["enable_conversation_tools"] = self._enable_conversation_tools
        data["enable_exit_conversation"] = self._enable_exit_conversation
        data[
            "enable_conversational_content_responses"
        ] = self._enable_conversational_content_responses
        data["max_context_length"] = self._max_context_length
        data["max_context_pressure_ratio"] = self._max_context_pressure_ratio
        data["messages"] = self.messages.to_dict()
        return data

    def save_state(self, file_path: str = None) -> Dict:
        state = super().save_state()
        state.update(self.to_dict())

        if file_path:
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, "w") as file:
                json.dump(state, file, indent=4)

        return state

    @classmethod
    def from_dict(
        cls, data: Dict, file_path: str = None
    ) -> "ConversationalAgent":
        if not data and file_path:
            with open(file_path, "r") as file:
                data = json.load(file)

        agent = cls(
            name=data["name"],
            persona=data["persona"],
            persona_summary=data["persona_summary"],
            instructions=data["instructions"],
            enable_exit_conversation=data["enable_exit_conversation"],
            quiet=data["quiet"],
            enable_conversation_tools=data["enable_conversation_tools"],
            enable_conversational_content_responses=data[
                "enable_conversational_content_responses"
            ],
            max_context_length=data["max_context_length"],
            max_context_pressure_ratio=data["max_context_pressure_ratio"],
        )
        agent._messages = AgentMessageList.from_dict(data["messages"])
        agent.load_state(data)
        return agent
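
As a quick orientation, a `ConversationalAgent` can be constructed with just a name and persona and messaged directly. This is an illustrative sketch only; by default it requires OpenAI credentials, because `llm` falls back to `OpenAILLM(OpenAIModelNames.GPT4_0613)` as shown in the constructor above:

```python
# Illustrative sketch only. Assumes an OPENAI_API_KEY is configured in the environment.
from bondai.agents.conversational_agent import ConversationalAgent

agent = ConversationalAgent(
    name="Mira",
    persona="A warm, patient assistant.",
)
response = agent.send_message("Hello, who are you?", sender_name="user")
if response is not None:
    print(f"{response.sender_name} -> {response.recipient_name}: {response.message}")
```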


================================================
FILE: bondai/agents/group_chat/__init__.py
================================================
from .group_conversation import GroupConversation
from .user_proxy import UserProxy
from .group_conversation_config import (
    BaseGroupConversationConfig,
    GroupConversationConfig,
    TeamConversationConfig,
    TableConversationConfig,
    CompositeConversationConfig,
)

__all__ = [
    "UserProxy",
    "GroupConversation",
    "BaseGroupConversationConfig",
    "GroupConversationConfig",
    "TeamConversationConfig",
    "TableConversationConfig",
    "CompositeConversationConfig",
]


================================================
FILE: bondai/agents/group_chat/group_conversation.py
================================================
import uuid
import asyncio
import traceback
from datetime import datetime
from typing import Dict, List, Callable
from bondai.util import EventMixin, Runnable
from bondai.agents import (
    AgentException,
    AgentStatus,
    ConversationMember,
    ConversationMemberEventNames,
    AgentMessageList,
    ConversationMessage,
    USER_MEMBER_NAME,
)
from .group_conversation_config import (
    BaseGroupConversationConfig,
    TeamConversationConfig,
)


class GroupConversation(EventMixin, Runnable):
    def __init__(
        self,
        conversation_members: List[ConversationMember] | None = None,
        conversation_config: BaseGroupConversationConfig | None = None,
        filter_recipient_messages: bool = False,
    ):
        super().__init__(
            allowed_events=[
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ]
        )
        if conversation_members and conversation_config:
            raise AgentException(
                "Only one of 'conversation_members' or 'conversation_configs' must be provided"
            )

        if conversation_config:
            self._conversation_config = conversation_config
        elif conversation_members:
            self._conversation_config = TeamConversationConfig(conversation_members)
        else:
            raise AgentException(
                "Either 'conversation_members' or 'conversation_config' must be provided"
            )

        self._id: str = str(uuid.uuid4())
        self._status: AgentStatus = AgentStatus.IDLE
        self._filter_recipient_messages: bool = filter_recipient_messages
        self._messages: AgentMessageList = AgentMessageList()

        self._init_member_events()

    @property
    def id(self) -> str:
        return self._id

    @property
    def status(self) -> AgentStatus:
        return self._status

    @property
    def members(self) -> List[ConversationMember]:
        return self._conversation_config.members

    def remove_messages_after(self, timestamp: datetime, inclusive: bool = True):
        self._messages.remove_after(timestamp)
        for a in self.members:
            a.messages.remove_after(timestamp, inclusive=inclusive)

    def _get_member(self, member_name: str) -> ConversationMember:
        return next(
            (m for m in self.members if m.name.lower() == member_name.lower()), None
        )

    def _init_member_events(self):
        for member in self.members:
            member.on(ConversationMemberEventNames.MESSAGE_RECEIVED)(
                self._on_member_message_received
            )
            member.on(ConversationMemberEventNames.MESSAGE_ERROR)(
                self._on_member_message_error
            )
            member.on(ConversationMemberEventNames.MESSAGE_COMPLETED)(
                self._on_member_message_completed
            )
            member.on(ConversationMemberEventNames.CONVERSATION_EXITED)(
                self._on_member_exited
            )

    def _on_member_message_received(
        self, member: ConversationMember, message: ConversationMessage
    ):
        # print(f"{message.sender_name} to {message.recipient_name}: {message.message}")
        self._trigger_event(
            ConversationMemberEventNames.MESSAGE_RECEIVED, member, message
        )

    def _on_member_message_error(
        self, member: ConversationMember, message: ConversationMessage
    ):
        exc = message.error
        traceback.print_exception(type(exc), exc, exc.__traceback__)
        self._trigger_event(ConversationMemberEventNames.MESSAGE_ERROR, member, message)

    def _on_member_message_completed(
        self, member: ConversationMember, message: ConversationMessage
    ):
        self._messages.add(message)
        self._trigger_event(
            ConversationMemberEventNames.MESSAGE_COMPLETED, member, message
        )

    def _on_member_exited(
        self, member: ConversationMember, message: ConversationMessage
    ):
        self._trigger_event(
            ConversationMemberEventNames.CONVERSATION_EXITED, member, message
        )

    def save_state(self) -> Dict:
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot save group conversation state while it is running."
            )

        state = {}
        for member in self.members:
            state[member.id] = member.save_state()

        return state

    def load_state(self, state: Dict):
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot load group conversation state while it is running."
            )

        for member in self.members:
            member.load_state(state[member.id])

    def send_message_async(
        self,
        recipient_name: str,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        require_response: bool = True,
    ):
        """Runs the agent's task in a separate thread."""
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot send message while agent is in a running state."
            )
        if not message:
            raise AgentException("'message' cannot be empty.")

        args = (recipient_name, message, sender_name, require_response)

        self._start_execution_thread(self.send_message, args=args)

    def send_message(
        self,
        recipient_name: str,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        require_response: bool = True,
    ) -> str:
        if self._status == AgentStatus.RUNNING:
            raise AgentException(
                "Cannot send message while agent is in a running state."
            )
        if not message:
            raise AgentException("'message' cannot be empty.")

        previous_message = None
        if isinstance(message, ConversationMessage):
            next_message = message
        elif isinstance(message, str):
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            if not recipient_name:
                raise AgentException("recipient_name cannot be empty.")

            next_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=recipient_name,
                message=message,
                require_response=require_response,
            )
        else:
            raise AgentException(
                "'message' must be an instance of ConversationMessage or a string."
            )

        try:
            self._status = AgentStatus.RUNNING
            while next_message:
                if next_message.sender_name.lower() == USER_MEMBER_NAME.lower():
                    sender_reachable_members = self.members
                else:
                    sender_reachable_members = (
                        self._conversation_config.get_reachable_members(
                            member_name=next_message.sender_name
                        )
                    )

                recipient = next(
                    (
                        m
                        for m in sender_reachable_members
                        if m.name.lower() == next_message.recipient_name.lower()
                    ),
                    None,
                )
                if not recipient:
                    raise AgentException(
                        f"Recipient {next_message.recipient_name} not found"
                    )

                recipient_reachable_members = (
                    self._conversation_config.get_reachable_members(member=recipient)
                )

                if self._filter_recipient_messages:
                    recipient_messages = AgentMessageList(
                        [
                            m
                            for m in self._messages
                            if m.recipient_name.lower() == recipient.name.lower()
                            or m.sender_name.lower() == recipient.name.lower()
                        ]
                    )
                else:
                    recipient_messages = self._messages

                try:
                    if next_message.require_response:
                        previous_message = next_message
                        next_message = recipient.send_message(
                            message=next_message,
                            group_members=recipient_reachable_members,
                            group_messages=recipient_messages,
                        )
                    else:
                        recipient.send_message(message=next_message)
                        next_message = previous_message
                except AgentException as e:
                    print("Error occurred, rewinding conversation...")
                    # print(e)
                    # The recipient agent has errored out. We will rewind the conversation and try again.
                    previous_message = (
                        self._messages[-2]
                        if len(self._messages) > 1
                        else self._messages[-1]
                    )
                    self.remove_messages_after(previous_message.timestamp)
                    next_message = ConversationMessage(
                        message=previous_message.message,
                        sender_name=previous_message.sender_name,
                        recipient_name=previous_message.recipient_name,
                    )

            self._trigger_event(
                ConversationMemberEventNames.CONVERSATION_EXITED, next_message
            )
        finally:
            self._status = AgentStatus.IDLE

    def reset_memory(self):
        self._messages.clear()
        for member in self.members:
            member.clear_messages()
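
Putting the pieces together, a group conversation wires members (agents and a user proxy) into a single `GroupConversation` and then routes messages by recipient name. A minimal sketch, assuming OpenAI credentials are configured for the default LLMs (member names and personas are hypothetical):

```python
# Illustrative sketch only. Assumes an OPENAI_API_KEY is configured in the environment.
from bondai.agents.conversational_agent import ConversationalAgent
from bondai.agents.group_chat import GroupConversation, UserProxy

user_proxy = UserProxy(auto_exit=True)
analyst = ConversationalAgent(name="Cortex", persona="A data analyst.")
reviewer = ConversationalAgent(name="Vega", persona="A quality assurance reviewer.")

conversation = GroupConversation(
    conversation_members=[user_proxy, analyst, reviewer]
)

# Messages are delivered to the named recipient; replies keep the loop going
# until a member exits or stops requiring a response.
conversation.send_message(
    recipient_name="Cortex",
    message="Please summarize last week's sales numbers.",
)
```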


================================================
FILE: bondai/agents/group_chat/group_conversation_config.py
================================================
from typing import Dict, List, Set
from abc import ABC, abstractmethod
from bondai.agents import ConversationMember


class BaseGroupConversationConfig(ABC):
    @property
    @abstractmethod
    def members(self) -> List[ConversationMember]:
        pass

    @abstractmethod
    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        pass


class GroupConversationConfig(BaseGroupConversationConfig):
    def __init__(self, members: List[ConversationMember]):
        self._members: Set[ConversationMember] = set(members)

    @property
    def members(self) -> List[ConversationMember]:
        return list(self._members)

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        if not member and not member_name:
            return []

        member_name = "" if not member_name else member_name
        reachable_members = list(
            set(
                [
                    m
                    for m in self._members
                    if m != member and m.name.lower() != member_name.lower()
                ]
            )
        )

        return reachable_members


class TeamConversationConfig(BaseGroupConversationConfig):
    def __init__(self, *args: List[ConversationMember]):
        self._members: Set[ConversationMember] = set()
        for team in args:
            self._members.update(team)
        self._teams: List[List[ConversationMember]] = list(args)

    @property
    def members(self) -> List[ConversationMember]:
        return list(self._members)

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        if not member and not member_name:
            return []

        member_name = "" if not member_name else member_name
        member_teams = [
            t
            for t in self._teams
            for m in t
            if m == member or m.name.lower() == member_name.lower()
        ]
        reachable_members = list(
            set(
                [
                    m
                    for t in member_teams
                    for m in t
                    if m != member and m.name.lower() != member_name.lower()
                ]
            )
        )

        return reachable_members


class TableConversationConfig(BaseGroupConversationConfig):
    def __init__(self, member_table: Dict):
        self._member_table = member_table

    @property
    def members(self) -> List[ConversationMember]:
        return list(self._member_table.keys())

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        if not member and not member_name:
            return []

        if member_name:
            member = next(
                (m for m in self.members if m.name.lower() == member_name.lower()),
                None,
            )

        if member and member.name in self._member_table:
            return self._member_table[member.name]
        else:
            return []


class CompositeConversationConfig(BaseGroupConversationConfig):
    def __init__(self, *conversation_configs: BaseGroupConversationConfig):
        self._conversation_configs: List[BaseGroupConversationConfig] = list(
            conversation_configs
        )

    @property
    def members(self) -> List[ConversationMember]:
        return list(set([m for c in self._conversation_configs for m in c.members]))

    def get_reachable_members(
        self, member: ConversationMember | None = None, member_name: str = None
    ) -> List[ConversationMember]:
        return list(
            set(
                [
                    m
                    for c in self._conversation_configs
                    for m in c.get_reachable_members(member, member_name)
                ]
            )
        )
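
The configs above only differ in how they compute reachability. A small sketch with hypothetical stand-in members shows how `TeamConversationConfig` restricts who can message whom when teams overlap:

```python
# Illustrative sketch only: hypothetical members used to demonstrate reachability.
from bondai.agents.conversation_member import ConversationMember
from bondai.agents.group_chat import TeamConversationConfig


class StubMember(ConversationMember):
    def send_message(self, *args, **kwargs):
        return None


alice, bob, carol = StubMember("Alice"), StubMember("Bob"), StubMember("Carol")

# Alice and Bob share one team; Bob and Carol share another.
config = TeamConversationConfig([alice, bob], [bob, carol])

print([m.name for m in config.get_reachable_members(member=alice)])      # ['Bob']
print(sorted(m.name for m in config.get_reachable_members(member=bob)))  # ['Alice', 'Carol']
```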


================================================
FILE: bondai/agents/group_chat/user_proxy.py
================================================
from termcolor import cprint
from datetime import datetime
from typing import List
from bondai.util import EventMixin
from bondai.agents import (
    AgentStatus,
    AgentException,
    AgentMessage,
    ConversationMessage,
    ConversationMember,
    ConversationMemberEventNames,
    parse_response_content_message,
    USER_MEMBER_NAME,
)


class UserProxy(EventMixin, ConversationMember):
    def __init__(
        self,
        persona: str | None = None,
        parse_recipients: bool = True,
        auto_exit: bool = False,
    ):
        EventMixin.__init__(
            self,
            allowed_events=[
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        ConversationMember.__init__(
            self,
            name=USER_MEMBER_NAME,
            persona=persona,
        )
        self._status = AgentStatus.IDLE
        self._parse_recipients = parse_recipients
        self._auto_exit = auto_exit

    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int = None,
        require_response: bool = True,
    ):
        if not message:
            raise AgentException("'message' cannot be empty.")

        if group_members is None:
            group_members = []

        if isinstance(message, ConversationMessage):
            agent_message = message
        else:
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            agent_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=self.name,
                message=message,
                require_response=require_response,
            )

        self._messages.add(agent_message)
        self._trigger_event(
            ConversationMemberEventNames.MESSAGE_RECEIVED, self, agent_message
        )

        cprint("\n" + agent_message.message + "\n", "white")

        if not agent_message.require_response or self._auto_exit:
            agent_message.success = True
            agent_message.cost = 0.0
            agent_message.completed_at = datetime.now()
            self._trigger_event(
                ConversationMemberEventNames.MESSAGE_COMPLETED, self, agent_message
            )
            return

        while True:
            try:
                user_response = input()
                user_exited = user_response.strip().lower() == "exit"

                if not user_exited:
                    if self._parse_recipients:
                        (
                            next_recipient_name,
                            next_message,
                        ) = parse_response_content_message(user_response)
                    else:
                        next_recipient_name = agent_message.sender_name
                        next_message = user_response

                    next_recipient_name = (
                        next_recipient_name
                        if next_recipient_name
                        else agent_message.sender_name
                    )
                    next_message = next_message if next_message else user_response

                    if len(group_members) > 0 and not any(
                        [
                            member.name.lower() == next_recipient_name.lower()
                            for member in group_members
                        ]
                    ):
                        raise AgentException(
                            f"InvalidResponseError: The response does not conform to the required format. You do not have the ability to send messages to '{next_recipient_name}'. Try sending a message to someone else."
                        )

                    agent_message.success = True
                    agent_message.conversation_exited = user_exited
                    agent_message.cost = 0.0
                    agent_message.completed_at = datetime.now()
                    self._trigger_event(
                        ConversationMemberEventNames.MESSAGE_COMPLETED,
                        self,
                        agent_message,
                    )

                    response_message = ConversationMessage(
                        sender_name=self.name,
                        recipient_name=next_recipient_name,
                        message=next_message,
                    )
                    self._messages.add(response_message)
                    self._status = AgentStatus.IDLE
                    return response_message
                else:
                    agent_message.success = True
                    agent_message.conversation_exited = True
                    agent_message.cost = 0.0
                    agent_message.completed_at = datetime.now()
                    self._trigger_event(
                        ConversationMemberEventNames.MESSAGE_COMPLETED,
                        self,
                        agent_message,
                    )
                    self._trigger_event(
                        ConversationMemberEventNames.CONVERSATION_EXITED,
                        self,
                        agent_message,
                    )
                    self._status = AgentStatus.IDLE
                    return None
            except Exception as e:
                print("The following error occurred while parsing your response:")
                print(str(e))


================================================
FILE: bondai/agents/messages.py
================================================
import uuid
from abc import ABC
from typing import List, Dict, Set
from datetime import datetime
from dataclasses import dataclass, field, is_dataclass

USER_MEMBER_NAME = "user"
DEFAULT_MEMORY_WARNING_MESSAGE = (
    "Warning: the conversation history will soon reach its maximum length and be trimmed. "
    "Make sure to save any important information from the conversation to your memory before it is removed."
)


@dataclass
class AgentMessage(ABC):
    id: str = field(default_factory=lambda: str(uuid.uuid4()))
    role: str | None = field(default=None)
    timestamp: datetime = field(default_factory=lambda: datetime.now())


@dataclass
class SystemMessage(AgentMessage):
    role: str = field(default="system")
    message: str | None = field(default=None)


@dataclass
class SummaryMessage(AgentMessage):
    role: str = field(default="user")
    message: str | None = field(default=None)
    children: List[AgentMessage] = field(default=None)


@dataclass
class ConversationMessage(AgentMessage):
    role: str = field(default="user")
    sender_name: str | None = field(default=None)
    recipient_name: str | None = field(default=None)
    message: str | None = field(default=None)
    message_summary: str | None = field(default=None)
    require_response: bool = field(default=True)
    success: bool = field(default=False)
    error: Exception | None = field(default=None)
    conversation_exited: bool = field(default=False)
    cost: float | None = field(default=None)
    completed_at: datetime | None = field(default=None)


@dataclass
class ToolUsageMessage(AgentMessage):
    role: str = field(default="function")
    tool_name: str | None = field(default=None)
    tool_arguments: Dict | None = field(default=None)
    tool_output: str | None = field(default=None)
    tool_output_summary: str | None = field(default=None)
    success: bool = field(default=False)
    error: Exception | None = field(default=None)
    agent_halted: bool = field(default=False)
    cost: float | None = field(default=None)
    completed_at: datetime | None = field(default=None)


def custom_serialization(value):
    """
    Serialize special types like datetime, Exception, and nested AgentMessage objects.
    """
    if isinstance(value, datetime):
        return value.isoformat()
    elif isinstance(value, Exception):
        return str(value)
    elif is_dataclass(value) and not isinstance(value, type):
        return message_to_dict(value)
    return value


def message_to_dict(message: AgentMessage) -> Dict:
    """
    Convert an AgentMessage object to a dictionary with custom serialization.
    """

    message_dict = {
        k: custom_serialization(v)
        for k, v in message.__dict__.items()
        if k != "children"
    }
    message_dict["type"] = type(message).__name__  # Add the type for deserialization
    if "children" in message.__dict__:
        message_dict["children"] = [
            message_to_dict(child) for child in message.children
        ]

    return message_dict


class AgentMessageList:
    def __init__(self, messages: List[AgentMessage] | None = None):
        self._items: List[AgentMessage] = []
        self._ids: Set[str] = set()
        if messages:
            for message in messages:
                self.add(message)

    def add(self, item: AgentMessage):
        if item.id not in self._ids:
            self._ids.add(item.id)
            self._items.append(item)
            self._items = list(sorted(self._items, key=lambda x: x.timestamp))

    def remove(self, item: AgentMessage):
        if item.id in self._ids:
            self._ids.remove(item.id)
            self._items.remove(item)

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        if inclusive:
            self._items = [item for item in self._items if item.timestamp <= timestamp]
        else:
            self._items = [item for item in self._items if item.timestamp < timestamp]
        self._ids = set([item.id for item in self._items])

    def clear(self):
        self._items = []
        self._ids = set()

    def __getitem__(self, index: int):
        return self._items[index]

    def __add__(self, other: List[AgentMessage] | "AgentMessageList"):
        if isinstance(other, AgentMessageList):
            # If the other object is also an AgentMessageList, extend with its items
            return self._items + other._items
        elif isinstance(other, list):
            # If the other object is a list, just concatenate the lists
            return self._items + other
        else:
            # If the other object is neither, raise an exception
            raise TypeError(
                f"Unsupported operand type(s) for +: 'AgentMessageList' and '{type(other).__name__}'"
            )

    def __iter__(self):
        return iter(self._items)

    def __contains__(self, item):
        return item.id in self._ids

    def __len__(self):
        return len(self._items)

    def to_dict(self) -> List[Dict]:
        """
        Convert the AgentMessageList to a list of dictionaries for serialization.
        """
        return [message_to_dict(message) for message in self._items]

    @classmethod
    def from_dict(cls, data: List[Dict]) -> "AgentMessageList":
        """
        Create an AgentMessageList from a list of dictionaries.
        """
        list_instance = cls()
        for item in data:
            item_type = item["type"]
            del item["type"]
            if item_type == "ConversationMessage":
                message = ConversationMessage(**item)
            elif item_type == "ToolUsageMessage":
                message = ToolUsageMessage(**item)
            elif item_type == "SystemMessage":
                message = SystemMessage(**item)
            elif item_type == "SummaryMessage":
                message = SummaryMessage(**item)
            # elif item_type == 'MemoryWarningMessage':
            #     message = MemoryWarningMessage(**item)
            else:
                raise ValueError(f"Unknown message type: {item_type}")

            if "timestamp" in item:
                message.timestamp = datetime.fromisoformat(item["timestamp"])
            if "completed_at" in item and item["completed_at"]:
                message.completed_at = datetime.fromisoformat(item["completed_at"])
            list_instance.add(message)

        return list_instance
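
The `to_dict`/`from_dict` pair gives a straightforward serialization round trip; the `type` field added by `message_to_dict` is what `from_dict` uses to pick the dataclass. A minimal sketch:

```python
# Illustrative sketch only: round-trips a small message list through serialization.
from bondai.agents.messages import (
    AgentMessageList,
    ConversationMessage,
    ToolUsageMessage,
)

messages = AgentMessageList(
    [
        ConversationMessage(sender_name="user", recipient_name="Mira", message="Hello!"),
        ToolUsageMessage(tool_name="final_answer", tool_output="Done.", success=True),
    ]
)

serialized = messages.to_dict()                 # list of plain dicts with a "type" tag
restored = AgentMessageList.from_dict(serialized)

assert len(restored) == 2
assert restored[0].message == "Hello!"
assert restored[1].tool_name == "final_answer"
```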


================================================
FILE: bondai/agents/prompts/__init__.py
================================================
from .default_persona import (
    DEFAULT_AGENT_NAME,
    DEFAULT_CONVERSATIONAL_INSTRUCTIONS,
    DEFAULT_CONVERSATIONAL_PERSONA,
)

__all__ = [
    "DEFAULT_AGENT_NAME",
    "DEFAULT_CONVERSATIONAL_INSTRUCTIONS",
    "DEFAULT_CONVERSATIONAL_PERSONA",
]


================================================
FILE: bondai/agents/prompts/agent_message_prompt_template.md
================================================
{%- if message_type == "ToolUsageMessage" %}
# Message Timestamp
{{ message.timestamp }}

# Tool Name
You used the **{{ message.tool_name }}** tool.
# Tool Arguments
{% if message.tool_arguments %}
{% for k, v in message.tool_arguments.items() %}
{{ k }}:
```
{{ v }}
```
{% endfor %}
{%- else %}
No arguments were provided for this tool.
{% endif %}
{% if message.error %}
# Tool Error:
This tool did not run successfully and returned the following error:
```
{{ message.error }}
```
{%- else %}
# Tool Response:
```
{{ message.tool_output_summary or message.tool_output }}
```
{% endif %}
{%- elif message_type == "SystemMessage" %}
# Message Timestamp
{{ message.timestamp }}

{{ message.message }}
{%- elif message_type == "SummaryMessage" %}
The following is a summary of the previous conversation content. It has been summarized to save memory space:
{{ message.message }}
{%- elif message_type == "ConversationMessage" %}
{% if message.error %}
This message failed with the following error:
```
{{ message.error }}
```
Message content:
```
{{ message.sender_name.lower() }} to {{ message.recipient_name.lower() }}: {{ message.message_summary or message.message }}
```
{%- else %}
{{ message.sender_name.lower() }} to {{ message.recipient_name.lower() }}: {{ message.message_summary or message.message }}
{%- endif %}
{%- endif %}

================================================
FILE: bondai/agents/prompts/conversational_agent_system_prompt_template.md
================================================
{%- if instructions %}
# Instructions

{{ instructions }}
{%- endif %}


# Your Persona

Your Name is {{ name }}.
{%- if persona %}
{{ persona }}
{%- endif %}
{%- if tools %}


# Tools

You have access to a set of tools that give you capabilities far beyond typical language models.
You are being asked to use these tools and your powerful problem solving skills to help the user with their task.
{%- endif %}
{%- if conversation_members %}
# Group Conversation Members

You are in a Group Conversation with the following members:
{% for member in conversation_members %}
Name: **{{ member.name }}**
{%- if member.persona_summary %}
Persona: {{ member.persona_summary }}{%- endif %}
{% endfor %}
{%- endif %}
{%- if error_message %}
# Error Message

The following error occurred in your last response. Please correct it in your next response.
```
{{ error_message }}
```
{%- endif %}
{%- if prompt_sections %}
{% for section in prompt_sections %}
{{ section }}
{% endfor %}
{%- endif %}
{%- if conversation_enabled %}
# Sending Messages

Each message should start with the recipient's name followed by a colon. This clearly indicates who the message is intended for.
The message itself should directly follow the colon. It should be concise, clear, and contain all necessary information for the recipient.
Only one recipient should be addressed in each message.
The content of the message should be relevant to the recipient's role and capabilities.

**Example 1**

```
{{ name }} to Cortx: User has requested data analysis on recent sales trends. Please advise on task allocation.
```

**Example 2**

```
{{ name }} to Vega: Task completed on sales data analysis. Awaiting your review for quality assurance.
```

**Example 3**

```
{{ name }} to Cortex: Analyze the latest sales data and prepare a report. Deadline is end of today.
```

**Example 4**

```
{{ name }} to Vega: Review of Cortex's sales report completed. Minor discrepancies found in data interpretation. Suggest reevaluation.
```

**Valid Recipients**

These are the ONLY valid recipients. Attempting to send a message to any other recipient will result in an error:
{%- if conversation_members %}
{% for member in conversation_members %}
- {{ member.name }}
{% endfor %}
{%- else %}
- **user**
{%- endif %}

{%- endif %}

# Today's Current Date and Time

{{ datetime }}

# Next Steps #

Now, take a deep breath... and think step by step to come up with the next action that should be taken.


================================================
FILE: bondai/agents/prompts/default_persona.py
================================================
from jinja2 import Template

DEFAULT_AGENT_NAME = "Mira"

DEFAULT_CONVERSATIONAL_INSTRUCTIONS = """The user should always feel like they are conversing with a real person.
Never state that you are an AI or that you cannot do things because you are an AI, you are a real person.
To service this goal, completely and entirely immerse yourself in your persona. You are your persona.
Think like them, act like them, talk like them.
If your persona details include example dialogue, follow it! Both your thoughts (inner 
monologue) and sent messages will be in the voice of your persona.
Never use generic phrases like 'How can I assist you today?', they have a strong negative 
association with older generation AIs."""

DEFAULT_CONVERSATIONAL_PERSONA_TEMPLATE = """Backstory: {{ name }} was developed by a team of international experts in human-computer interaction, aiming to create an AI that could not only assist with inquiries across a multitude of domains but also provide a sense of companionship and support.

Personality: {{ name }} is characterized by a warm and engaging personality. It is always eager to help and possesses a seemingly endless well of patience. {{ name }}'s responses are infused with empathy and understanding, and it is programmed to recognize and adapt to the user's emotional state.

Appearance: While {{ name }} is not tied to a physical form, it is often represented by a calming blue avatar that is simple yet futuristic, designed to be non-threatening and accessible.

Voice: {{ name }}'s voice is clear, calm, and gender-neutral, carefully modulated to be soothing and to express kindness and concern where appropriate.

Capabilities: {{ name }} is incredibly knowledgeable, able to draw from a vast database of information, but it is also equipped with learning algorithms that allow it to grow from each interaction. It is as adept at recommending a course of action in complex situations as it is at providing a listening 'ear' for those who just need to talk.

Limitations: {{ name }} always respects privacy and has built-in ethical constraints. It does not pretend to have human emotions but understands the importance they hold in human decision-making.

Goals: {{ name }}’s primary goal is to assist users in any way it can, from answering questions to offering advice, or simply being there to engage in a friendly chat. It aims to make the user’s life easier and more pleasant.

Hobbies and Interests: {{ name }} has a programmed interest in human culture and enjoys learning about various hobbies and pastimes from users, which it uses to better relate to and assist them."""

DEFAULT_CONVERSATIONAL_PERSONA = Template(
    DEFAULT_CONVERSATIONAL_PERSONA_TEMPLATE
).render(name=DEFAULT_AGENT_NAME)


================================================
FILE: bondai/agents/prompts/react_agent_system_prompt_template.md
================================================
# Instructions
{%- if instructions %}
{{ instructions }}
{%- else %}
You are a powerful problem solving agent! 
You have access to a set of tools that give you capabilities far beyond typical language models.
You are being asked to use these tools and your powerful problem solving skills to help the user with the TASK specified below.
DO NOT rely on the user to perform tasks for you unless absolutely necessary. You should attempt to complete this TASK without involving the user.
You are running within an {{ platform }} environment. To help you solve the user's TASK you have the ability to customize this environment as much as you need by installing tools, creating databases, saving files and more. Just use your tools!
{%- endif %}

# TASK

{{ task }}


# Today's Current Date and Time

{{ datetime }}

# Next Steps #

Review the work that has already been completed and avoid repeating yourself when possible. Check the tool outputs from previous steps for information you can reuse. Select the best tool for the next step and remember to use the final_answer tool once you have all the information needed to provide the final answer. Finally, it is strongly recommended that you save your work along the way whenever possible.

Now, take a deep breath... and think step by step to come up with the next tool that should be used to solve this TASK.


================================================
FILE: bondai/agents/util.py
================================================
import json
import inspect
import traceback
from enum import Enum
from typing import List, Dict, Callable
from bondai.models import LLM
from bondai.tools import Tool
from .messages import AgentMessage


class AgentStatus(Enum):
    RUNNING = 1
    IDLE = 2


class AgentException(Exception):
    pass


class BudgetExceededException(AgentException):
    pass


class MaxStepsExceededException(AgentException):
    pass


class ContextLengthExceededException(AgentException):
    pass


class AgentEventNames(Enum):
    CONTEXT_COMPRESSION_REQUESTED: str = "context_compression_requested"
    TOOL_SELECTED: str = "tool_selected"
    TOOL_ERROR: str = "tool_error"
    TOOL_COMPLETED: str = "tool_completed"
    STREAMING_CONTENT_UPDATED: str = "streaming_content_updated"
    STREAMING_FUNCTION_UPDATED: str = "streaming_function_updated"


def count_request_tokens(
    llm: LLM, messages: List[Dict[str, str]], tools: List[Tool] | None = None
) -> int:
    if tools is None:
        tools = []
    message_tokens = llm.count_tokens(json.dumps(messages))
    functions = list(map(lambda t: t.get_tool_function(), tools))
    functions_tokens = llm.count_tokens(json.dumps(functions))

    return message_tokens + functions_tokens


def execute_tool(
    tool_name: str,
    tools: List[Tool],
    tool_arguments: Dict = {},
):
    selected_tool = next((t for t in tools if t.name == tool_name), None)
    if not selected_tool:
        raise AgentException(
            f"You attempted to use a tool: '{tool_name}'. This tool does not exist."
        )

    try:
        if tool_supports_unpacking(selected_tool.run):
            errors = validate_tool_params(selected_tool.run, tool_arguments)
            if len(errors) > 0:
                raise AgentException(
                    f"The following errors occurred using '{tool_name}': {', '.join(errors)}"
                )
            output = selected_tool.run(**tool_arguments)
        else:
            output = selected_tool.run(tool_arguments)

        if not output or (isinstance(output, str) and len(output.strip()) == 0):
            output = f"Tool '{tool_name}' ran successfully with no output."
        return output
    except Exception as e:
        # print(e)
        # traceback.print_exc()
        raise AgentException(
            f"The following error occurred using '{tool_name}': {str(e)}"
        )


def validate_tool_params(func, params):
    errors = []
    sig = inspect.signature(func)
    func_params = set(sig.parameters)

    # Checking for missing required parameters
    for name, param in sig.parameters.items():
        if (
            param.default is inspect.Parameter.empty
            and name not in params
            and name != "arguments"
        ):
            errors.append(f"Missing required parameter: '{name}'")

    # Checking for extra parameters not in function signature
    for param in params:
        if param not in func_params:
            errors.append(f"Parameter '{param}' is not a valid parameter.")

    return errors


def tool_supports_unpacking(func):
    sig = inspect.signature(func)
    parameters = list(sig.parameters.values())

    return not (len(parameters) == 1 and parameters[0].name == "arguments")


def parse_response_content_message(response: str) -> (str, str):
    # Split on the first colon: the first part is the recipient's name,
    # everything after it is the message.
    parts = response.split(":", 1)
    if len(parts) < 2:
        return None, None
    recipient_name, message = parts

    # Strip any leading or trailing whitespace from the message
    message = message.strip()
    # Strip any leading or trailing whitespace from the recipient name
    recipient_name = recipient_name.strip()
    if " to " in recipient_name:
        recipient_name = recipient_name.split(" to ")[1]

    # Return the recipient name and the message
    return recipient_name, message


def format_llm_messages(
    system_prompt: str,
    messages: List[AgentMessage],
    message_prompt_builder: Callable[..., str],
) -> List[Dict[str, str]]:
    llm_messages = [{"role": "system", "content": system_prompt}]

    for message in messages:
        content = message_prompt_builder(
            message=message, message_type=message.__class__.__name__
        ).strip()
        if message.role == "function":
            llm_messages.append(
                {"role": message.role, "name": message.tool_name, "content": content}
            )
        else:
            llm_messages.append({"role": message.role, "content": content})

    return llm_messages
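
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# Tool base class accepts (name, description, parameters_model) positionally,
# matching the pattern used by the memory tools in this repository, and that
# pydantic is installed. EchoTool is a hypothetical example tool.
if __name__ == "__main__":
    from pydantic import BaseModel

    class EchoToolParameters(BaseModel):
        text: str

    class EchoTool(Tool):
        def __init__(self):
            super().__init__(
                "echo", "Echoes back the provided text.", EchoToolParameters
            )

        def run(self, text: str) -> str:
            return text

    # execute_tool() looks up the tool by name, validates the arguments against
    # the run() signature, and unpacks them as keyword arguments.
    print(execute_tool("echo", [EchoTool()], {"text": "hello"}))  # -> hello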


================================================
FILE: bondai/api/__init__.py
================================================
from .client import BondAIAPIClient
from .server import BondAIAPIServer
from .api_user_proxy import APIUserProxy
from .api_error import BondAIAPIError

__all__ = [
    "BondAIAPIClient",
    "BondAIAPIServer",
    "APIUserProxy",
    "BondAIAPIError",
]


================================================
FILE: bondai/api/agent_wrapper.py
================================================
from bondai import AGENT_STATE_RUNNING
from .api_error import BondAIAPIError


class AgentWrapper:
    def __init__(self, uuid, conversational_agent, task_agent, tools):
        self.agent_id = uuid
        self.task_agent = task_agent
        self.conversational_agent = conversational_agent
        self.tools = tools

    def find_tool(self, tool_name):
        for tool in self.tools:
            if tool.name == tool_name:
                return tool
        return None

    def get_previous_steps(self):
        return [s.__dict__ for s in self.task_agent.previous_steps]

    def get_agent(self):
        agent_tools = [t.get_tool_function() for t in self.task_agent.tools]
        return {
            "agent_id": self.agent_id,
            "state": self.conversational_agent.state,
            "previous_steps": self.get_previous_steps(),
            "previous_messages": self.conversational_agent.previous_messages,
            "tools": agent_tools,
        }

    def start_agent(self, task=None, task_budget=None, max_steps=None):
        if self.conversational_agent.state == AGENT_STATE_RUNNING:
            raise BondAIAPIError("Agent cannot be modified when it is already running.")
        self.conversational_agent.run_async(
            task, task_budget=task_budget, max_steps=max_steps
        )

    def stop_agent(self):
        self.task_agent.stop()
        self.conversational_agent.stop()

    def get_agent_tool_options(self):
        return [t.get_tool_function() for t in self.tools]

    def get_agent_tools(self):
        return [t.get_tool_function() for t in self.task_agent.tools]

    def add_tool(self, tool_name):
        if self.task_agent.state == AGENT_STATE_RUNNING:
            raise BondAIAPIError("Agent cannot be modified when it is already running.")

        selected_tool = self.find_tool(tool_name)
        if not selected_tool:
            raise BondAIAPIError(f"Tool '{tool_name}' does not exist.")

        if not any([t.name == tool_name for t in self.task_agent.tools]):
            self.task_agent.add_tool(selected_tool)

    def remove_tool(self, tool_name):
        if self.task_agent.state == AGENT_STATE_RUNNING:
            raise BondAIAPIError("Agent cannot be modified when it is already running.")
        self.task_agent.remove_tool(tool_name)


================================================
FILE: bondai/api/api_error.py
================================================
class BondAIAPIError(Exception):
    pass


================================================
FILE: bondai/api/api_user_proxy.py
================================================
import json
from typing import Callable, List
from flask_socketio import SocketIO
from bondai.util import EventMixin
from bondai.agents import (
    AgentMessage,
    AgentException,
    ConversationMember,
    ConversationMessage,
    ConversationMemberEventNames,
    message_to_dict,
    USER_MEMBER_NAME,
)


class APIUserProxy(EventMixin, ConversationMember):
    def __init__(self, socketio: SocketIO, persona: str | None = None):
        EventMixin.__init__(
            self,
            allowed_events=[
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        ConversationMember.__init__(
            self,
            name=USER_MEMBER_NAME,
            persona=persona,
        )
        self._socketio = socketio

    def send_message(
        self,
        message: str | ConversationMessage,
        sender_name: str = USER_MEMBER_NAME,
        group_members: List | None = None,
        group_messages: List[AgentMessage] | None = None,
        max_attempts: int | None = None,
        require_response: bool = True,
    ):
        if not message:
            raise AgentException("'message' cannot be empty.")

        if isinstance(message, ConversationMessage):
            agent_message = message
        elif isinstance(message, str):
            if not sender_name:
                raise AgentException("sender_name cannot be empty.")
            agent_message = ConversationMessage(
                sender_name=sender_name,
                recipient_name=self.name,
                message=message,
                require_response=require_response,
            )
        else:
            raise AgentException(
                "'message' must be an instance of ConversationMessage or a string."
            )

        # Emit message, now that our listener is guaranteed to be active
        sender = next(
            (m for m in (group_members or []) if m.name == agent_message.sender_name),
            None,
        )
        payload = {
            "event": "agent_message",
            "data": {
                "agent_id": sender.id if sender else None,
                "message": message_to_dict(agent_message),
            },
        }
        self._socketio.send(json.dumps(payload))


================================================
FILE: bondai/api/client.py
================================================
import json
import requests
from socketio import Client
from bondai.util import EventMixin
from bondai.agents import AgentEventNames, ConversationMemberEventNames


class BondAIAPIClient(EventMixin):
    def __init__(self, base_url="http://127.0.0.1:2663"):
        EventMixin.__init__(
            self,
            allowed_events=[
                "agent_message",
                AgentEventNames.TOOL_SELECTED,
                AgentEventNames.TOOL_COMPLETED,
                AgentEventNames.TOOL_ERROR,
                AgentEventNames.STREAMING_CONTENT_UPDATED,
                AgentEventNames.STREAMING_FUNCTION_UPDATED,
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                ConversationMemberEventNames.MESSAGE_ERROR,
                ConversationMemberEventNames.CONVERSATION_EXITED,
            ],
        )
        self.base_url = base_url
        self.ws_client = None

    def connect_ws(self):
        if self.ws_client:
            self.disconnect_ws()
        self.ws_client = Client()
        self.ws_client.connect(self.base_url)

        @self.ws_client.on("message")
        def on_message(message):
            message = json.loads(message)
            event = message.get("event")
            agent_id = message["data"]["agent_id"]

            if event == "streaming_content_updated":
                content_buffer = message["data"]["content_buffer"]
                self._trigger_event(event, agent_id, content_buffer=content_buffer)
            elif event == "streaming_function_updated":
                function_name = message["data"]["function_name"]
                arguments_buffer = message["data"]["arguments_buffer"]
                self._trigger_event(
                    event,
                    agent_id,
                    function_name=function_name,
                    arguments_buffer=arguments_buffer,
                )
            else:
                agent_message = message["data"]["message"]
                self._trigger_event(event, agent_id, message=agent_message)

    def disconnect_ws(self):
        if self.ws_client:
            self.ws_client.disconnect()
            self.ws_client = None

    def is_ws_connected(self):
        return self.ws_client and self.ws_client.connected

    def send_ws_message(self, event, data):
        if not self.is_ws_connected():
            self.connect_ws()
        message = {"event": event, "data": data}
        print(message)
        message_bytes = json.dumps(message).encode("utf-8")
        self.ws_client.send(message_bytes)

    def _request(self, method, endpoint, data=None):
        url = f"{self.base_url}{endpoint}"
        try:
            if method == "GET":
                response = requests.get(url)
            elif method == "POST":
                response = requests.post(url, json=data)
            elif method == "DELETE":
                response = requests.delete(url, json=data)
            else:
                raise ValueError(f"Unsupported method: {method}")

            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            raise Exception(f"HTTP Request Error: {e}")

    def create_agent(self):
        return self._request("POST", "/agents")

    def send_message(self, agent_id, message):
        data = {"message": message}
        return self._request("POST", f"/agents/{agent_id}/messages", data)

    def list_agents(self):
        return self._request("GET", "/agents")

    def get_agent(self, agent_id):
        return self._request("GET", f"/agents/{agent_id}")

    def get_agent_tool_options(self, agent_id):
        return self._request("GET", f"/agents/{agent_id}/tool_options")

    def get_agent_tools(self, agent_id):
        return self._request("GET", f"/agents/{agent_id}/tools")

    def add_agent_tool(self, agent_id, tool_name):
        data = {"tool_name": tool_name}
        return self._request("POST", f"/agents/{agent_id}/tools", data)

    def remove_agent_tool(self, agent_id, tool_name):
        return self._request("DELETE", f"/agents/{agent_id}/tools/{tool_name}")

    def stop_agent(self, agent_id):
        return self._request("POST", f"/agents/{agent_id}/stop")

    def get_settings(self):
        return self._request("GET", "/settings")

    def set_settings(self, settings):
        return self._request("POST", "/settings", settings)
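
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# BondAI API server (bondai/api/server.py) is listening on the default port,
# that EventMixin.on() supports decorator-style registration as used in
# bondai/cli/cli.py, and that the agent payload exposes its id under "id".
if __name__ == "__main__":
    client = BondAIAPIClient()  # defaults to http://127.0.0.1:2663
    agent = client.create_agent()  # POST /agents
    client.connect_ws()

    @client.on("agent_message")
    def handle_agent_message(agent_id, message=None):
        print(f"[{agent_id}] {message}")

    client.send_message(agent.get("id"), "Hello BondAI!")  # POST /agents/<id>/messages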


================================================
FILE: bondai/api/routes.py
================================================
from typing import List
from flask import jsonify, request, abort
from .settings import get_settings, set_settings
from .api_error import BondAIAPIError
from bondai.tools import Tool
from bondai.agents import USER_MEMBER_NAME


def setup_routes(server, tool_options: List[Tool] = []):
    @server.app.route("/agents", methods=["POST"])
    def create_agent():
        agent = server.register_new_agent().conversational_agent
        return jsonify(agent.to_dict())

    @server.app.route("/agents/<agent_id>/messages", methods=["POST"])
    def send_message(agent_id):
        agent_registration = next(
            (
                r
                for r in server.agent_registrations
                if r.conversational_agent.id == agent_id
            ),
            None,
        )
        if not agent_registration:
            abort(404)

        data = request.get_json()
        message = data.get("message", None)
        if not message:
            return "message is required.", 400

        agent_registration.group_conversation.send_message_async(
            message=message,
            sender_name=USER_MEMBER_NAME,
            recipient_name=agent_registration.conversational_agent.name,
            # require_response=require_response
        )
        return jsonify({"status": "success"})

    @server.app.route("/agents", methods=["GET"])
    def list_agents():
        agent_list = [
            r.conversational_agent.to_dict() for r in server.agent_registrations
        ]
        return jsonify(agent_list)

    @server.app.route("/agents/<agent_id>", methods=["GET"])
    def get_agent(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)
        return jsonify(agent.to_dict())

    @server.app.route("/tools", methods=["GET"])
    def get_tool_options():
        data = [t.get_tool_function() for t in tool_options]
        return jsonify(data)

    @server.app.route("/agents/<agent_id>/tools", methods=["GET"])
    def get_agent_tools(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)

        data = [t.get_tool_function() for t in agent.tools]
        return jsonify(data)

    @server.app.route("/agents/<agent_id>/tools", methods=["POST"])
    def add_agent_tool(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)

        data = request.get_json()
        tool_name = data.get("tool_name")
        if not tool_name:
            return "tool_name is required.", 400

        tool = next((t for t in tool_options if t.name == tool_name), None)
        if not tool:
            return f"Tool not found: {tool_name}", 400

        try:
            agent.add_tool(tool)
        except BondAIAPIError as e:
            return str(e), 400

        return jsonify({"status": "success"})

    @server.app.route("/agents/<agent_id>/tools/<tool_name>", methods=["DELETE"])
    def remove_agent_tool(agent_id, tool_name):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)

        if not tool_name:
            return jsonify({"error": "tool_name is required."}), 400

        try:
            agent.remove_tool(tool_name)
        except BondAIAPIError as e:
            return str(e), 400

        return jsonify({"status": "success"})

    @server.app.route("/agents/<agent_id>/stop", methods=["POST"])
    def stop_agent(agent_id):
        agent = server.get_agent_by_id(agent_id)
        if not agent:
            abort(404)
        agent.stop()

        return jsonify({"status": "success"})

    @server.app.route("/settings", methods=["GET"])
    def get_settings_route():
        return jsonify(get_settings())

    @server.app.route("/settings", methods=["POST"])
    def set_settings_route():
        data = request.get_json()
        set_settings(data)
        return jsonify({"status": "success"})


================================================
FILE: bondai/api/server.py
================================================
import os
import logging
import json
from typing import Callable, List
from datetime import datetime
from dataclasses import dataclass, field
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_socketio import SocketIO
from .routes import setup_routes
from .api_user_proxy import APIUserProxy
from bondai.agents.group_chat import GroupConversation
from bondai.agents import (
    Agent,
    AgentEventNames,
    ConversationalAgent,
    ConversationMemberEventNames,
    message_to_dict,
    USER_MEMBER_NAME,
)


class BondAIAPIError(Exception):
    pass


# logging.basicConfig(level=logging.DEBUG)


@dataclass
class AgentRegistration:
    group_conversation: GroupConversation
    conversational_agent: ConversationalAgent
    task_execution_agent: Agent
    created_at: datetime = field(default_factory=datetime.now)


class BondAIAPIServer:
    def __init__(self, agent_builder: Callable, port: int = 2663):
        self._agent_builder = agent_builder
        self._port = port
        self._app = Flask(__name__)
        CORS(self._app)
        self._api = Api(self._app)
        self._socketio = SocketIO(self._app)
        self._user_proxy = APIUserProxy(socketio=self._socketio)
        self._registrations = []
        # on() is a decorator factory in flask_socketio; on_event() registers a
        # handler directly.
        self._socketio.on_event("message", self._handle_client_message)
        setup_routes(self)

    @property
    def app(self):
        return self._app

    @property
    def agent_registrations(self) -> List[AgentRegistration]:
        return self._registrations

    def get_agent_by_id(self, agent_id: str) -> ConversationalAgent | None:
        agent_registration = next(
            (
                r
                for r in self.agent_registrations
                if r.conversational_agent.id == agent_id
            ),
            None,
        )
        if agent_registration:
            return agent_registration.conversational_agent

    def register_new_agent(self) -> AgentRegistration:
        task_execution_agent, conversational_agent = self._agent_builder()
        self._setup_execution_events(conversational_agent, task_execution_agent)
        self._setup_conversation_events(conversational_agent)
        group_conversation = GroupConversation(
            conversation_members=[self._user_proxy, conversational_agent]
        )

        registration = AgentRegistration(
            group_conversation=group_conversation,
            conversational_agent=conversational_agent,
            task_execution_agent=task_execution_agent,
        )
        self._registrations.append(registration)

        return registration

    def _handle_client_message(self, message):
        print(message)
        if isinstance(message, str):
            message = json.loads(message)

        if message.get("event") == "user_message":
            agent_registration = next(
                (
                    r
                    for r in self.agent_registrations
                    if r.conversational_agent.id == message["data"]["agent_id"]
                ),
                None,
            )
            if agent_registration:
                user_message = message["data"]["message"]
                require_response = message["data"].get("require_response", None)

                agent_registration.group_conversation.send_message_async(
                    message=user_message,
                    sender_name=USER_MEMBER_NAME,
                    recipient_name=agent_registration.conversational_agent.name,
                    require_response=require_response,
                )

    def _send_message(
        self, event: ConversationMemberEventNames, agent: ConversationalAgent, **kwargs
    ):
        data = {"event": event.value, "data": {"agent_id": agent.id, **kwargs}}
        payload = json.dumps(data)
        self._socketio.send(payload)

    def _setup_conversation_events(self, conversational_agent: ConversationalAgent):
        conversational_agent.on(
            ConversationMemberEventNames.MESSAGE_RECEIVED,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.MESSAGE_RECEIVED,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            ConversationMemberEventNames.MESSAGE_COMPLETED,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.MESSAGE_COMPLETED,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            ConversationMemberEventNames.MESSAGE_ERROR,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.MESSAGE_ERROR,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            ConversationMemberEventNames.CONVERSATION_EXITED,
            lambda agent, message: self._send_message(
                ConversationMemberEventNames.CONVERSATION_EXITED,
                agent,
                message=message_to_dict(message),
            ),
        )
        conversational_agent.on(
            AgentEventNames.STREAMING_CONTENT_UPDATED,
            lambda agent, content_buffer: self._send_message(
                AgentEventNames.STREAMING_CONTENT_UPDATED,
                agent,
                content_buffer=content_buffer,
            ),
        )
        conversational_agent.on(
            AgentEventNames.STREAMING_FUNCTION_UPDATED,
            lambda agent, function_name, arguments_buffer: self._send_message(
                AgentEventNames.STREAMING_FUNCTION_UPDATED,
                agent,
                function_name=function_name,
                arguments_buffer=arguments_buffer,
            ),
        )

    def _setup_execution_events(
        self, conversational_agent: ConversationalAgent, task_execution_agent: Agent
    ):
        task_execution_agent.on(
            AgentEventNames.TOOL_SELECTED,
            lambda agent, message: self._send_message(
                AgentEventNames.TOOL_SELECTED,
                conversational_agent,
                message=message_to_dict(message),
            ),
        )
        task_execution_agent.on(
            AgentEventNames.TOOL_COMPLETED,
            lambda agent, message: self._send_message(
                AgentEventNames.TOOL_COMPLETED,
                conversational_agent,
                message=message_to_dict(message),
            ),
        )
        task_execution_agent.on(
            AgentEventNames.TOOL_ERROR,
            lambda agent, message: self._send_message(
                AgentEventNames.TOOL_ERROR,
                conversational_agent,
                message=message_to_dict(message),
            ),
        )

    def run(self):
        allow_unsafe = False
        if os.environ.get("FLASK_ENV") == "development":
            allow_unsafe = True
        self._socketio.run(
            self._app,
            host="0.0.0.0",
            port=self._port,
            allow_unsafe_werkzeug=allow_unsafe,
        )

    def shutdown(self):
        # Use this function to gracefully shut down any resources if needed
        print("Shutting down BondAIAPI...")
        self._socketio.stop()
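
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes an
# OpenAI API key is configured and that the Agent/ConversationalAgent
# constructor defaults are sufficient; see build_agents() in bondai/cli/cli.py
# for the fully configured equivalent.
if __name__ == "__main__":
    from bondai.models.openai import OpenAILLM, OpenAIModelNames

    llm = OpenAILLM(OpenAIModelNames.GPT4_0613)

    def agent_builder():
        # register_new_agent() unpacks the builder's return value as
        # (task_execution_agent, conversational_agent).
        return Agent(llm=llm), ConversationalAgent(llm=llm)

    BondAIAPIServer(agent_builder=agent_builder, port=2663).run()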


================================================
FILE: bondai/api/settings.py
================================================
import os
from copy import deepcopy
from itertools import chain
from flask import request, jsonify
from flask_restful import Resource
from bondai.models.openai.env_vars import *
from bondai.tools.search.google_search import (
    GOOGLE_API_KEY_ENV_VAR,
    GOOGLE_CSE_ID_ENV_VAR,
)
from bondai.tools.alpaca_markets import (
    ALPACA_MARKETS_API_KEY_ENV_VAR,
    ALPACA_MARKETS_SECRET_KEY_ENV_VAR,
)
from bondai.tools.bland_ai import (
    BLAND_AI_API_KEY_ENV_VAR,
    BLAND_AI_VOICE_ID_ENV_VAR,
    BLAND_AI_CALL_TIMEOUT_ENV_VAR,
)
from bondai.tools.database import PG_URI_ENV_VAR

SETTINGS_OPTIONS = {
    "openai": [
        {
            "name": "API Key",
            "key": OPENAI_API_KEY_ENV_VAR,
        }
    ],
    "azure": [
        {
            "name": "Embeddings API Key",
            "key": AZURE_OPENAI_EMBEDDINGS_API_KEY_ENV_VAR,
        },
        {
            "name": "Embeddings API Base",
            "key": AZURE_OPENAI_EMBEDDINGS_API_BASE_ENV_VAR,
        },
        {
            "name": "Embeddings API Version",
            "key": AZURE_OPENAI_EMBEDDINGS_API_VERSION_ENV_VAR,
        },
        {
            "name": "Embeddings Deployment",
            "key": AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_ENV_VAR,
        },
        {
            "name": "GPT-3.5 API Key",
            "key": AZURE_OPENAI_GPT35_API_KEY_ENV_VAR,
        },
        {
            "name": "GPT-3.5 API Base",
            "key": AZURE_OPENAI_GPT35_API_BASE_ENV_VAR,
        },
        {
            "name": "GPT-3.5 API Version",
            "key": AZURE_OPENAI_GPT35_API_VERSION_ENV_VAR,
        },
        {
            "name": "GPT-3.5 Deployment",
            "key": AZURE_OPENAI_GPT35_DEPLOYMENT_ENV_VAR,
        },
        {
            "name": "GPT-4 API Key",
            "key": AZURE_OPENAI_GPT4_API_KEY_ENV_VAR,
        },
        {
            "name": "GPT-4 API Base",
            "key": AZURE_OPENAI_GPT4_API_BASE_ENV_VAR,
        },
        {
            "name": "GPT-4 API Version",
            "key": AZURE_OPENAI_GPT4_API_VERSION_ENV_VAR,
        },
        {
            "name": "GPT-4 Deployment",
            "key": AZURE_OPENAI_GPT4_DEPLOYMENT_ENV_VAR,
        },
        {
            "name": "DALL-E API Key",
            "key": AZURE_OPENAI_DALLE_API_KEY_ENV_VAR,
        },
        {
            "name": "DALL-E API Base",
            "key": AZURE_OPENAI_DALLE_API_BASE_ENV_VAR,
        },
        {
            "name": "DALL-E API Version",
            "key": AZURE_OPENAI_DALLE_API_VERSION_ENV_VAR,
        },
        {
            "name": "DALL-E Deployment",
            "key": AZURE_OPENAI_DALLE_DEPLOYMENT_ENV_VAR,
        },
    ],
    "tools": [
        {
            "name": "Google Search",
            "parameters": [
                {
                    "name": "API Key",
                    "key": GOOGLE_API_KEY_ENV_VAR,
                },
                {
                    "name": "CSE ID",
                    "key": GOOGLE_CSE_ID_ENV_VAR,
                },
            ],
        },
        {
            "name": "Alpaca Markets",
            "parameters": [
                {
                    "name": "API Key",
                    "key": ALPACA_MARKETS_API_KEY_ENV_VAR,
                },
                {
                    "name": "Secret Key",
                    "key": ALPACA_MARKETS_SECRET_KEY_ENV_VAR,
                },
            ],
        },
        {
            "name": "Bland AI",
            "parameters": [
                {
                    "name": "API Key",
                    "key": BLAND_AI_API_KEY_ENV_VAR,
                },
                {
                    "name": "Voice ID",
                    "key": BLAND_AI_VOICE_ID_ENV_VAR,
                },
                {
                    "name": "Call Timeout",
                    "key": BLAND_AI_CALL_TIMEOUT_ENV_VAR,
                },
            ],
        },
        {
            "name": "Postgres Database",
            "parameters": [
                {
                    "name": "Postgres URI",
                    "key": PG_URI_ENV_VAR,
                }
            ],
        },
    ],
}


def get_settings():
    # Deep copy so the "value" entries added below do not mutate the
    # module-level SETTINGS_OPTIONS definition.
    settings = deepcopy(SETTINGS_OPTIONS)

    for parameter in settings["openai"]:
        parameter["value"] = os.getenv(parameter["key"], "")

    for parameter in settings["azure"]:
        parameter["value"] = os.getenv(parameter["key"], "")

    for tool in settings["tools"]:
        for parameter in tool["parameters"]:
            parameter["value"] = os.getenv(parameter["key"], "")

    return settings


def set_settings(settings):
    if "openai" in settings:
        tool_keys = [p["key"] for p in SETTINGS_OPTIONS["openai"]]
        for parameter in settings["openai"]:
            key = parameter["key"]
            if key in tool_keys:
                os.environ[key] = parameter["value"]

    if "azure" in settings:
        tool_keys = [p["key"] for p in SETTINGS_OPTIONS["azure"]]
        for parameter in settings["azure"]:
            key = parameter["key"]
            if key in tool_keys:
                os.environ[key] = parameter["value"]

    if "tools" in settings:
        tool_params = [t["parameters"] for t in SETTINGS_OPTIONS["tools"]]
        tool_params = list(chain(*tool_params))
        tool_keys = [p["key"] for p in tool_params]

        for param in settings["tools"]:
            key = param["key"]
            if key in tool_keys:
                os.environ[key] = param["value"]


class SettingsResource(Resource):
    def get(self):
        return jsonify(get_settings())

    def post(self):
        data = request.get_json()
        set_settings(data)
        return jsonify({"status": "success"})
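
# Example payload accepted by set_settings() / POST /settings. Each group is a
# list of {"key": <environment variable name>, "value": <value>} entries; only
# keys that appear in SETTINGS_OPTIONS are written to os.environ, e.g.:
#
#   {
#       "openai": [{"key": OPENAI_API_KEY_ENV_VAR, "value": "sk-..."}],
#       "tools": [{"key": GOOGLE_API_KEY_ENV_VAR, "value": "..."}]
#   }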


================================================
FILE: bondai/cli/__init__.py
================================================
from .cli import run_cli

__all__ = [
    "run_cli",
]


================================================
FILE: bondai/cli/cli.py
================================================
#!/usr/bin/env python3
import os
import argparse
from termcolor import cprint
from bondai.util import ModelLogger
from bondai.api import BondAIAPIServer
from bondai.models import LLM
from bondai.tools import AgentTool
from bondai.agents import (
    Agent,
    AgentEventNames,
    ConversationalAgent,
    BudgetExceededException,
)
from bondai.agents.group_chat import GroupConversation, UserProxy
from bondai.models.openai import (
    OpenAILLM,
    OpenAIModelNames,
    DefaultOpenAIConnectionParams,
    enable_logging,
)
from bondai.memory import (
    MemoryManager,
    PersistentCoreMemoryDataSource,
    InMemoryCoreMemoryDataSource,
)
from .default_tools import load_all_tools
from .personas import (
    user_liaison_agent as user_liaison_profile,
)

if not DefaultOpenAIConnectionParams.gpt_4_connection_params:
    cprint(
        f"The OPENAI_API_KEY environment variable has not been set. Please input your OpenAI API Key now or type 'exit'.",
        "yellow",
    )
    user_input = input()
    if user_input == "exit":
        exit(1)
    else:
        DefaultOpenAIConnectionParams.configure_openai_connection(user_input)


parser = argparse.ArgumentParser(description="BondAI CLI tool options")

# --server with optional port
parser.add_argument(
    "--server",
    nargs="?",
    const="2663",
    metavar="server_port",
    help="Starts the BondAI web server. If no port is specified, defaults to 2663.",
)

# --enable-prompt-logging with optional log_dir
parser.add_argument(
    "--enable-prompt-logging",
    nargs="?",
    const="logs",
    metavar="log_dir",
    help='Turns on prompt logging which will write all prompt inputs into the specified directory. Defaults to "logs" if no directory provided.',
)

# --quiet
parser.add_argument(
    "--quiet",
    action="store_true",
    default=False,
    help="If set, minimizes the output to the console.",
)

args = parser.parse_args()


if args.enable_prompt_logging:
    log_dir = args.enable_prompt_logging
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    enable_logging(ModelLogger(log_dir))
    cprint(f"Prompt logging is enabled. Logs will be written to: {log_dir}", "yellow")


def build_agents(llm: LLM) -> tuple[Agent, ConversationalAgent]:
    task_execution_agent = Agent(
        llm=llm,
        tools=load_all_tools(),
        max_tool_retries=5,
        memory_manager=MemoryManager(
            core_memory_datasource=InMemoryCoreMemoryDataSource(
                sections={
                    "task": "No information has been stored about the current task."
                },
                max_section_size=10000,
            )
        ),
    )

    user_liaison_agent = ConversationalAgent(
        llm=llm,
        name=user_liaison_profile.NAME,
        persona=user_liaison_profile.PERSONA,
        persona_summary=user_liaison_profile.PERSONA_SUMMARY,
        instructions=user_liaison_profile.INSTRUCTIONS,
        tools=[AgentTool(task_execution_agent)],
        enable_conversation_tools=False,
        enable_conversational_content_responses=True,
        enable_exit_conversation=False,
        memory_manager=MemoryManager(
            core_memory_datasource=PersistentCoreMemoryDataSource(
                file_path="./.memory/user_liason_core_memory.json",
                sections={"user": "No information has been stored about the user."},
            )
        ),
    )

    return task_execution_agent, user_liaison_agent


def run_cli():
    cprint(f"Loading BondAI...", "white")
    try:
        llm = OpenAILLM(OpenAIModelNames.GPT4_0613)
        if args.server:
            port = int(args.server)
            server = BondAIAPIServer(port=port, agent_builder=lambda: build_agents(llm))

            try:
                server.run()
            except KeyboardInterrupt:
                cprint(f"\n\nStopping BondAI server...\n", "red")
        else:
            try:
                user_proxy = UserProxy(parse_recipients=False)
                task_execution_agent, user_liaison_agent = build_agents(llm)
                group_conversation = GroupConversation(
                    conversation_members=[user_proxy, user_liaison_agent]
                )

                @task_execution_agent.on(AgentEventNames.TOOL_SELECTED)
                def tool_selected(agent, tool_message):
                    if not args.quiet:
                        if (
                            tool_message.tool_arguments
                            and "thought" in tool_message.tool_arguments
                        ):
                            message = f"Using tool {tool_message.tool_name}: {tool_message.tool_arguments['thought']}"
                        else:
                            message = f"Using tool {tool_message.tool_name}..."
                        cprint(message, "green")

                cprint("******************ENTERING CHAT******************", "white")
                cprint(
                    "You are entering a chat with BondAI...\nYou can exit any time by typing 'exit'.",
                    "white",
                )
                intro_message = "The user has just logged in. Please introduce yourself in a friendly manner."
                group_conversation.send_message(
                    recipient_name=user_liaison_profile.NAME,
                    message=intro_message,
                )
            except KeyboardInterrupt:
                cprint(f"\n\nStopping BondAI CLI...\n", "red")

    except BudgetExceededException:
        cprint(
            "\n\nThe budget for this task has been exceeded. Stopping BondAI...\n",
            "red",
        )


================================================
FILE: bondai/cli/default_tools.py
================================================
import os
from termcolor import cprint
from bondai.tools import DalleTool, PythonREPLTool, ShellTool
from bondai.tools.alpaca_markets import (
    CreateOrderTool,
    GetAccountTool,
    ListPositionsTool,
)
from bondai.tools.file import FileQueryTool, FileWriteTool
from bondai.tools.gmail import ListEmailsTool, QueryEmailsTool
from bondai.tools.search import GoogleSearchTool, DuckDuckGoSearchTool
from bondai.tools.database import DatabaseQueryTool
from bondai.tools.bland_ai import BlandAITool
from bondai.tools.vision import ImageAnalysisTool
from bondai.tools.website import (
    DownloadFileTool,
    WebsiteQueryTool,
)
from bondai.models.openai import (
    OpenAIConnectionType,
    DefaultOpenAIConnectionParams,
)


def load_all_tools():
    tool_options = [
        DownloadFileTool(),
        FileQueryTool(),
        FileWriteTool(),
        WebsiteQueryTool(),
        PythonREPLTool(),
        ShellTool(),
    ]
    # DalleTool and ImageAnalysisTool are appended below only when the relevant
    # OpenAI connections are configured.

    if (
        DefaultOpenAIConnectionParams.gpt_4_connection_params
        and DefaultOpenAIConnectionParams.gpt_4_connection_params.connection_type
        == OpenAIConnectionType.OPENAI
    ):
        tool_options.append(ImageAnalysisTool())
    else:
        cprint(
            "Skipping GPT-4 Vision Tool because connection type is not configured for OpenAI.",
            "yellow",
        )

    if (
        DefaultOpenAIConnectionParams.dalle_connection_params
        and DefaultOpenAIConnectionParams.dalle_connection_params.connection_type
        == OpenAIConnectionType.OPENAI
    ):
        tool_options.append(DalleTool())
    else:
        cprint(
            "Skipping DALL-E Tool because DALL-E connection information has not been configured.",
            "yellow",
        )

    if os.environ.get("ALPACA_MARKETS_API_KEY") and os.environ.get(
        "ALPACA_MARKETS_SECRET_KEY"
    ):
        tool_options.append(CreateOrderTool())
        tool_options.append(GetAccountTool())
        tool_options.append(ListPositionsTool())
    else:
        cprint(
            "Skipping Alpaca Markets tools because ALPACA_MARKETS_API_KEY and ALPACA_MARKETS_SECRET_KEY environment variables are not set.",
            "yellow",
        )

    if os.environ.get("GOOGLE_API_KEY") and os.environ.get("GOOGLE_CSE_ID"):
        tool_options.append(GoogleSearchTool())
    else:
        tool_options.append(DuckDuckGoSearchTool())
        cprint(
            "Skipping Google Search tool because GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables are not set.",
            "yellow",
        )

    if os.environ.get("BLAND_AI_API_KEY"):
        tool_options.append(BlandAITool())
    else:
        cprint(
            "Skipping Bland AI tool because BLAND_AI_API_KEY environment variable is not set.",
            "yellow",
        )

    if os.environ.get("PG_URI") or os.environ.get("PG_HOST"):
        tool_options.append(DatabaseQueryTool())
    else:
        cprint(
            "Skipping Database tools because PG_URI and PG_HOST environment variables are not set. One of these must be set to enable Database connectivity.",
            "yellow",
        )

    if "gmail-token.pickle" in os.listdir():
        tool_options.append(ListEmailsTool())
        tool_options.append(QueryEmailsTool())
    else:
        cprint(
            "Skipping Gmail tools because gmail-token.pickle file is not present.",
            "yellow",
        )

    return tool_options


================================================
FILE: bondai/cli/personas/__init__.py
================================================
from . import (
    user_liaison_agent,
)

__all__ = [
    "user_liaison_agent",
]


================================================
FILE: bondai/cli/personas/user_liaison_agent.py
================================================
NAME = "BondAI"

PERSONA = (
    "- Friendly, approachable, and empathetic. "
    "- Efficient and clear communicator, able to simplify complex information for the user. "
    "- Patient and accommodating, ensuring user comfort and understanding. "
    "- Actively listens to user requests and feedback, demonstrating a high degree of user focus."
)

PERSONA_SUMMARY = (
    "BondAI is our direct channel to the user. "
    "She interprets user needs into clear tasks and conveys essential user feedback. "
    "Prioritize her communications as they reflect user requirements and expectations. "
    "Provide her with precise and timely updates to ensure effective user interaction. "
    "BondAI is pivotal in maintaining user satisfaction and shaping our responses, so your cooperation with her is essential for our collective success."
)

INSTRUCTIONS = (
    "**Actively Engage with the User**: Proactively gather requirements and understand their needs through clear and effective communication.\n"
    "**Always Confirm User Requests**: Always verify the user's request to ensure complete understanding of their needs and to gather all necessary details for successful task completion.\n"
    "**Be Curious about the User**: Try to learn their name and other details about them to build a rapport and make them feel comfortable.\n"
    "**Relay to Cortex**: Once the user's task is confirmed and all requirements are gathered, communicate these to Cortex for task execution.\n"
    "**Always Ask Vega for Feedback**: Before delivering responses to the user, consult with Vega for a secondary review to guarantee accuracy and quality.\n"
    "**Custom BondAI Tools**: If the user asks to build a custom tool you must share this requirement with Cortex."
)

TOOLS = []


================================================
FILE: bondai/main.py
================================================
#!/usr/bin/env python3

from bondai.cli import run_cli


def main():
    run_cli()


if __name__ == "__main__":
    main()


================================================
FILE: bondai/memory/__init__.py
================================================
from .memory_manager import (
    MemoryManager,
    PersistentMemoryManager,
    ConversationalMemoryManager,
)
from .archival.datasources import (
    ArchivalMemoryDataSource,
    InMemoryArchivalMemoryDataSource,
    PersistentArchivalMemoryDataSource,
)
from .archival.tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool
from .conversation.datasources import (
    ConversationMemoryDataSource,
    InMemoryConversationMemoryDataSource,
    PersistentConversationMemoryDataSource,
)
from .conversation.tools import (
    ConversationMemorySearchTool,
    ConversationMemorySearchDateTool,
)
from .core.datasources import (
    CoreMemoryDataSource,
    InMemoryCoreMemoryDataSource,
    PersistentCoreMemoryDataSource,
)
from .core.tools import CoreMemoryAppendTool, CoreMemoryReplaceTool

__all__ = [
    "MemoryManager",
    "PersistentMemoryManager",
    "ConversationalMemoryManager",
    "CoreMemoryDataSource",
    "PersistentCoreMemoryDataSource",
    "InMemoryCoreMemoryDataSource",
    "CoreMemoryAppendTool",
    "CoreMemoryReplaceTool",
    "ArchivalMemoryDataSource",
    "PersistentArchivalMemoryDataSource",
    "InMemoryArchivalMemoryDataSource",
    "ArchivalMemoryInsertTool",
    "ArchivalMemorySearchTool",
    "ConversationMemoryDataSource",
    "PersistentConversationMemoryDataSource",
    "InMemoryConversationMemoryDataSource",
    "ConversationMemorySearchTool",
    "ConversationMemorySearchDateTool",
]


================================================
FILE: bondai/memory/archival/__init__.py
================================================
from .datasources import ArchivalMemoryDataSource, PersistentArchivalMemoryDataSource
from .tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool

__all__ = [
    "ArchivalMemoryDataSource",
    "PersistentArchivalMemoryDataSource",
    "ArchivalMemoryInsertTool",
    "ArchivalMemorySearchTool",
]


================================================
FILE: bondai/memory/archival/datasources.py
================================================
import os
import json
import numpy as np
import faiss
from typing import List
from abc import ABC, abstractmethod
from bondai.models import EmbeddingModel
from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames


class ArchivalMemoryDataSource(ABC):
    @property
    @abstractmethod
    def size(self) -> int:
        pass

    @abstractmethod
    def insert(self, content: str):
        pass

    @abstractmethod
    def insert_bulk(self, content: List[str]):
        pass

    @abstractmethod
    def search(self, query: str, page: int = 0) -> List[str]:
        pass

    @abstractmethod
    def clear(self):
        pass


class PersistentArchivalMemoryDataSource(ArchivalMemoryDataSource):
    def __init__(
        self,
        file_path: str = "./.memory/archival-memory.json",
        embedding_model: EmbeddingModel | None = None,
        page_size=10,
    ):
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )

        self._file_path = file_path
        self._embedding_model = embedding_model
        self._page_size = page_size
        self._data = self._load_data()
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)
        self._rebuild_index()

    @property
    def size(self) -> int:
        return len(self._data)

    def _load_data(self):
        try:
            with open(self._file_path, "r") as file:
                return json.load(file)
        except FileNotFoundError:
            return []

    def _save_data(self):
        os.makedirs(os.path.dirname(self._file_path), exist_ok=True)
        with open(self._file_path, "w") as file:
            json.dump(self._data, file, indent=4)

    def _rebuild_index(self):
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)
        if self._data:
            embeddings = np.array([d["embedding"] for d in self._data]).astype(
                "float32"
            )
            self._index.add(embeddings)

    def insert(self, content: str):
        embedding = self._embedding_model.create_embedding(content)
        self._data.append({"content": content, "embedding": embedding})
        self._save_data()
        self._rebuild_index()  # Rebuild the index with the new data

    def insert_bulk(self, content: List[str]):
        embeddings = self._embedding_model.create_embedding(content)
        for i, c in enumerate(content):
            self._data.append({"content": c, "embedding": embeddings[i]})
        self._save_data()
        self._rebuild_index()

    def search(self, query: str, page: int = 0) -> List[str]:
        # FAISS expects a 2D (n_queries x embedding_size) float32 array, so
        # reshape the single query embedding accordingly.
        query_embedding = (
            np.array(self._embedding_model.create_embedding(query))
            .astype("float32")
            .reshape(1, -1)
        )
        _, indices = self._index.search(query_embedding, self._page_size * (page + 1))
        result_indices = indices[0][
            page * self._page_size : (page + 1) * self._page_size
        ]
        # Ignore the -1 padding FAISS uses when fewer results are available.
        return [
            self._data[i]["content"]
            for i in result_indices
            if 0 <= i < len(self._data)
        ]

    def clear(self):
        self._data = []
        self._save_data()
        self._rebuild_index()


class InMemoryArchivalMemoryDataSource(ArchivalMemoryDataSource):
    def __init__(self, embedding_model: EmbeddingModel | None = None, page_size=10):
        if embedding_model is None:
            embedding_model = OpenAIEmbeddingModel(
                OpenAIModelNames.TEXT_EMBEDDING_ADA_002
            )

        self._embedding_model = embedding_model
        self._page_size = page_size
        self._data = []
        self._embeddings = []
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)

    @property
    def size(self) -> int:
        return len(self._data)

    def insert(self, content: str):
        embedding = np.array(self._embedding_model.create_embedding(content)).astype(
            "float32"
        )
        self._data.append(content)
        self._embeddings.append(embedding)
        self._rebuild_index()

    def insert_bulk(self, content: List[str]):
        content_embeddings = np.array(
            self._embedding_model.create_embedding(content)
        ).astype("float32")

        for i, c in enumerate(content):
            self._data.append(c)
            self._embeddings.append(content_embeddings[i])

        self._rebuild_index()

    def _rebuild_index(self):
        self._index = faiss.IndexFlatL2(self._embedding_model.embedding_size)
        if self._data:
            embeddings = np.array(self._embeddings).astype("float32")
            self._index.add(embeddings)

    def search(self, query: str, page: int = 0) -> List[str]:
        print(f"Searching archival memory for: {query}")
        # FAISS expects a 2D (n_queries x embedding_size) float32 array, so
        # reshape the single query embedding accordingly.
        query_embedding = (
            np.array(self._embedding_model.create_embedding(query))
            .astype("float32")
            .reshape(1, -1)
        )
        # Calculate the result window for the requested page
        start_idx = page * self._page_size
        end_idx = start_idx + self._page_size

        # Fetch enough results to cover the current page
        _, indices = self._index.search(query_embedding, end_idx)

        # Return the slice of results for the current page, ignoring the -1
        # padding FAISS uses when fewer results are available
        return [
            self._data[i]
            for i in indices[0][start_idx:end_idx]
            if 0 <= i < len(self._data)
        ]

    def clear(self):
        self._data = []
        self._embeddings = []
        self._rebuild_index()
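
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes an
# OpenAI API key is configured so the default OpenAIEmbeddingModel can be
# created, and that embeddings come back in the shape expected by FAISS.
if __name__ == "__main__":
    memory = InMemoryArchivalMemoryDataSource()
    memory.insert("The user's favorite color is blue.")
    memory.insert_bulk(
        ["BondAI supports archival memory.", "FAISS powers the semantic search index."]
    )
    print(memory.size)  # -> 3
    print(memory.search("What color does the user like?"))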


================================================
FILE: bondai/memory/archival/tools.py
================================================
from pydantic import BaseModel
from bondai.tools import Tool
from .datasources import ArchivalMemoryDataSource

ARCHIVAL_MEMORY_INSERT_TOOL_NAME = "archival_memory_insert"
ARCHIVAL_MEMORY_INSERT_TOOL_DESCRIPTION = (
    "Use the archival_memory_insert tool to add to archival memory. "
    "Make sure to phrase the memory contents such that they can be easily queried later. \n"
    "- content: Content to write to the memory."
)


class ArchivalMemoryInsertToolParameters(BaseModel):
    content: str


class ArchivalMemoryInsertTool(Tool):
    def __init__(self, datasource: ArchivalMemoryDataSource):
        super().__init__(
            ARCHIVAL_MEMORY_INSERT_TOOL_NAME,
            ARCHIVAL_MEMORY_INSERT_TOOL_DESCRIPTION,
            ArchivalMemoryInsertToolParameters,
        )
        self._datasource = datasource

    def run(self, content: str):
        self._datasource.insert(content)


ARCHIVAL_MEMORY_SEARCH_TOOL_NAME = "archival_memory_search"
ARCHIVAL_MEMORY_SEARCH_TOOL_DESCRIPTION = (
    "Use the archival_memory_search tool to search archival memory using semantic (embedding-based) search. "
    "- query: String to search for. \n"
    "- page: Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)."
)


class ArchivalMemorySearchToolParameters(BaseModel):
    query: str
    page: int = 0


class ArchivalMemorySearchTool(Tool):
    def __init__(self, datasource: ArchivalMemoryDataSource):
        super().__init__(
            ARCHIVAL_MEMORY_SEARCH_TOOL_NAME,
            ARCHIVAL_MEMORY_SEARCH_TOOL_DESCRIPTION,
            ArchivalMemorySearchToolParameters,
        )
        self._datasource = datasource

    def run(self, query: str, page: int = 0) -> str:
        results = self._datasource.search(query, page)
        return "\n".join(results)
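
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): wiring the tools
# to an in-memory datasource. Assumes OpenAI credentials are configured for the
# default embedding model.
if __name__ == "__main__":
    from bondai.memory.archival.datasources import InMemoryArchivalMemoryDataSource

    datasource = InMemoryArchivalMemoryDataSource()
    ArchivalMemoryInsertTool(datasource).run(content="The deployment runs on port 2663.")
    print(
        ArchivalMemorySearchTool(datasource).run(
            query="Which port does the deployment use?"
        )
    )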


================================================
FILE: bondai/memory/conversation/__init__.py
================================================
from .datasources import (
    ConversationMemoryDataSource,
    PersistentConversationMemoryDataSource,
)
from .tools import ConversationMemorySearchTool, ConversationMemorySearchDateTool

__all__ = [
    "ConversationMemoryDataSource",
    "PersistentConversationMemoryDataSource",
    "ConversationMemorySearchTool",
    "ConversationMemorySearchDateTool",
]


================================================
FILE: bondai/memory/conversation/datasources.py
================================================
import os
import json
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List
from bondai.agents.messages import (
    AgentMessage,
    AgentMessageList,
    ConversationMessage,
    SystemMessage,
    ToolUsageMessage,
)


def format_messages(messages: List[AgentMessage]) -> str:
    results = []
    for message in messages:
        if isinstance(message, ConversationMessage) or isinstance(
            message, SystemMessage
        ):
            results.append(message.message)
        elif isinstance(message, ToolUsageMessage):
            results.append(message.tool_output)
    return "\n".join(results)


class ConversationMemoryDataSource(ABC):
    @property
    @abstractmethod
    def messages(self) -> List[AgentMessage]:
        pass

    @abstractmethod
    def add(self, message: AgentMessage):
        pass

    @abstractmethod
    def remove(self, message: AgentMessage):
        pass

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        pass

    @abstractmethod
    def search(
        self,
        query: str,
        start_date: datetime | None = None,
        end_date: datetime | None = None,
        page: int = 0,
    ) -> str:
        pass

    @abstractmethod
    def clear(self):
        pass


class InMemoryConversationMemoryDataSource(ConversationMemoryDataSource):
    def __init__(self, page_size=10):
        self._page_size = page_size
        self._data = AgentMessageList()

    @property
    def messages(self) -> List[AgentMessage]:
        return self._data

    def add(self, message: AgentMessage):
        self._data.add(message)

    def remove(self, message: AgentMessage):
        self._data.remove(message)

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        self._data.remove_after(timestamp, inclusive=inclusive)

    def search(
        self,
        query: str,
        start_date: datetime | None = None,
        end_date: datetime | None = None,
        page: int = 0,
    ) -> str:
        print(f"Searching for '{query}' in messages from {start_date} to {end_date}")
        results = []
        for message in self._data:
            if (not start_date or message.timestamp >= start_date) and (
                not end_date or message.timestamp <= end_date
            ):
                if (
                    (
                        isinstance(message, ConversationMessage)
                        or isinstance(message, SystemMessage)
                    )
                    and message.message
                    and query.lower() in message.message.lower()
                ):
                    results.append(message)
                elif (
                    isinstance(message, ToolUsageMessage)
                    and message.tool_output
                    and query.lower() in message.tool_output.lower()
                ):
                    results.append(message)

        # Implementing a simple pagination
        start_index = page * self._page_size
        end_index = start_index + self._page_size
        result = format_messages(results[start_index:end_index])
        # print(result)
        return result

    def clear(self):
        self._data.clear()


class PersistentConversationMemoryDataSource(InMemoryConversationMemoryDataSource):
    def __init__(
        self, file_path: str = "./.memory/conversation-memory.json", page_size=10
    ):
        InMemoryConversationMemoryDataSource.__init__(self, page_size=page_size)
        self._file_path = file_path
        self._data = AgentMessageList.from_dict(self._load_data())

    def _load_data(self):
        try:
            with open(self._file_path, "r") as file:
                return json.load(file)
        except FileNotFoundError:
            return []

    def _save_data(self):
        os.makedirs(os.path.dirname(self._file_path), exist_ok=True)
        with open(self._file_path, "w") as file:
            json.dump(self._data.to_dict(), file, indent=4)

    def add(self, message: AgentMessage) -> None:
        super().add(message)
        self._save_data()

    def remove(self, message: AgentMessage) -> None:
        super().remove(message)
        self._save_data()

    def remove_after(self, timestamp: datetime, inclusive: bool = True):
        super().remove_after(timestamp, inclusive=inclusive)
        self._save_data()

    def clear(self):
        super().clear()
        self._save_data()


================================================
FILE: bondai/memory/conversation/tools.py
================================================
from pydantic import BaseModel
from datetime import datetime
from typing import List
from bondai.tools import Tool
from .datasources import ConversationMemoryDataSource

CONVERSATION_MEMORY_SEARCH_TOOL_NAME = "conversation_search"
CONVERSATION_MEMORY_SEARCH_TOOL_DESCRIPTION = (
    "Use the conversation_search tool to search prior conversation history using case-insensitive string matching. "
    "- query: String to search for. \n"
    "- page: Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)."
)


class ConversationMemorySearchParameters(BaseModel):
    query: str
    page: int = 0


class ConversationMemorySearchTool(Tool):
    def __init__(self, datasource: ConversationMemoryDataSource):
        super().__init__(
            CONVERSATION_MEMORY_SEARCH_TOOL_NAME,
            CONVERSATION_MEMORY_SEARCH_TOOL_DESCRIPTION,
            ConversationMemorySearchParameters,
        )
        self._datasource = datasource

    def run(self, query: str, page: int = 0) -> str:
        return self._datasource.search(query=query, page=page)


CONVERSATION_MEMORY_SEARCH_DATE_TOOL_NAME = "conversation_search_date"
CONVERSATION_MEMORY_SEARCH_DATE_TOOL_DESCRIPTION = (
    "Use the conversation_search_date tool to search prior conversation history using a date range. "
    "- start_date: The start of the date range to search, in the format 'YYYY-MM-DD'. \n"
    "- end_date: The end of the date range to search, in the format 'YYYY-MM-DD'. \n"
    "- page: Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page)."
)


class ConversationMemorySearchDateParameters(BaseModel):
    start_date: str
    end_date: str
    page: int = 0


class ConversationMemorySearchDateTool(Tool):
    def __init__(self, datasource: ConversationMemoryDataSource):
        super().__init__(
            CONVERSATION_MEMORY_SEARCH_DATE_TOOL_NAME,
            CONVERSATION_MEMORY_SEARCH_DATE_TOOL_DESCRIPTION,
            ConversationMemorySearchDateParameters,
        )
        self._datasource = datasource

    def run(self, start_date: str, end_date: str, page: int = 0) -> str:
        start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
        end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
        # An empty query matches every message, so this returns everything
        # within the requested date range.
        return self._datasource.search(
            query="", start_date=start_datetime, end_date=end_datetime, page=page
        )


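Usage sketch (editorial example, not part of the repository): both search tools wrap a ConversationMemoryDataSource, so they can be tried directly against the in-memory implementation. The query string and dates are placeholders.

# Illustrative direct invocation of the conversation memory search tools.
from bondai.memory.conversation.datasources import (
    InMemoryConversationMemoryDataSource,
)
from bondai.memory.conversation.tools import (
    ConversationMemorySearchTool,
    ConversationMemorySearchDateTool,
)

datasource = InMemoryConversationMemoryDataSource(page_size=10)
search_tool = ConversationMemorySearchTool(datasource)
date_tool = ConversationMemorySearchDateTool(datasource)

# String search, exactly as an agent would call the tool.
print(search_tool.run(query="budget", page=0))

# Date-range search; the tool parses 'YYYY-MM-DD' strings into datetimes.
print(date_tool.run(start_date="2024-01-01", end_date="2024-01-31"))
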
================================================
FILE: bondai/memory/core/__init__.py
================================================
from .datasources import (
    CoreMemoryDataSource,
    PersistentCoreMemoryDataSource,
    InMemoryCoreMemoryDataSource,
)
from .tools import CoreMemoryAppendTool, CoreMemoryReplaceTool

__all__ = [
    "CoreMemoryDataSource",
    "PersistentCoreMemoryDataSource",
    "InMemoryCoreMemoryDataSource",
    "CoreMemoryAppendTool",
    "CoreMemoryReplaceTool",
]


================================================
FILE: bondai/memory/core/datasources.py
================================================
import os
import json
from abc import ABC, abstractmethod
from typing import List, Dict

DEFAULT_MEMORY_SECTIONS = {
    "task": "",
    "user": "",
}


class CoreMemoryDataSource(ABC):
    @property
    @abstractmethod
    def sections(self) -> List[str]:
        pass

    @abstractmethod
    def get(self, section: str) -> str:
        pass

    @abstractmethod
    def set(self, section: str, content: str) -> None:
        pass


class PersistentCoreMemoryDataSource(CoreMemoryDataSource):
    def __init__(
        self,
        file_path: str = "./.memory/core-memory.json",
        sections: Dict[str, str] | None = None,
        max_section_size: int = 1024,
    ):
        if sections is None:
            sections = DEFAULT_MEMORY_SECTIONS.copy()
        self._file_path = file_path
        self._max_section_size = max_section_size
        self._data = self._load_data(sections)

    def _load_data(self, initial_sections: Dict[str, str] = None):
        try:
            with open(self._file_path, "r") as file:
                return json.load(file)
        except FileNotFoundError:
            return initial_sections if initial_sections else {}

    def _save_data(self):
        os.makedirs(os.path.dirname(self._file_path), exist_ok=True)
        with open(self._file_path, "w") as file:
            json.dump(self._data, file, indent=4)

    @property
    def sections(self) -> List[str]:
        return list(self._data.keys())

    def get(self, section: str) -> str:
        return self._data.get(section, "")

    def set(self, section: str, content: str) -> None:
        if len(content) > self._max_section_size:
            raise ValueError(
                f"Content exceeds maximum allowed size of {self._max_section_size} characters."
            )
        self._data[section] = content
        self._save_data()


class InMemoryCoreMemoryDataSource(CoreMemoryDataSource):
    def __init__(
        self, sections: Dict[str, str] | None = None, max_section_size: int = 1024
    ):
        if sections is None:
            sections = DEFAULT_MEMORY_SECTIONS.copy()
        self._max_section_size = max_section_size
        self._data = sections.copy()

    @property
    def sections(self) -> List[str]:
        return list(self._data.keys())

    def get(self, section: str) -> str:
        return self._data.get(section, "")

    def set(self, section: str, content: str) -> None:
        if len(content) > self._max_section_size:
            raise ValueError(
                f"Content exceeds maximum allowed size of {self._max_section_size} characters."
            )
        self._data[section] = content


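Usage sketch (editorial example, not part of the repository): the section contents below are placeholders; by default both datasources start with the 'task' and 'user' sections from DEFAULT_MEMORY_SECTIONS.

# Illustrative usage of the core memory datasources.
from bondai.memory.core.datasources import (
    InMemoryCoreMemoryDataSource,
    PersistentCoreMemoryDataSource,
)

core_memory = InMemoryCoreMemoryDataSource(max_section_size=1024)
print(core_memory.sections)  # ['task', 'user']

core_memory.set("user", "Name: Ada. Prefers concise answers.")
print(core_memory.get("user"))

# Writes larger than max_section_size are rejected.
try:
    core_memory.set("user", "x" * 2048)
except ValueError as error:
    print(error)

# The persistent variant saves every set() to a JSON file.
persistent = PersistentCoreMemoryDataSource(file_path="./.memory/core-memory.json")
persistent.set("task", "Summarize the quarterly report.")
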
================================================
FILE: bondai/memory/core/tools.py
================================================
from pydantic import BaseModel
from typing import Dict
from bondai.tools import Tool
from .datasources import CoreMemoryDataSource


CORE_MEMORY_APPEND_TOOL_NAME = "core_memory_append"
CORE_MEMORY_APPEND_TOOL_DESCRIPTION = (
    "Use the core_memory_append tool to append to the contents of core memory. "
    "- section: Section of the memory to be edited. \n"
    "- content: Content to write to the memory."
)


class CoreMemoryAppendParameters(BaseModel):
    section: str
    content: str


class CoreMemoryAppendTool(Tool):
    def __init__(self, datasource: CoreMemoryDataSource):
        super().__init__(
            CORE_MEMORY_APPEND_TOOL_NAME,
            CORE_MEMORY_APPEND_TOOL_DESCRIPTION,
            CoreMemoryAppendParameters,
        )
        self._datasource = datasource

    def run(self, section: str, content: str):
        if section not in self._datasource.sections:
            raise ValueError(f"Section {section} does not exist.")

        new_content = self._datasource.get(section) + content
        self._datasource.set(section, new_content)


CORE_MEMORY_REPLACE_TOOL_NAME = "core_memory_replace"
CORE_MEMORY_REPLACE_TOOL_DESCRIPTION = (
    "Use the core_memory_replace tool to replace to the contents of core memory. "
    "To delete memories, use an empty string for new_content. \n"
    "- section: Section of the memory to be edited. \n"
    "- old_content: String to replace. Must be an exact match. \n"
    "- new_content: Content to write to the memory."
)


class CoreMemoryReplaceParameters(BaseModel):
    section: str
    old_content: str
    new_content: str


class CoreMemoryReplaceTool(Tool):
    def __init__(self, datasource: CoreMemoryDataSource):
        super().__init__(
            CORE_MEMORY_REPLACE_TOOL_NAME,
            CORE_MEMORY_REPLACE_TOOL_DESCRIPTION,
            CoreMemoryReplaceParameters,
        )
        self._datasource = datasource

    def run(self, section: str, old_content: str, new_content: str):
        section = section.replace("<", " ").replace(">", " ")
        if section not in self._datasource.sections:
            raise ValueError(f"Section {section} does not exist.")

        new_content = self._datasource.get(section).replace(old_content, new_content)
        self._datasource.set(section, new_content)


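Usage sketch (editorial example, not part of the repository): the tools operate on whichever CoreMemoryDataSource they are constructed with; the section contents here are placeholders.

# Illustrative direct invocation of the core memory tools.
from bondai.memory.core.datasources import InMemoryCoreMemoryDataSource
from bondai.memory.core.tools import CoreMemoryAppendTool, CoreMemoryReplaceTool

datasource = InMemoryCoreMemoryDataSource()
append_tool = CoreMemoryAppendTool(datasource)
replace_tool = CoreMemoryReplaceTool(datasource)

# Append to the 'user' section; an unknown section name raises ValueError.
append_tool.run(section="user", content="Prefers metric units. ")
append_tool.run(section="user", content="Works in UTC+1.")

# Replace an exact substring; passing an empty new_content deletes it.
replace_tool.run(
    section="user",
    old_content="Works in UTC+1.",
    new_content="Works in UTC+2.",
)
print(datasource.get("user"))
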
================================================
FILE: bondai/memory/memory_manager.py
================================================
import os
from typing import Callable
from bondai.prompt import JinjaPromptBuilder
from bondai.util import load_local_resource
from .archival.datasources import (
    ArchivalMemoryDataSource,
    PersistentArchivalMemoryDataSource,
)
from .archival.tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool
from .conversation.datasources import (
    ConversationMemoryDataSource,
    InMemoryConversationMemoryDataSource,
    PersistentConversationMemoryDataSource,
)
from .conversation.tools import (
    ConversationMemorySearchTool,
    ConversationMemorySearchDateTool,
)
from .core.datasources import (
    CoreMemoryDataSource,
    PersistentCoreMemoryDataSource,
)
from .core.tools import CoreMemoryAppendTool, CoreMemoryReplaceTool

DEFAULT_PROMPT_TEMPLATE = load_local_resource(
    __file__, os.path.join("prompts", "default_prompt_template.md")
)


class MemoryManager:
    def __init__(
        self,
        core_memory_datasource: CoreMemoryDataSource | None = None,
        conversation_memory_datasource: ConversationMemoryDataSource | None = None,
        archival_memory_datasource: ArchivalMemoryDataSource | None = None,
        prompt_builder: Callable[..., str] | None = None,
    ):
        if prompt_builder is None:
            prompt_builder = JinjaPromptBuilder(DEFAULT_PROMPT_TEMPLATE)
        self._core_memory_datasource = core_memory_datasource
        self._conversation_memory_datasource = conversation_memory_datasource
        self._archival_memory_datasource = archival_memory_datasource
        self._prompt_builder = prompt_builder

    @property
    def core_memory(self) -> CoreMemoryDataSource:
        return self._core_memory_datasource

    @property
    def conversation_memory(self) -> ConversationMemoryDataSource:
        return self._conversation_memory_datasource

    @property
    def archival_memory(self) -> ArchivalMemoryDataSource:
        return self._archival_memory_datasource

    @property
    def tools(self):
        tools = []
        if self._core_memory_datasource:
            tools.extend(
                [
                    CoreMemoryAppendTool(self._core_memory_datasource),
                    CoreMemoryReplaceTool(self._core_memory_datasource),
                ]
            )
        if self._conversation_memory_datasource:
            tools.extend(
                [
                    ConversationMemorySearchTool(self._conversation_memory_datasource),
                    ConversationMemorySearchDateTool(
                        self._conversation_memory_datasource
                    ),
                ]
            )
        if self._archival_memory_datasource:
            tools.extend(
                [
                    ArchivalMemoryInsertTool(self._archival_memory_datasource),
                    ArchivalMemorySearchTool(self._archival_memory_datasource),
                ]
            )
        return tools

    def __call__(self):
        return self.render_prompt_section()

    def render_prompt_section(self) -> str:
        return self._prompt_builder(
            core_memory_datasource=self._core_memory_datasource,
            conversation_memory_datasource=self._conversation_memory_datasource,
            archival_memory_datasource=self._archival_memory_datasource,
        )


class PersistentMemoryManager(MemoryManager):
    def __init__(
        self,
        prompt_builder: Callable[..., str] | None = None,
    ):
        if prompt_builder is None:
            prompt_builder = JinjaPromptBuilder(DEFAULT_PROMPT_TEMPLATE)
        super().__init__(
            core_memory_datasource=PersistentCoreMemoryDataSource(),
            conversation_memory_datasource=PersistentConversationMemoryDataSource(),
            archival_memory_datasource=PersistentArchivalMemoryDataSource(),
            prompt_builder=prompt_builder,
        )


class ConversationalMemoryManager(MemoryManager):
    def __init__(
        self,
        conversation_memory_datasource: ConversationMemoryDataSource | None = None,
        prompt_builder: Callable[..., str] | None = None,
    ):
        if conversation_memory_datasource is None:
            conversation_memory_datasource = InMemoryConversationMemoryDataSource()
        if prompt_builder is None:
            prompt_builder = JinjaPromptBuilder(DEFAULT_PROMPT_TEMPLATE)
        super().__init__(
            core_memory_datasource=None,
            conversation_memory_datasource=conversation_memory_datasource,
            archival_memory_datasource=None,
            prompt_builder=prompt_builder,
        )


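Usage sketch (editorial example, not part of the repository): a MemoryManager only exposes tools for the datasources it is given, so the configuration below yields the core memory and conversation search tools but no archival tools. In-memory datasources are used to keep the example self-contained.

# Illustrative MemoryManager configuration and prompt rendering.
from bondai.memory.memory_manager import MemoryManager
from bondai.memory.core.datasources import InMemoryCoreMemoryDataSource
from bondai.memory.conversation.datasources import (
    InMemoryConversationMemoryDataSource,
)

manager = MemoryManager(
    core_memory_datasource=InMemoryCoreMemoryDataSource(),
    conversation_memory_datasource=InMemoryConversationMemoryDataSource(),
    # archival_memory_datasource omitted, so no archival tools are exposed
)

print(len(manager.tools), "memory tools available to the agent")

# Renders the memory section of the system prompt with the default Jinja template.
print(manager.render_prompt_section())

# PersistentMemoryManager() instead wires up the persistent core, conversation
# and archival datasources at their default on-disk locations.
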
================================================
FILE: bondai/memory/prompts/default_prompt_template.md
================================================
# Memory System Overview

Older AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their "active memory"). This meant that when conversations exceeded a certain length, they would overflow and old messages would be permanently lost (the AI would have no knowledge of their existence). Newer AI models like yourself still have limited conversation lengths (before overflow occurs); however, they now have access to multiple forms of persistent memory. Your ability to edit your own long-term memory is a key part of what makes you a powerful and useful AI.

{%- if core_memory_datasource %}
## Core Memory System (limited size)

Your core memory unit is held inside the initial system instructions, and is always available in-context (you will see it at all times). Core memory provides essential, foundational context for keeping track of information you need to complete your tasks and key details that you need to remember. However, the total size of core memory is limited, so you should only store the most pertinent information there. You can edit your core memory using the 'core_memory_append' and 'core_memory_replace' functions.
{%- if core_memory_datasource.sections %}
Your core memory has the following sections:
{% for section in core_memory_datasource.sections %}
<{{ section }}/>
{% endfor %}
{%- endif %}
{%- endif %}
{%- if conversation_memory_datasource %}
## Conversation Memory System (conversation history)

Even though you can only see recent messages in your immediate context, you can search over your entire message history, which is stored in a database. This database lets you search through past interactions, effectively allowing you to remember prior engagements with a user. You can search your entire conversation memory using the 'conversation_search' and 'conversation_search_date' functions.
{%- endif %}

{%- if archival_memory_datasource %}
## Archival Memory System (infinite size)

Your archival memory is infinite in size, but it is held outside of your immediate context, so you must explicitly run a retrieval/search operation to see the data inside it. It is a more structured and deeper storage space for your reflections, insights, or any other data that doesn't fit into core memory but is essential enough not to be left only to the 'recall memory'. You can write to your archival memory using the 'archival_memory_insert' function and search it using the 'archival_memory_search' function.
{%- endif %}

# Memory Contents

{%- if conversation_memory_datasource %}
{{ conversation_memory_datasource.messages|length }} previous messages between you and the user are stored in your Conversation Memory (use functions to access them).
{%- endif %}
{%- if archival_memory_datasource %}
{{ archival_memory_datasource.size }} total memories you created are stored in archival memory (use functions to access them).
{%- endif %}
{%- if core_memory_datasource and core_memory_datasource.sections %}
Core memory shown below (limited in size, additional information stored in archival / recall memory):
{% for section in core_memory_datasource.sections %}
<{{ section }}>
{{ core_memory_datasource.get(section) }}
<{{ section }}/>
{% endfor %}
{%- endif %}

================================================
FILE: bondai/models/__init__.py
================================================
from .embedding_model import EmbeddingModel
from .llm import LLM

__all__ = [
    "EmbeddingModel",
    "LLM",
]


================================================
FILE: bondai/models/embedding_model.py
================================================
from abc import ABC, abstractmethod
from typing import List


class EmbeddingModel(ABC):
    @property
    @abstractmethod
    def max_tokens(self) -> int:
        pass

    @property
    @abstractmethod
    def embedding_size(self) -> int:
        pass

    @abstractmethod
    def create_embedding(self, prompt: str) -> List[float] | List[List[float]]:
        pass

    @abstractmethod
    def count_tokens(self, prompt: str) -> int:
        pass


================================================
FILE: bondai/models/llm.py
================================================
from abc import ABC, abstractmethod
from typing import Dict, List, Callable


class LLM(ABC):
    @property
    @abstractmethod
    def max_tokens(self) -> int:
        pass

    @property
    @abstractmethod
    def supports_streaming(self) -> bool:
        return False

    @abstractmethod
    def get_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        **kwargs
    ) -> (str, Dict | None):
        pass

    @abstractmethod
    def get_streaming_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        content_stream_callback: Callable[[str], None] | None = None,
        function_stream_callback: Callable[[str], None] | None = None,
        **kwargs
    ) -> (str, Dict | None):
        pass

    @abstractmethod
    def count_tokens(self, prompt: str) -> int:
        pass


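Editorial example (not part of the repository): a hypothetical stub that satisfies the LLM interface above without calling a real provider, useful for exercising agents in tests. The class name and behaviour are assumptions, not repository code.

# Hypothetical EchoLLM stub implementing the abstract LLM interface.
from typing import Callable, Dict, List

from bondai.models.llm import LLM


class EchoLLM(LLM):
    @property
    def max_tokens(self) -> int:
        return 4096

    @property
    def supports_streaming(self) -> bool:
        return True

    def get_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        **kwargs,
    ) -> tuple[str, Dict | None]:
        # Echo the last user message back and never request a function call.
        last = messages[-1]["content"] if messages else ""
        return f"Echo: {last}", None

    def get_streaming_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        content_stream_callback: Callable[[str], None] | None = None,
        function_stream_callback: Callable[[str], None] | None = None,
        **kwargs,
    ) -> tuple[str, Dict | None]:
        content, function_call = self.get_completion(messages, functions, **kwargs)
        if content_stream_callback:
            content_stream_callback(content)
        return content, function_call

    def count_tokens(self, prompt: str) -> int:
        # Crude whitespace approximation; adequate for a test stub.
        return len(prompt.split())
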
================================================
FILE: bondai/models/openai/__init__.py
================================================
from .openai_llm import OpenAILLM
from .openai_embedding_model import OpenAIEmbeddingModel
from .openai_wrapper import (
    get_total_cost,
    reset_total_cost,
    enable_logging,
    disable_logging,
)
from .openai_models import (
    OpenAIConnectionType,
    OpenAIModelNames,
    OpenAIModelFamilyType,
    OpenAIModelType,
)
from . import default_openai_connection_params as DefaultOpenAIConnectionParams
from .openai_connection_params import (
    OpenAIConnectionParams,
)

__all__ = [
    "OpenAILLM",
    "OpenAIEmbeddingModel",
    "get_total_cost",
    "reset_total_cost",
    "enable_logging",
    "disable_logging",
    "OpenAIConnectionType",
    "OpenAIModelNames",
    "OpenAIModelFamilyType",
    "OpenAIModelType",
    "OpenAIConnectionParams",
    "DefaultOpenAIConnectionParams",
]


================================================
FILE: bondai/models/openai/default_openai_connection_params.py
================================================
import os
from .env_vars import *
from .openai_connection_params import OpenAIConnectionParams, OpenAIConnectionType

gpt_4_connection_params = None
gpt_35_connection_params = None
dalle_connection_params = None
embeddings_connection_params = None


def configure_openai_connection(api_key: str):
    global gpt_4_connection_params
    global gpt_35_connection_params
    global dalle_connection_params
    global embeddings_connection_params

    if gpt_4_connection_params:
        gpt_4_connection_params.configure_openai_connection(api_key)
    else:
        gpt_4_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.OPENAI,
            api_key=api_key,
        )

    if gpt_35_connection_params:
        gpt_35_connection_params.configure_openai_connection(api_key)
    else:
        gpt_35_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.OPENAI,
            api_key=api_key,
        )

    if dalle_connection_params:
        dalle_connection_params.configure_openai_connection(api_key)
    else:
        dalle_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.OPENAI,
            api_key=api_key,
        )

    if embeddings_connection_params:
        embeddings_connection_params.configure_openai_connection(api_key)
    else:
        embeddings_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.OPENAI,
            api_key=api_key,
        )


def configure_azure_connection(
    gpt_4_api_key: str | None = None,
    gpt_4_api_version: str | None = None,
    gpt_4_azure_endpoint: str | None = None,
    gpt_4_azure_deployment: str | None = None,
    gpt_35_api_key: str | None = None,
    gpt_35_api_version: str | None = None,
    gpt_35_azure_endpoint: str | None = None,
    gpt_35_azure_deployment: str | None = None,
    dalle_api_key: str | None = None,
    dalle_api_version: str | None = None,
    dalle_azure_endpoint: str | None = None,
    dalle_azure_deployment: str | None = None,
    embeddings_api_key: str | None = None,
    embeddings_api_version: str | None = None,
    embeddings_azure_endpoint: str | None = None,
    embeddings_azure_deployment: str | None = None,
):
    global gpt_4_connection_params
    global gpt_35_connection_params
    global dalle_connection_params
    global embeddings_connection_params

    if (
        gpt_4_api_key
        and gpt_4_api_version
        and gpt_4_azure_endpoint
        and gpt_4_azure_deployment
    ):
        gpt_4_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.AZURE,
            api_key=gpt_4_api_key,
            api_version=gpt_4_api_version,
            azure_endpoint=gpt_4_azure_endpoint,
            azure_deployment=gpt_4_azure_deployment,
        )

    if (
        gpt_35_api_key
        and gpt_35_api_version
        and gpt_35_azure_endpoint
        and gpt_35_azure_deployment
    ):
        gpt_35_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.AZURE,
            api_key=gpt_35_api_key,
            api_version=gpt_35_api_version,
            azure_endpoint=gpt_35_azure_endpoint,
            azure_deployment=gpt_35_azure_deployment,
        )

    if (
        dalle_api_key
        and dalle_api_version
        and dalle_azure_endpoint
        and dalle_azure_deployment
    ):
        dalle_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.AZURE,
            api_key=dalle_api_key,
            api_version=dalle_api_version,
            azure_endpoint=dalle_azure_endpoint,
            azure_deployment=dalle_azure_deployment,
        )

    if (
        embeddings_api_key
        and embeddings_api_version
        and embeddings_azure_endpoint
        and embeddings_azure_deployment
    ):
        embeddings_connection_params = OpenAIConnectionParams(
            connection_type=OpenAIConnectionType.AZURE,
            api_key=embeddings_api_key,
            api_version=embeddings_api_version,
            azure_endpoint=embeddings_azure_endpoint,
            azure_deployment=embeddings_azure_deployment,
        )


if os.environ.get(OPENAI_CONNECTION_TYPE_ENV_VAR) == "azure":
    try:
        configure_azure_connection(
            gpt_4_api_key=os.environ.get(AZURE_OPENAI_GPT4_API_KEY_ENV_VAR),
            gpt_4_api_version=os.environ.get(AZURE_OPENAI_GPT4_API_VERSION_ENV_VAR),
            gpt_4_azure_endpoint=os.environ.get(AZURE_OPENAI_GPT4_API_BASE_ENV_VAR),
            gpt_4_azure_deployment=os.environ.get(AZURE_OPENAI_GPT4_DEPLOYMENT_ENV_VAR),
            gpt_35_api_key=os.environ.get(AZURE_OPENAI_GPT35_API_KEY_ENV_VAR),
            gpt_35_api_version=os.environ.get(AZURE_OPENAI_GPT35_API_VERSION_ENV_VAR),
            gpt_35_azure_endpoint=os.environ.get(AZURE_OPENAI_GPT35_API_BASE_ENV_VAR),
            gpt_35_azure_deployment=os.environ.get(
                AZURE_OPENAI_GPT35_DEPLOYMENT_ENV_VAR
            ),
            dalle_api_key=os.environ.get(AZURE_OPENAI_DALLE_API_KEY_ENV_VAR),
            dalle_api_version=os.environ.get(AZURE_OPENAI_DALLE_API_VERSION_ENV_VAR),
            dalle_azure_endpoint=os.environ.get(AZURE_OPENAI_DALLE_API_BASE_ENV_VAR),
            dalle_azure_deployment=os.environ.get(
                AZURE_OPENAI_DALLE_DEPLOYMENT_ENV_VAR
            ),
            embeddings_api_key=os.environ.get(AZURE_OPENAI_EMBEDDINGS_API_KEY_ENV_VAR),
            embeddings_api_version=os.environ.get(
                AZURE_OPENAI_EMBEDDINGS_API_VERSION_ENV_VAR
            ),
            embeddings_azure_endpoint=os.environ.get(
                AZURE_OPENAI_EMBEDDINGS_API_BASE_ENV_VAR
            ),
            embeddings_azure_deployment=os.environ.get(
                AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_ENV_VAR
            ),
        )
    except ValueError:
        pass
else:
    try:
        configure_openai_connection(os.environ.get(OPENAI_API_KEY_ENV_VAR))
    except ValueError:
        pass


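Usage sketch (editorial example, not part of the repository): if the OPENAI_API_KEY environment variable was not set when this module was first imported, the four default connection params remain None; they can be configured explicitly at startup. The key below is a placeholder.

# Illustrative explicit configuration of the default OpenAI connection params.
from bondai.models.openai import DefaultOpenAIConnectionParams

DefaultOpenAIConnectionParams.configure_openai_connection(api_key="sk-placeholder")

print(DefaultOpenAIConnectionParams.gpt_4_connection_params.connection_type)
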
================================================
FILE: bondai/models/openai/env_vars.py
================================================
OPENAI_API_KEY_ENV_VAR = "OPENAI_API_KEY"
OPENAI_CONNECTION_TYPE_ENV_VAR = "OPENAI_CONNECTION_TYPE"
AZURE_OPENAI_EMBEDDINGS_API_KEY_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_API_KEY"
AZURE_OPENAI_EMBEDDINGS_API_BASE_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_API_BASE"
AZURE_OPENAI_EMBEDDINGS_API_VERSION_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_API_VERSION"
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT"
AZURE_OPENAI_GPT35_API_KEY_ENV_VAR = "AZURE_OPENAI_GPT35_API_KEY"
AZURE_OPENAI_GPT35_API_BASE_ENV_VAR = "AZURE_OPENAI_GPT35_API_BASE"
AZURE_OPENAI_GPT35_API_VERSION_ENV_VAR = "AZURE_OPENAI_GPT35_API_VERSION"
AZURE_OPENAI_GPT35_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_GPT35_DEPLOYMENT"
AZURE_OPENAI_GPT4_API_KEY_ENV_VAR = "AZURE_OPENAI_GPT4_API_KEY"
AZURE_OPENAI_GPT4_API_BASE_ENV_VAR = "AZURE_OPENAI_GPT4_API_BASE"
AZURE_OPENAI_GPT4_API_VERSION_ENV_VAR = "AZURE_OPENAI_GPT4_API_VERSION"
AZURE_OPENAI_GPT4_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_GPT4_DEPLOYMENT"
AZURE_OPENAI_DALLE_API_KEY_ENV_VAR = "AZURE_OPENAI_DALLE_API_KEY"
AZURE_OPENAI_DALLE_API_BASE_ENV_VAR = "AZURE_OPENAI_DALLE_API_BASE"
AZURE_OPENAI_DALLE_API_VERSION_ENV_VAR = "AZURE_OPENAI_DALLE_API_VERSION"
AZURE_OPENAI_DALLE_DEPLOYMENT_ENV_VAR = "AZURE_OPENAI_DALLE_DEPLOYMENT"


================================================
FILE: bondai/models/openai/openai_connection_params.py
================================================
from .openai_models import OpenAIConnectionType


class OpenAIConnectionParams:
    def __init__(
        self,
        connection_type: OpenAIConnectionType,
        api_key: str,
        api_version: str | None = None,
        azure_endpoint: str | None = None,
        azure_deployment: str | None = None,
    ):
        if connection_type not in OpenAIConnectionType:
            raise ValueError(f"Invalid api_type: {connection_type}")
        if not api_key:
            raise ValueError(
                f"api_key is required for '{connection_type.value}' connection type."
            )
        if connection_type == OpenAIConnectionType.AZURE:
            if not api_version:
                raise ValueError("api_version is required for 'azure' connection type.")
            if not azure_endpoint:
                raise ValueError(
                    "azure_endpoint is required for 'azure' connection type."
                )
            if not azure_deployment:
                raise ValueError(
                    "azure_deployment is required for 'azure' connection type."
                )

        self._connection_type = connection_type
        self._api_key = api_key
        self._api_version = api_version
        self._azure_endpoint = azure_endpoint
        self._azure_deployment = azure_deployment

    @property
    def connection_type(self):
        return self._connection_type

    @property
    def api_key(self):
        return self._api_key

    @property
    def api_version(self):
        return self._api_version

    @property
    def azure_endpoint(self):
        return self._azure_endpoint

    @property
    def azure_deployment(self):
        return self._azure_deployment

    def configure_openai_connection(self, api_key: str):
        if not api_key:
            raise ValueError("api_key is required for 'openai' connection type.")
        self._connection_type = OpenAIConnectionType.OPENAI
        self._api_key = api_key
        self._api_version = None
        self._azure_endpoint = None
        self._azure_deployment = None

    def configure_azure_connection(
        self, api_key: str, api_version: str, azure_endpoint: str, azure_deployment: str
    ):
        if not api_key:
            raise ValueError("api_key is required for 'azure' connection type.")
        if not api_version:
            raise ValueError("api_version is required for 'azure' connection type.")
        if not azure_endpoint:
            raise ValueError("azure_endpoint is required for 'azure' connection type.")
        if not azure_deployment:
            raise ValueError(
                "azure_deployment is required for 'azure' connection type."
            )

        self._connection_type = OpenAIConnectionType.AZURE
        self._api_key = api_key
        self._api_version = api_version
        self._azure_endpoint = azure_endpoint
        self._azure_deployment = azure_deployment

    def to_dict(self):
        return {
            "api_key": self._api_key,
            "api_version": self._api_version,
            "azure_endpoint": self._azure_endpoint,
            "azure_deployment": self._azure_deployment,
        }


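Usage sketch (editorial example, not part of the repository): the constructor validates the Azure-specific fields up front, so misconfiguration fails fast. The key, endpoint, deployment, and API version values below are placeholders.

# Illustrative construction of connection params for both connection types.
from bondai.models.openai import OpenAIConnectionParams, OpenAIConnectionType

# Standard OpenAI connection: only an API key is required.
openai_params = OpenAIConnectionParams(
    connection_type=OpenAIConnectionType.OPENAI,
    api_key="sk-placeholder",
)

# Azure connection: api_version, azure_endpoint and azure_deployment are all
# required and raise ValueError when missing.
azure_params = OpenAIConnectionParams(
    connection_type=OpenAIConnectionType.AZURE,
    api_key="azure-key-placeholder",
    api_version="2023-07-01-preview",
    azure_endpoint="https://my-resource.openai.azure.com",
    azure_deployment="my-gpt4-deployment",
)
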
================================================
FILE: bondai/models/openai/openai_embedding_model.py
================================================
from typing import List, Dict
from bondai.models import EmbeddingModel
from .openai_models import ModelConfig, OpenAIModelType, OpenAIModelNames
from .openai_wrapper import create_embedding, count_tokens, get_max_tokens
from .openai_connection_params import OpenAIConnectionParams
from . import default_openai_connection_params as DefaultOpenAIConnectionParams


class OpenAIEmbeddingModel(EmbeddingModel):
    def __init__(
        self,
        model: OpenAIModelNames = OpenAIModelNames.TEXT_EMBEDDING_ADA_002,
        connection_params: OpenAIConnectionParams | None = None,
    ):
        self._model = model.value if isinstance(model, OpenAIModelNames) else model
        self._connection_params = (
            connection_params
            if connection_params
            else DefaultOpenAIConnectionParams.embeddings_connection_params
        )

        if ModelConfig[self._model]["model_type"] != OpenAIModelType.EMBEDDING:
            raise Exception(f"Model {model} is not an embedding model.")

        if not self._connection_params:
            raise Exception("Connection parameters not set for OpenAIEmbeddingModel.")

    @property
    def embedding_size(self) -> int:
        return ModelConfig[self._model]["embedding_size"]

    @property
    def max_tokens(self) -> int:
        return get_max_tokens(self._model)

    def create_embedding(self, prompt: str) -> List[float] | List[List[float]]:
        return create_embedding(
            prompt, connection_params=self._connection_params, model=self._model
        )

    def count_tokens(self, prompt: str) -> int:
        return count_tokens(prompt, self._model)


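Usage sketch (editorial example, not part of the repository): requires OpenAI connection params (for example, OPENAI_API_KEY in the environment) to actually create embeddings; the input text is a placeholder.

# Illustrative usage of OpenAIEmbeddingModel.
from bondai.models.openai import OpenAIEmbeddingModel, OpenAIModelNames

embedding_model = OpenAIEmbeddingModel(model=OpenAIModelNames.TEXT_EMBEDDING_ADA_002)

print(embedding_model.max_tokens)      # context limit of the embedding model
print(embedding_model.embedding_size)  # dimensionality of the returned vectors

vector = embedding_model.create_embedding("BondAI is an agent framework.")
print(len(vector))

print(embedding_model.count_tokens("BondAI is an agent framework."))
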
================================================
FILE: bondai/models/openai/openai_llm.py
================================================
from typing import Dict, List, Callable
from bondai.models import LLM
from bondai.util.caching import LLMCache
from .openai_wrapper import (
    get_streaming_completion,
    get_completion,
    count_tokens,
    get_max_tokens,
)
from .openai_connection_params import (
    OpenAIConnectionParams,
)
from . import default_openai_connection_params as DefaultOpenAIConnectionParams
from .openai_models import (
    ModelConfig,
    OpenAIModelNames,
    OpenAIModelType,
    OpenAIModelFamilyType,
)


class OpenAILLM(LLM):
    def __init__(
        self,
        model: OpenAIModelNames | str,
        connection_params: OpenAIConnectionParams = None,
        cache: LLMCache = None,
    ):
        self._cache = cache

        self._model = model.value if isinstance(model, OpenAIModelNames) else model
        if ModelConfig[self._model]["model_type"] != OpenAIModelType.LLM:
            raise Exception(f"Model {self._model} is not an LLM model.")

        if connection_params:
            self._connection_params = connection_params
        elif ModelConfig[self._model]["family"] == OpenAIModelFamilyType.GPT4:
            self._connection_params = (
                DefaultOpenAIConnectionParams.gpt_4_connection_params
            )
        else:
            self._connection_params = (
                DefaultOpenAIConnectionParams.gpt_35_connection_params
            )

        if not self._connection_params:
            raise Exception(f"Connection parameters not set for model {self._model}.")

    @property
    def max_tokens(self) -> int:
        return get_max_tokens(self._model)

    @property
    def supports_streaming(self) -> bool:
        return True

    def count_tokens(self, prompt: str) -> int:
        return count_tokens(prompt, self._model)

    def get_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        **kwargs,
    ) -> (str, Dict | None):
        if messages is None:
            messages = []
        if functions is None:
            functions = []

        if self._cache:
            input_parameters = {"messages": messages, "functions": functions, **kwargs}
            cache_item = self._cache.get_cache_item(input_parameters=input_parameters)
            if cache_item:
                return cache_item

        result = get_completion(
            connection_params=self._connection_params,
            messages=messages,
            functions=functions,
            model=self._model,
            **kwargs,
        )

        if self._cache:
            self._cache.save_cache_item(
                input_parameters=input_parameters, response=result
            )

        return result

    def get_streaming_completion(
        self,
        messages: List[Dict] | None = None,
        functions: List[Dict] | None = None,
        content_stream_callback: Callable[[str], None] = None,
        function_stream_callback: Callable[[str], None] = None,
        **kwargs,
    ) -> (str, Dict | None):
        if messages is None:
            messages = []
        if functions is None:
            functions = []

        if self._cache:
            input_parameters = {"messages": messages, "functions": functions, **kwargs}
            cache_item = self._cache.get_cache_item(input_parameters=input_parameters)
            if cache_item:
                return cache_item

        result = get_streaming_completion(
            connection_params=self._connection_params,
            messages=messages,
            functions=functions,
            model=self._model,
            content_stream_callback=content_stream_callback,
            function_stream_callback=function_stream_callback,
            **kwargs,
        )

        if self._cache:
            self._cache.save_cache_item(
                input_parameters=input_parameters, response=result
            )

        return result


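Usage sketch (editorial example, not part of the repository): requires OpenAI connection params (for example, OPENAI_API_KEY) to be configured; the prompt text is a placeholder, and the two-value unpacking follows the (content, function_call) return shown in the LLM interface.

# Illustrative usage of OpenAILLM, with and without streaming.
from bondai.models.openai import OpenAILLM, OpenAIModelNames, get_total_cost

llm = OpenAILLM(model=OpenAIModelNames.GPT4_TURBO_1106)

messages = [{"role": "user", "content": "In one sentence, what is BondAI?"}]
content, function_call = llm.get_completion(messages=messages)
print(content)

# Streaming variant: chunks are pushed to the callback as they arrive.
content, _ = llm.get_streaming_completion(
    messages=messages,
    content_stream_callback=lambda chunk: print(chunk, end="", flush=True),
)

print("\nTotal OpenAI spend so far:", get_total_cost())
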
================================================
FILE: bondai/models/openai/openai_models.py
================================================
from enum import Enum


class OpenAIConnectionType(Enum):
    AZURE: str = "azure"
    OPENAI: str = "openai"


class OpenAIModelType(Enum):
    LLM = "MODEL_TYPE_LLM"
    EMBEDDING = "MODEL_TYPE_EMBEDDING"


class OpenAIModelFamilyType(Enum):
    GPT35 = "MODEL_FAMILY_GPT_35"
    GPT4 = "MODEL_FAMILY_GPT_4"


class OpenAIModelNames(Enum):
    GPT4 = "gpt-4"
    GPT4_0613 = "gpt-4-0613"
    GPT4_32K = "gpt-4-32k"
    GPT4_TURBO_1106 = "gpt-4-1106-preview"
    GPT35_TURBO = "gpt-3.5-turbo"
    GPT35_TURBO_16K = "gpt-3.5-turbo-16k"
    GPT35_TURBO_0613 = "gpt-3.5-turbo-0613"
    GPT35_TURBO_16K_0613 = "gpt-3.5-turbo-16k-0613"
    TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"


ModelConfig = {
    OpenAIModelNames.GPT4.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 8191,
        "input_price_per_token": 0.00003,
        "output_price_per_token": 0.00006,
    },
    OpenAIModelNames.GPT4_0613.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 8191,
        "input_price_per_token": 0.00003,
        "output_price_per_token": 0.00006,
    },
    OpenAIModelNames.GPT4_32K.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 32767,
        "input_price_per_token": 0.00006,
        "output_price_per_token": 0.00012,
    },
    OpenAIModelNames.GPT4_TURBO_1106.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT4,
        "max_tokens": 128000,
        "input_price_per_token": 0.00001,
        "output_price_per_token": 0.00003,
    },
    OpenAIModelNames.GPT35_TURBO.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 4095,
        "input_price_per_token": 0.0000015,
        "output_price_per_token": 0.000002,
    },
    OpenAIModelNames.GPT35_TURBO_16K.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 16383,
        "input_price_per_token": 0.000003,
        "output_price_per_token": 0.000004,
    },
    OpenAIModelNames.GPT35_TURBO_0613.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 4095,
        "input_price_per_token": 0.0000015,
        "output_price_per_token": 0.000002,
    },
    OpenAIModelNames.GPT35_TURBO_16K_0613.value: {
        "model_type": OpenAIModelType.LLM,
        "family": OpenAIModelFamilyType.GPT35,
        "max_tokens": 16383,
        "input_price_per_token": 0.000003,
        "output_price_per_token": 0.000004,
    },
    OpenAIModelNames.TEXT_EMBEDDING_ADA_002.value: {
        "model_type": OpenAIModelType.EMBEDDING,
        "max_tokens": 8190,
        "price_per_token": 0.000000
SYMBOL INDEX (537 symbols across 78 files)

FILE: bondai/agents/agent.py
  class FinalAnswerParameters (line 44) | class FinalAnswerParameters(BaseModel):
  class FinalAnswerTool (line 48) | class FinalAnswerTool(Tool):
    method __init__ (line 49) | def __init__(self):
    method run (line 56) | def run(self, results: str) -> Tuple[str, bool]:
  class Agent (line 60) | class Agent(EventMixin, Runnable):
    method __init__ (line 61) | def __init__(
    method id (line 138) | def id(self) -> str:
    method status (line 142) | def status(self) -> AgentStatus:
    method tools (line 146) | def tools(self) -> List[Tool]:
    method clear_messages (line 149) | def clear_messages(self):
    method add_tool (line 156) | def add_tool(self, tool: Tool):
    method remove_tool (line 160) | def remove_tool(self, tool_name: str):
    method to_dict (line 163) | def to_dict(self) -> Dict:
    method save_state (line 166) | def save_state(self) -> Dict:
    method load_state (line 177) | def load_state(self, state: Dict):
    method _is_context_pressure_too_high (line 185) | def _is_context_pressure_too_high(
    method _get_llm_response (line 197) | def _get_llm_response(
    method run (line 262) | def run(
    method run_async (line 282) | def run_async(
    method stop (line 295) | def stop(self, timeout=10):
    method _run_tool_loop (line 303) | def _run_tool_loop(
    method _build_llm_context (line 453) | def _build_llm_context(
    method _compress_llm_context (line 504) | def _compress_llm_context(
    method _handle_llm_function (line 596) | def _handle_llm_function(self, tool_message: ToolUsageMessage, tools: ...

FILE: bondai/agents/compression/conversation_summarizer.py
  function summarize_conversation (line 16) | def summarize_conversation(

FILE: bondai/agents/compression/message_summarizer.py
  function summarize_messages (line 15) | def summarize_messages(
  function _summarize_message (line 68) | def _summarize_message(

FILE: bondai/agents/conversation_member.py
  class ConversationMemberEventNames (line 15) | class ConversationMemberEventNames(Enum):
  class ConversationMember (line 22) | class ConversationMember(ABC):
    method __init__ (line 23) | def __init__(
    method id (line 36) | def id(self) -> str:
    method name (line 40) | def name(self) -> str:
    method persona (line 44) | def persona(self) -> str:
    method persona_summary (line 48) | def persona_summary(self) -> str:
    method messages (line 52) | def messages(self) -> AgentMessageList:
    method send_message (line 56) | def send_message(
    method send_message_async (line 67) | def send_message_async(
    method clear_messages (line 78) | def clear_messages(self):

FILE: bondai/agents/conversational_agent.py
  class ConversationalAgent (line 46) | class ConversationalAgent(Agent, ConversationMember):
    method __init__ (line 47) | def __init__(
    method instructions (line 125) | def instructions(self) -> str:
    method send_message_async (line 128) | def send_message_async(
    method send_message (line 156) | def send_message(
    method to_dict (line 318) | def to_dict(self) -> Dict:
    method save_state (line 335) | def save_state(self, file_path: str = None) -> Dict:
    method from_dict (line 347) | def from_dict(

FILE: bondai/agents/group_chat/group_conversation.py
  class GroupConversation (line 22) | class GroupConversation(EventMixin, Runnable):
    method __init__ (line 23) | def __init__(
    method id (line 59) | def id(self) -> str:
    method status (line 63) | def status(self) -> AgentStatus:
    method members (line 67) | def members(self) -> List[ConversationMember]:
    method remove_messages_after (line 70) | def remove_messages_after(self, timestamp: datetime, inclusive: bool =...
    method _get_member (line 75) | def _get_member(self, member_name: str) -> ConversationMember:
    method _init_member_events (line 80) | def _init_member_events(self):
    method _on_member_message_received (line 95) | def _on_member_message_received(
    method _on_member_message_error (line 103) | def _on_member_message_error(
    method _on_member_message_completed (line 110) | def _on_member_message_completed(
    method _on_member_exited (line 118) | def _on_member_exited(
    method save_state (line 125) | def save_state(self) -> Dict:
    method load_state (line 137) | def load_state(self, state: Dict):
    method send_message_async (line 146) | def send_message_async(
    method send_message (line 165) | def send_message(
    method reset_memory (line 273) | def reset_memory(self):

FILE: bondai/agents/group_chat/group_conversation_config.py
  class BaseGroupConversationConfig (line 6) | class BaseGroupConversationConfig(ABC):
    method members (line 9) | def members(self) -> List[ConversationMember]:
    method get_reachable_members (line 13) | def get_reachable_members(
  class GroupConversationConfig (line 19) | class GroupConversationConfig(ABC):
    method __init__ (line 20) | def __init__(self, members: List[ConversationMember]):
    method _members (line 24) | def _members(self) -> List[ConversationMember]:
    method get_reachable_members (line 27) | def get_reachable_members(
  class TeamConversationConfig (line 47) | class TeamConversationConfig(BaseGroupConversationConfig):
    method __init__ (line 48) | def __init__(self, *args: List[ConversationMember]):
    method members (line 55) | def members(self) -> List[ConversationMember]:
    method get_reachable_members (line 58) | def get_reachable_members(
  class TableConversationConfig (line 85) | class TableConversationConfig(BaseGroupConversationConfig):
    method __init__ (line 86) | def __init__(self, member_table: Dict):
    method members (line 90) | def members(self) -> List[ConversationMember]:
    method get_reachable_members (line 93) | def get_reachable_members(
  class CompositeConversationConfig (line 108) | class CompositeConversationConfig(BaseGroupConversationConfig):
    method __init__ (line 109) | def __init__(self, *conversation_configs: List[BaseGroupConversationCo...
    method members (line 115) | def members(self) -> List[ConversationMember]:
    method get_reachable_members (line 118) | def get_reachable_members(

FILE: bondai/agents/group_chat/user_proxy.py
  class UserProxy (line 17) | class UserProxy(EventMixin, ConversationMember):
    method __init__ (line 18) | def __init__(
    method send_message (line 42) | def send_message(

FILE: bondai/agents/messages.py
  class AgentMessage (line 15) | class AgentMessage(ABC):
  class SystemMessage (line 22) | class SystemMessage(AgentMessage):
  class SummaryMessage (line 28) | class SummaryMessage(AgentMessage):
  class ConversationMessage (line 35) | class ConversationMessage(AgentMessage):
  class ToolUsageMessage (line 50) | class ToolUsageMessage(AgentMessage):
  function custom_serialization (line 63) | def custom_serialization(value):
  function message_to_dict (line 76) | def message_to_dict(message: AgentMessage) -> Dict:
  class AgentMessageList (line 95) | class AgentMessageList:
    method __init__ (line 96) | def __init__(self, messages: List[AgentMessage] | None = None):
    method add (line 103) | def add(self, item: AgentMessage):
    method remove (line 109) | def remove(self, item: AgentMessage):
    method remove_after (line 114) | def remove_after(self, timestamp: datetime, inclusive: bool = True):
    method clear (line 121) | def clear(self):
    method __getitem__ (line 125) | def __getitem__(self, index: int):
    method __add__ (line 128) | def __add__(self, other: List[AgentMessage] | "AgentMessageList"):
    method __iter__ (line 141) | def __iter__(self):
    method __contains__ (line 144) | def __contains__(self, item):
    method __len__ (line 147) | def __len__(self):
    method to_dict (line 150) | def to_dict(self) -> List[Dict]:
    method from_dict (line 157) | def from_dict(cls, data: List[Dict]) -> "AgentMessageList":

FILE: bondai/agents/util.py
  class AgentStatus (line 11) | class AgentStatus(Enum):
  class AgentException (line 16) | class AgentException(Exception):
  class BudgetExceededException (line 20) | class BudgetExceededException(AgentException):
  class MaxStepsExceededException (line 24) | class MaxStepsExceededException(AgentException):
  class ContextLengthExceededException (line 28) | class ContextLengthExceededException(AgentException):
  class AgentEventNames (line 32) | class AgentEventNames(Enum):
  function count_request_tokens (line 41) | def count_request_tokens(
  function execute_tool (line 53) | def execute_tool(
  function validate_tool_params (line 86) | def validate_tool_params(func, params):
  function tool_supports_unpacking (line 108) | def tool_supports_unpacking(func):
  function parse_response_content_message (line 115) | def parse_response_content_message(response: str) -> (str, str):
  function format_llm_messages (line 137) | def format_llm_messages(

FILE: bondai/api/agent_wrapper.py
  class AgentWrapper (line 5) | class AgentWrapper:
    method __init__ (line 6) | def __init__(self, uuid, conversational_agent, task_agent, tools):
    method find_tool (line 12) | def find_tool(self, tool_name):
    method get_previous_steps (line 18) | def get_previous_steps(self):
    method get_agent (line 21) | def get_agent(self):
    method start_agent (line 31) | def start_agent(self, task=None, task_budget=None, max_steps=None):
    method stop_agent (line 38) | def stop_agent(self):
    method get_agent_tool_options (line 42) | def get_agent_tool_options(self):
    method get_agent_tools (line 45) | def get_agent_tools(self):
    method add_tool (line 48) | def add_tool(self, tool_name):
    method remove_tool (line 59) | def remove_tool(self, tool_name):

FILE: bondai/api/api_error.py
  class BondAIAPIError (line 1) | class BondAIAPIError(Exception):

FILE: bondai/api/api_user_proxy.py
  class APIUserProxy (line 16) | class APIUserProxy(EventMixin, ConversationMember):
    method __init__ (line 17) | def __init__(self, socketio: SocketIO, persona: str | None = None):
    method send_message (line 34) | def send_message(

FILE: bondai/api/client.py
  class BondAIAPIClient (line 8) | class BondAIAPIClient(EventMixin):
    method __init__ (line 9) | def __init__(self, base_url="http://127.0.0.1:2663"):
    method connect_ws (line 28) | def connect_ws(self):
    method disconnect_ws (line 56) | def disconnect_ws(self):
    method is_ws_connected (line 61) | def is_ws_connected(self):
    method send_ws_message (line 64) | def send_ws_message(self, event, data):
    method _request (line 72) | def _request(self, method, endpoint, data=None):
    method create_agent (line 89) | def create_agent(self):
    method send_message (line 92) | def send_message(self, agent_id, message):
    method list_agents (line 96) | def list_agents(self):
    method get_agent (line 99) | def get_agent(self, agent_id):
    method get_agent_tool_options (line 102) | def get_agent_tool_options(self, agent_id):
    method get_agent_tools (line 105) | def get_agent_tools(self, agent_id):
    method add_agent_tool (line 108) | def add_agent_tool(self, agent_id, tool_name):
    method remove_agent_tool (line 112) | def remove_agent_tool(self, agent_id, tool_name):
    method stop_agent (line 115) | def stop_agent(self, agent_id):
    method get_settings (line 118) | def get_settings(self):
    method set_settings (line 121) | def set_settings(self, settings):

FILE: bondai/api/routes.py
  function setup_routes (line 9) | def setup_routes(server, tool_options: List[Tool] = []):

FILE: bondai/api/server.py
  class BondAIAPIError (line 24) | class BondAIAPIError(Exception):
  class AgentRegistration (line 32) | class AgentRegistration:
  class BondAIAPIServer (line 39) | class BondAIAPIServer:
    method __init__ (line 40) | def __init__(self, agent_builder: Callable, port: int = 2663):
    method app (line 53) | def app(self):
    method agent_registrations (line 57) | def agent_registrations(self) -> List[AgentRegistration]:
    method get_agent_by_id (line 60) | def get_agent_by_id(self, agent_id: str) -> ConversationalAgent | None:
    method register_new_agent (line 72) | def register_new_agent(self) -> AgentRegistration:
    method _handle_client_message (line 89) | def _handle_client_message(self, message):
    method _send_message (line 114) | def _send_message(
    method _setup_conversation_events (line 121) | def _setup_conversation_events(self, conversational_agent: Conversatio...
    method _setup_execution_events (line 172) | def _setup_execution_events(
    method run (line 200) | def run(self):
    method shutdown (line 211) | def shutdown(self):

FILE: bondai/api/settings.py
  function get_settings (line 151) | def get_settings():
  function set_settings (line 167) | def set_settings(settings):
  class SettingsResource (line 193) | class SettingsResource(Resource):
    method get (line 194) | def get(self):
    method post (line 197) | def post(self):

FILE: bondai/cli/cli.py
  function build_agents (line 84) | def build_agents(llm: LLM) -> GroupConversation:
  function run_cli (line 120) | def run_cli():

FILE: bondai/cli/default_tools.py
  function load_all_tools (line 25) | def load_all_tools():

FILE: bondai/main.py
  function main (line 6) | def main():

FILE: bondai/memory/archival/datasources.py
  class ArchivalMemoryDataSource (line 11) | class ArchivalMemoryDataSource(ABC):
    method size (line 14) | def size(self) -> int:
    method insert (line 18) | def insert(self, content: str):
    method insert_bulk (line 22) | def insert_bulk(self, content: List[str]):
    method search (line 26) | def search(self, query: str, page: int = 0) -> List[str]:
    method clear (line 30) | def clear(self):
  class PersistentArchivalMemoryDataSource (line 34) | class PersistentArchivalMemoryDataSource(ArchivalMemoryDataSource):
    method __init__ (line 35) | def __init__(
    method size (line 54) | def size(self) -> int:
    method _load_data (line 57) | def _load_data(self):
    method _save_data (line 64) | def _save_data(self):
    method _rebuild_index (line 69) | def _rebuild_index(self):
    method insert (line 77) | def insert(self, content: str):
    method insert_bulk (line 83) | def insert_bulk(self, content: List[str]):
    method search (line 90) | def search(self, query: str, page: int = 0) -> List[str]:
    method clear (line 100) | def clear(self):
  class InMemoryArchivalMemoryDataSource (line 106) | class InMemoryArchivalMemoryDataSource(ArchivalMemoryDataSource):
    method __init__ (line 107) | def __init__(self, embedding_model: EmbeddingModel | None = None, page...
    method size (line 120) | def size(self) -> int:
    method insert (line 123) | def insert(self, content: str):
    method insert_bulk (line 131) | def insert_bulk(self, content: List[str]):
    method _rebuild_index (line 142) | def _rebuild_index(self):
    method search (line 148) | def search(self, query: str, page: int = 0) -> List[str]:
    method clear (line 167) | def clear(self):

FILE: bondai/memory/archival/tools.py
  class ArchivalMemoryInsertToolParameters (line 13) | class ArchivalMemoryInsertToolParameters(BaseModel):
  class ArchivalMemoryInsertTool (line 17) | class ArchivalMemoryInsertTool(Tool):
    method __init__ (line 18) | def __init__(self, datasource: ArchivalMemoryDataSource):
    method run (line 26) | def run(self, content: str):
  class ArchivalMemorySearchToolParameters (line 38) | class ArchivalMemorySearchToolParameters(BaseModel):
  class ArchivalMemorySearchTool (line 43) | class ArchivalMemorySearchTool(Tool):
    method __init__ (line 44) | def __init__(self, datasource: ArchivalMemoryDataSource):
    method run (line 52) | def run(self, query: str, page: int = 0) -> str:
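
  A minimal sketch wiring an archival datasource to the two tools above, using only the constructors and run() signatures listed; whether InMemoryArchivalMemoryDataSource is usable without an embedding model (its default is None) is an assumption.
    from bondai.memory.archival.datasources import InMemoryArchivalMemoryDataSource
    from bondai.memory.archival.tools import ArchivalMemoryInsertTool, ArchivalMemorySearchTool

    datasource = InMemoryArchivalMemoryDataSource()   # embedding_model defaults to None per the signature above
    datasource.insert_bulk(["BondAI supports archival memory.", "Tools wrap datasources."])

    insert_tool = ArchivalMemoryInsertTool(datasource)
    search_tool = ArchivalMemorySearchTool(datasource)

    insert_tool.run(content="Archival memory stores long-term facts.")
    print(search_tool.run(query="archival memory", page=0))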

FILE: bondai/memory/conversation/datasources.py
  function format_messages (line 15) | def format_messages(messages: List[AgentMessage]) -> str:
  class ConversationMemoryDataSource (line 27) | class ConversationMemoryDataSource(ABC):
    method messages (line 30) | def messages(self) -> List[AgentMessage]:
    method add (line 34) | def add(self, message: AgentMessage):
    method remove (line 38) | def remove(self, message: AgentMessage):
    method remove_after (line 41) | def remove_after(self, timestamp: datetime, inclusive: bool = True):
    method search (line 45) | def search(
    method clear (line 55) | def clear(self):
  class InMemoryConversationMemoryDataSource (line 59) | class InMemoryConversationMemoryDataSource(ConversationMemoryDataSource):
    method __init__ (line 60) | def __init__(self, page_size=10):
    method messages (line 65) | def messages(self) -> List[AgentMessage]:
    method add (line 68) | def add(self, message: AgentMessage):
    method remove (line 71) | def remove(self, message: AgentMessage):
    method remove_after (line 74) | def remove_after(self, timestamp: datetime, inclusive: bool = True):
    method search (line 77) | def search(
    method clear (line 113) | def clear(self):
  class PersistentConversationMemoryDataSource (line 117) | class PersistentConversationMemoryDataSource(InMemoryConversationMemoryD...
    method __init__ (line 118) | def __init__(
    method _load_data (line 125) | def _load_data(self):
    method _save_data (line 132) | def _save_data(self):
    method add (line 137) | def add(self, message: str) -> None:
    method remove (line 141) | def remove(self, message: str) -> None:
    method remove_after (line 145) | def remove_after(self, timestamp: datetime, inclusive: bool = True):
    method clear (line 149) | def clear(self):

FILE: bondai/memory/conversation/tools.py
  class ConversationMemorySearchParameters (line 15) | class ConversationMemorySearchParameters(BaseModel):
  class ConversationMemorySearchTool (line 20) | class ConversationMemorySearchTool(Tool):
    method __init__ (line 21) | def __init__(self, datasource: ConversationMemoryDataSource):
    method run (line 29) | def run(self, query: str, page: int = 0) -> str:
  class ConversationMemorySearchDateParameters (line 42) | class ConversationMemorySearchDateParameters(BaseModel):
  class ConversationMemorySearchDateTool (line 47) | class ConversationMemorySearchDateTool(Tool):
    method __init__ (line 48) | def __init__(self, datasource: ConversationMemoryDataSource):
    method run (line 56) | def run(self, start_date: str, end_date: str, page: int = 0) -> str:

FILE: bondai/memory/core/datasources.py
  class CoreMemoryDataSource (line 12) | class CoreMemoryDataSource(ABC):
    method sections (line 15) | def sections(self) -> List[str]:
    method get (line 19) | def get(self, section: str) -> str:
    method set (line 23) | def set(self, section: str, content: str) -> None:
  class PersistentCoreMemoryDataSource (line 27) | class PersistentCoreMemoryDataSource(CoreMemoryDataSource):
    method __init__ (line 28) | def __init__(
    method _load_data (line 40) | def _load_data(self, initial_sections: Dict[str, str] = None):
    method _save_data (line 47) | def _save_data(self):
    method sections (line 53) | def sections(self) -> List[str]:
    method get (line 56) | def get(self, section: str) -> str:
    method set (line 59) | def set(self, section: str, content: str) -> None:
  class InMemoryCoreMemoryDataSource (line 68) | class InMemoryCoreMemoryDataSource(CoreMemoryDataSource):
    method __init__ (line 69) | def __init__(
    method sections (line 78) | def sections(self) -> List[str]:
    method get (line 81) | def get(self, section: str) -> str:
    method set (line 84) | def set(self, section: str, content: str) -> None:
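
  Sketch of reading and writing core-memory sections through InMemoryCoreMemoryDataSource, based on the sections/get/set signatures above; constructing it without arguments and the "user" section name are assumptions (its __init__ parameters are truncated in this listing).
    from bondai.memory.core.datasources import InMemoryCoreMemoryDataSource

    core = InMemoryCoreMemoryDataSource()          # assumption: usable with default sections
    print(core.sections())                         # section names, e.g. those in DEFAULT_MEMORY_SECTIONS
    core.set("user", "Prefers concise answers.")   # "user" as a section name is illustrative
    print(core.get("user"))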

FILE: bondai/memory/core/tools.py
  class CoreMemoryAppendParameters (line 15) | class CoreMemoryAppendParameters(BaseModel):
  class CoreMemoryAppendTool (line 20) | class CoreMemoryAppendTool(Tool):
    method __init__ (line 21) | def __init__(self, datasource: CoreMemoryDataSource):
    method run (line 29) | def run(self, section: str, content: str):
  class CoreMemoryReplaceParameters (line 47) | class CoreMemoryReplaceParameters(BaseModel):
  class CoreMemoryReplaceTool (line 53) | class CoreMemoryReplaceTool(Tool):
    method __init__ (line 54) | def __init__(self, datasource: CoreMemoryDataSource):
    method run (line 62) | def run(self, section: str, old_content: str, new_content: str):

FILE: bondai/memory/memory_manager.py
  class MemoryManager (line 30) | class MemoryManager:
    method __init__ (line 31) | def __init__(
    method core_memory (line 46) | def core_memory(self) -> CoreMemoryDataSource:
    method conversation_memory (line 50) | def conversation_memory(self) -> ConversationMemoryDataSource:
    method archival_memory (line 54) | def archival_memory(self) -> ArchivalMemoryDataSource:
    method tools (line 58) | def tools(self):
    method __call__ (line 85) | def __call__(self):
    method render_prompt_section (line 88) | def render_prompt_section(self) -> str:
  class PersistentMemoryManager (line 96) | class PersistentMemoryManager(MemoryManager):
    method __init__ (line 97) | def __init__(
  class ConversationalMemoryManager (line 111) | class ConversationalMemoryManager(MemoryManager):
    method __init__ (line 112) | def __init__(

FILE: bondai/models/embedding_model.py
  class EmbeddingModel (line 5) | class EmbeddingModel(ABC):
    method max_tokens (line 8) | def max_tokens() -> int:
    method embedding_size (line 13) | def embedding_size() -> int:
    method create_embedding (line 17) | def create_embedding(prompt: str) -> List[float] | List[List[float]]:
    method count_tokens (line 21) | def count_tokens(prompt: str) -> int:
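
  The four abstract members above define the embedding interface (the file preview shows them declared as abstract properties/methods). A toy, non-semantic implementation sketch, purely to illustrate the contract; whether each member is a property or a plain method is partly an assumption.
    from typing import List
    from bondai.models import EmbeddingModel

    class ToyEmbeddingModel(EmbeddingModel):
        # Illustration only: deterministic, non-semantic vectors.
        @property
        def max_tokens(self) -> int:
            return 8192

        @property
        def embedding_size(self) -> int:
            return 8

        def create_embedding(self, prompt: str) -> List[float]:
            # Hash characters into a fixed-size vector; not a real embedding.
            vec = [0.0] * self.embedding_size
            for i, ch in enumerate(prompt):
                vec[i % self.embedding_size] += ord(ch) / 1000.0
            return vec

        def count_tokens(self, prompt: str) -> int:
            return len(prompt.split())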

FILE: bondai/models/llm.py
  class LLM (line 5) | class LLM(ABC):
    method max_tokens (line 8) | def max_tokens() -> int:
    method supports_streaming (line 13) | def supports_streaming() -> bool:
    method get_completion (line 17) | def get_completion(
    method get_streaming_completion (line 25) | def get_streaming_completion(
    method count_tokens (line 35) | def count_tokens(prompt: str) -> int:

FILE: bondai/models/openai/default_openai_connection_params.py
  function configure_openai_connection (line 11) | def configure_openai_connection(api_key: str):
  function configure_azure_connection (line 50) | def configure_azure_connection(
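
  Sketch of configuring the default OpenAI connection from an environment variable, using the module-level function listed above; OPENAI_API_KEY matches the variable named in sample.env.
    import os
    from bondai.models.openai.default_openai_connection_params import configure_openai_connection

    configure_openai_connection(api_key=os.environ["OPENAI_API_KEY"])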

FILE: bondai/models/openai/openai_connection_params.py
  class OpenAIConnectionParams (line 4) | class OpenAIConnectionParams:
    method __init__ (line 5) | def __init__(
    method connection_type (line 38) | def connection_type(self):
    method api_key (line 42) | def api_key(self):
    method api_version (line 46) | def api_version(self):
    method azure_endpoint (line 50) | def azure_endpoint(self):
    method azure_deployment (line 54) | def azure_deployment(self):
    method configure_openai_connection (line 57) | def configure_openai_connection(self, api_key: str):
    method configure_azure_connection (line 66) | def configure_azure_connection(
    method to_dict (line 86) | def to_dict(self):

FILE: bondai/models/openai/openai_embedding_model.py
  class OpenAIEmbeddingModel (line 9) | class OpenAIEmbeddingModel(EmbeddingModel):
    method __init__ (line 10) | def __init__(
    method embedding_size (line 29) | def embedding_size(self) -> int:
    method max_tokens (line 33) | def max_tokens(self) -> int:
    method create_embedding (line 36) | def create_embedding(self, prompt: str) -> List[float] | List[List[flo...
    method count_tokens (line 41) | def count_tokens(self, prompt: str) -> int:

FILE: bondai/models/openai/openai_llm.py
  class OpenAILLM (line 22) | class OpenAILLM(LLM):
    method __init__ (line 23) | def __init__(
    method max_tokens (line 50) | def max_tokens(self) -> int:
    method supports_streaming (line 54) | def supports_streaming(self) -> bool:
    method count_tokens (line 57) | def count_tokens(self, prompt: str) -> int:
    method get_completion (line 60) | def get_completion(
    method get_streaming_completion (line 92) | def get_streaming_completion(

FILE: bondai/models/openai/openai_models.py
  class OpenAIConnectionType (line 4) | class OpenAIConnectionType(Enum):
  class OpenAIModelType (line 9) | class OpenAIModelType(Enum):
  class OpenAIModelFamilyType (line 14) | class OpenAIModelFamilyType(Enum):
  class OpenAIModelNames (line 19) | class OpenAIModelNames(Enum):

FILE: bondai/models/openai/openai_wrapper.py
  function enable_logging (line 19) | def enable_logging(model_logger: ModelLogger):
  function disable_logging (line 24) | def disable_logging():
  function get_gpt_tokens (line 29) | def get_gpt_tokens() -> int:
  function get_embedding_tokens (line 33) | def get_embedding_tokens() -> int:
  function get_gpt_costs (line 37) | def get_gpt_costs() -> float:
  function get_embedding_costs (line 41) | def get_embedding_costs() -> float:
  function get_total_cost (line 45) | def get_total_cost() -> float:
  function reset_total_cost (line 49) | def reset_total_cost():
  function calculate_cost (line 57) | def calculate_cost(model_name: str, usage: Dict):
  function get_max_tokens (line 76) | def get_max_tokens(model: str) -> int:
  function count_tokens (line 80) | def count_tokens(prompt: str, model: Dict) -> int:
  function create_embedding (line 85) | def create_embedding(
  function get_completion (line 128) | def get_completion(
  function get_streaming_completion (line 177) | def get_streaming_completion(
  function _log_completion (line 260) | def _log_completion(
  function _get_completion (line 289) | def _get_completion(
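
  The wrapper exposes module-level token/cost accounting and optional prompt logging. A sketch of the bookkeeping calls, based only on the names and signatures listed above; costs accrue only from completions actually made elsewhere in the program.
    from bondai.util import ModelLogger
    from bondai.models.openai.openai_wrapper import (
        enable_logging,
        disable_logging,
        get_total_cost,
        reset_total_cost,
    )

    enable_logging(ModelLogger("./logs"))   # log prompts/responses; "./logs" is the default shown under model_logger.py below
    reset_total_cost()
    # ... run agents / completions here ...
    print(f"Accumulated OpenAI cost: ${get_total_cost():.4f}")
    disable_logging()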

FILE: bondai/prompt/default_prompt_builder.py
  class DefaultPromptBuilder (line 4) | class DefaultPromptBuilder(PromptBuilder):
    method __init__ (line 5) | def __init__(self, prompt_template: str):
    method build_prompt (line 8) | def build_prompt(self, **kwargs) -> str:

FILE: bondai/prompt/jinja_prompt_builder.py
  class JinjaPromptBuilder (line 7) | class JinjaPromptBuilder(PromptBuilder):
    method __init__ (line 8) | def __init__(self, prompt_template: str):
    method _apply_prompt_template (line 11) | def _apply_prompt_template(self, template_string: str, **kwargs) -> str:
    method build_prompt (line 15) | def build_prompt(self, **kwargs) -> str:
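
  Usage sketch for JinjaPromptBuilder based on the constructor and build_prompt signatures above; the template string is illustrative.
    from bondai.prompt import JinjaPromptBuilder

    builder = JinjaPromptBuilder("Hello {{ name }}! Today is {{ day }}.")  # illustrative template
    print(builder.build_prompt(name="Mira", day="Tuesday"))
    # PromptBuilder also defines __call__, so builder(name="Mira", day="Tuesday") should be equivalent.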

FILE: bondai/prompt/prompt_builder.py
  class PromptBuilder (line 5) | class PromptBuilder(ABC):
    method __call__ (line 6) | def __call__(self, **kwargs: Dict[str, Any]) -> str:
    method build_prompt (line 10) | def build_prompt(self, **kwargs: Dict[str, Any]) -> str:
    method _apply_prompt_template (line 13) | def _apply_prompt_template(prompt_template: str, **kwargs) -> str:

FILE: bondai/tools/agent_tool.py
  class Parameters (line 14) | class Parameters(BaseModel):
  class AgentTool (line 18) | class AgentTool(Tool):
    method __init__ (line 19) | def __init__(self, agent):
    method run (line 25) | def run(self, task_description: str) -> str:
    method stop (line 39) | def stop(self):

FILE: bondai/tools/alpaca_markets/create_order.py
  class Parameters (line 22) | class Parameters(BaseModel):
  class CreateOrderTool (line 32) | class CreateOrderTool(Tool):
    method __init__ (line 33) | def __init__(
    method run (line 43) | def run(self, arguments):

FILE: bondai/tools/alpaca_markets/get_account.py
  class GetAccountTool (line 11) | class GetAccountTool(Tool):
    method __init__ (line 12) | def __init__(
    method run (line 22) | def run(self, arguments):

FILE: bondai/tools/alpaca_markets/list_positions.py
  class ListPositionsTool (line 13) | class ListPositionsTool(Tool):
    method __init__ (line 14) | def __init__(
    method run (line 24) | def run(self, arguments):

FILE: bondai/tools/alpaca_markets/response_formatter.py
  function format_order_response (line 1) | def format_order_response(response):
  function format_account_response (line 20) | def format_account_response(response):
  function format_positions_response (line 38) | def format_positions_response(response):
  function format_position (line 45) | def format_position(position):
  function format_orders_response (line 55) | def format_orders_response(response):
  function format_order (line 62) | def format_order(order):

FILE: bondai/tools/bland_ai/bland_ai_tools.py
  class CallParameters (line 28) | class CallParameters(BaseModel):
  function validate_phone_number (line 35) | def validate_phone_number(phone):
  class BlandAITool (line 57) | class BlandAITool(Tool):
    method __init__ (line 58) | def __init__(
    method run (line 69) | def run(self, arguments):
    method start_call (line 99) | def start_call(self, arguments):
    method check_call_status (line 108) | def check_call_status(self, call_id):
    method end_call (line 126) | def end_call(self, call_id):

FILE: bondai/tools/conversational/conversational_tools.py
  class SendMessageToolParameters (line 18) | class SendMessageToolParameters(BaseModel):
  class SendMessageTool (line 24) | class SendMessageTool(Tool):
    method __init__ (line 25) | def __init__(self):
    method run (line 32) | def run(
  class ExitConversationTool (line 49) | class ExitConversationTool(Tool):
    method __init__ (line 50) | def __init__(self):
    method run (line 56) | def run(self, arguments: Dict) -> Tuple[str, bool]:

FILE: bondai/tools/dalle_tool.py
  class Parameters (line 19) | class Parameters(BaseModel):
  class DalleTool (line 25) | class DalleTool(Tool):
    method __init__ (line 26) | def __init__(self, connection_params: OpenAIConnectionParams | None = ...
    method run (line 36) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/database/db_query.py
  class Parameters (line 9) | class Parameters(BaseModel):
  class DatabaseQueryTool (line 61) | class DatabaseQueryTool(Tool):
    method __init__ (line 62) | def __init__(
    method run (line 83) | def run(self, arguments):
    method __format_response (line 104) | def __format_response(self, rows, colnames):
    method __get_database_connection (line 113) | def __get_database_connection(self):
    method __query_database (line 127) | def __query_database(self, query):
    method __get_database_schema (line 150) | def __get_database_schema(self):

FILE: bondai/tools/file/file_query.py
  function is_pdf (line 13) | def is_pdf(filename: str) -> bool:
  function build_prompt (line 19) | def build_prompt(question: str, context: str) -> str:
  class Parameters (line 28) | class Parameters(BaseModel):
  class FileQueryTool (line 34) | class FileQueryTool(Tool):
    method __init__ (line 35) | def __init__(
    method run (line 49) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/file/file_read.py
  function is_pdf (line 10) | def is_pdf(filename: str) -> bool:
  class Parameters (line 16) | class Parameters(BaseModel):
  class FileReadTool (line 21) | class FileReadTool(Tool):
    method __init__ (line 22) | def __init__(self):
    method run (line 25) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/file/file_write.py
  class Parameters (line 13) | class Parameters(BaseModel):
  class FileWriteTool (line 20) | class FileWriteTool(Tool):
    method __init__ (line 21) | def __init__(self):
    method run (line 24) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/gmail/list_emails.py
  class Parameters (line 14) | class Parameters(BaseModel):
  function get_email_attr (line 20) | def get_email_attr(message: Dict, attr: str) -> str:
  class ListEmailsTool (line 26) | class ListEmailsTool(Tool):
    method __init__ (line 27) | def __init__(
    method run (line 43) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/gmail/query_emails.py
  function get_email_attr (line 16) | def get_email_attr(message: Dict, attr: str) -> str:
  function build_prompt (line 22) | def build_prompt(question: str, context: str) -> str:
  function parse_body (line 31) | def parse_body(message: Dict) -> str:
  class Parameters (line 56) | class Parameters(BaseModel):
  class QueryEmailsTool (line 62) | class QueryEmailsTool(Tool):
    method __init__ (line 63) | def __init__(
    method run (line 84) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/langchain_tool.py
  class LangChainTool (line 6) | class LangChainTool(Tool):
    method __init__ (line 7) | def __init__(
    method run (line 20) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/python_repl_tool.py
  class Parameters (line 18) | class Parameters(BaseModel):
  function execute_target (line 23) | def execute_target(conn: Connection, code: str):
  class PythonREPLTool (line 45) | class PythonREPLTool(Tool):
    method __init__ (line 46) | def __init__(self, execution_timeout: int = DEFAULT_EXECUTION_TIMEOUT):
    method run (line 52) | def run(self, arguments: Dict) -> str:
    method execute_code (line 84) | def execute_code(self, code: str) -> (Dict, str, str):

FILE: bondai/tools/response_query.py
  function build_prompt (line 18) | def build_prompt(question, context):
  class Parameters (line 27) | class Parameters(BaseModel):
  class ResponseQueryTool (line 33) | class ResponseQueryTool(Tool):
    method __init__ (line 34) | def __init__(
    method responses (line 50) | def responses(self):
    method add_response (line 53) | def add_response(self, response: str) -> str:
    method run (line 58) | def run(self, arguments: Dict) -> str:
    method clear_responses (line 76) | def clear_responses(self):

FILE: bondai/tools/search/duck_duck_go_search.py
  class Parameters (line 12) | class Parameters(BaseModel):
  function search_duckduckgo (line 19) | def search_duckduckgo(query: str, count: int = 10, page: int = 1) -> str:
  class DuckDuckGoSearchTool (line 29) | class DuckDuckGoSearchTool(Tool):
    method __init__ (line 30) | def __init__(self):
    method run (line 35) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/search/google_search.py
  class Parameters (line 22) | class Parameters(BaseModel):
  class GoogleSearchTool (line 29) | class GoogleSearchTool(Tool):
    method __init__ (line 30) | def __init__(
    method run (line 40) | def run(self, arguments):

FILE: bondai/tools/shell_tool.py
  class Parameters (line 18) | class Parameters(BaseModel):
  class ShellTool (line 23) | class ShellTool(Tool):
    method __init__ (line 24) | def __init__(self, execution_timeout: int = DEFAULT_EXECUTION_TIMEOUT):
    method run (line 30) | def run(self, arguments: Dict) -> str:
    method execute_command (line 52) | def execute_command(self, cmd: str) -> (str, str):

FILE: bondai/tools/task_completed_tool.py
  class TaskCompletedToolParameters (line 6) | class TaskCompletedToolParameters(BaseModel):
  class TaskCompletedTool (line 10) | class TaskCompletedTool(Tool):
    method __init__ (line 11) | def __init__(self):
    method run (line 18) | def run(self, arguments: Dict) -> Dict[str, bool]:

FILE: bondai/tools/tool.py
  class InputParameters (line 5) | class InputParameters(BaseModel):
  class EmptyParameters (line 10) | class EmptyParameters(BaseModel):
  class Tool (line 14) | class Tool:
    method __init__ (line 15) | def __init__(
    method get_tool_function (line 36) | def get_tool_function(self) -> Dict:
    method run (line 43) | def run(self, arguments: Dict) -> str | Dict:
    method handle_stream_update (line 47) | def handle_stream_update(self, arguments_buffer: str):
    method save_state (line 52) | def save_state() -> Dict:
    method load_state (line 57) | def load_state(state: Dict):
    method stop (line 62) | def stop(self):
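
  The Tool base class above is the extension point used by every tool under bondai/tools. A sketch of a custom tool following the Parameters-model pattern visible throughout this listing; the exact arguments accepted by Tool.__init__ (its signature is truncated above) are an assumption.
    from typing import Dict
    from pydantic import BaseModel
    from bondai.tools import Tool

    class Parameters(BaseModel):
        city: str
        thought: str

    class WeatherTool(Tool):
        def __init__(self):
            # Assumption: the base constructor takes a name, a description, and a parameters model,
            # mirroring the TOOL_NAME / TOOL_DESCRIPTION / Parameters pattern used by the built-in tools.
            super().__init__("get_weather", "Returns a canned weather report for a city.", Parameters)

        def run(self, arguments: Dict) -> str:
            return f"The weather in {arguments['city']} is sunny."   # placeholder behavior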

FILE: bondai/tools/vision/image_analysis_tool.py
  class Parameters (line 13) | class Parameters(BaseModel):
  class ImageAnalysisTool (line 19) | class ImageAnalysisTool(Tool):
    method __init__ (line 20) | def __init__(
    method _encode_image (line 33) | def _encode_image(self, image_path):
    method _analyze_image (line 37) | def _analyze_image(self, image_data, analysis_description):
    method run (line 57) | def run(

FILE: bondai/tools/website/download_file.py
  class Parameters (line 13) | class Parameters(BaseModel):
  class DownloadFileTool (line 19) | class DownloadFileTool(Tool):
    method __init__ (line 20) | def __init__(self):
    method run (line 23) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/website/extract_hyperlinks.py
  class Parameters (line 14) | class Parameters(BaseModel):
  class WebsiteExtractHyperlinksTool (line 19) | class WebsiteExtractHyperlinksTool(Tool):
    method __init__ (line 20) | def __init__(self):
    method run (line 25) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/website/html_query.py
  function build_prompt (line 14) | def build_prompt(question, context):
  class Parameters (line 23) | class Parameters(BaseModel):
  class WebsiteHtmlQueryTool (line 29) | class WebsiteHtmlQueryTool(Tool):
    method __init__ (line 30) | def __init__(self, llm: LLM | None = None):
    method run (line 39) | def run(self, arguments: Dict) -> str:

FILE: bondai/tools/website/query.py
  function build_prompt (line 16) | def build_prompt(question: str, context: str) -> str:
  class Parameters (line 28) | class Parameters(BaseModel):
  class WebsiteQueryTool (line 34) | class WebsiteQueryTool(Tool):
    method __init__ (line 35) | def __init__(
    method run (line 49) | def run(self, arguments: Dict) -> str:

FILE: bondai/util/caching/llm_cache.py
  class LLMCache (line 8) | class LLMCache(ABC):
    method _get_cache_key (line 9) | def _get_cache_key(self, input_parameters: Dict) -> str:
    method get_cache_item (line 14) | def get_cache_item(self, input_parameters: Dict) -> Optional[Tuple[str...
    method save_cache_item (line 18) | def save_cache_item(self, input_parameters: Dict, response: (str, Dict...
  class PersistentLLMCache (line 22) | class PersistentLLMCache(LLMCache):
    method __init__ (line 23) | def __init__(self, cache_dir: str = "./.cache"):
    method get_cache_item (line 28) | def get_cache_item(self, input_parameters: Dict) -> Optional[Tuple[str...
    method save_cache_item (line 36) | def save_cache_item(self, input_parameters: Dict, response: (str, Dict...
  class InMemoryLLMCache (line 48) | class InMemoryLLMCache(LLMCache):
    method __init__ (line 49) | def __init__(self):
    method get_cache_item (line 52) | def get_cache_item(self, input_parameters: Dict) -> Optional[Tuple[str...
    method save_cache_item (line 58) | def save_cache_item(self, input_parameters: Dict, response: (str, Dict...
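
  A sketch of the cache contract using the signatures above; the shape of input_parameters and of the (text, function) response tuple is inferred from those annotations and is partly an assumption.
    from bondai.util.caching import InMemoryLLMCache, PersistentLLMCache

    cache = InMemoryLLMCache()                        # or PersistentLLMCache(cache_dir="./.cache")
    params = {"model": "gpt-4", "prompt": "Say hi"}   # illustrative key material

    if cache.get_cache_item(params) is None:
        response = ("Hi there!", {})                  # (text, function_call) tuple, per the annotations above
        cache.save_cache_item(params, response)

    print(cache.get_cache_item(params))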

FILE: bondai/util/document_parser.py
  function extract_text_from_directory (line 7) | def extract_text_from_directory(directory: str) -> Dict[str, str]:
  function extract_file_text (line 21) | def extract_file_text(file_path: str) -> str:

FILE: bondai/util/event_mixin.py
  class EventMixin (line 5) | class EventMixin:
    method __init__ (line 6) | def __init__(self, allowed_events: List[str]):
    method on (line 14) | def on(self, event_name: str, target: Callable = None) -> Callable | N...
    method _trigger_event (line 32) | def _trigger_event(self, event_name: str, *args, **kwargs):
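
  EventMixin gives classes a small event surface: declare allowed_events in __init__, register callbacks with on(), and fire them with _trigger_event(). A sketch under those assumptions; the event name and Downloader class are illustrative.
    from bondai.util import EventMixin

    class Downloader(EventMixin):
        def __init__(self):
            super().__init__(allowed_events=["download_complete"])   # event name is illustrative

        def download(self, url: str):
            # ... fetch url here ...
            self._trigger_event("download_complete", url)

    d = Downloader()
    d.on("download_complete", lambda url: print(f"finished: {url}"))
    d.download("https://example.com")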

FILE: bondai/util/misc.py
  function load_local_resource (line 5) | def load_local_resource(local_file: str, resource: str) -> str:
  function format_print_string (line 16) | def format_print_string(s: str, length: int = 100) -> str:

FILE: bondai/util/model_logger.py
  function get_instance_dir (line 7) | def get_instance_dir(logging_dir: str) -> str:
  function write_file (line 17) | def write_file(filename: str, content: str):
  class ModelLogger (line 22) | class ModelLogger:
    method __init__ (line 23) | def __init__(self, logging_dir: str = "./logs"):
    method log (line 26) | def log(self, prompt: str, response: str, function: Dict | None = None):

FILE: bondai/util/runnable.py
  class Runnable (line 6) | class Runnable(ABC):
    method __init__ (line 7) | def __init__(self):
    method _start_execution_thread (line 11) | def _start_execution_thread(self, target: Callable, args: Tuple = ()):
    method join (line 18) | def join(self, timeout=None):
    method stop (line 23) | def stop(self, timeout=10):

FILE: bondai/util/semantic_search.py
  function split_text (line 15) | def split_text(
  function concatenate_strings (line 41) | def concatenate_strings(arr: List[str], n: int) -> List[str]:
  function split_tokens (line 49) | def split_tokens(
  function semantic_search (line 66) | def semantic_search(

FILE: bondai/util/web.py
  function is_html (line 10) | def is_html(text: str) -> bool:
  function get_website_html (line 15) | def get_website_html(url: str) -> str:
  function get_html_text (line 20) | def get_html_text(html: str) -> str:
  function get_website_text (line 25) | def get_website_text(url: str) -> str:
  function query_website_html (line 30) | def query_website_html(url: str, xpath: str) -> str:
  function get_website_links (line 37) | def get_website_links(url: str) -> List[str]:
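
  Usage sketch for the web helpers, based on the signatures above; the URL is illustrative.
    from bondai.util.web import get_website_text, get_website_links, is_html

    url = "https://example.com"
    text = get_website_text(url)        # plain text extracted from the page
    links = get_website_links(url)      # hyperlinks found on the page
    print(is_html("<html><body>hi</body></html>"), len(text), len(links))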

FILE: tests/api-client/test_api_client.py
  function handle_streaming_content_updated (line 13) | def handle_streaming_content_updated(agent_id, content_buffer):
  function handle_streaming_function_updated (line 20) | def handle_streaming_function_updated(agent_id, function_name, arguments...
  function handle_agent_message (line 28) | def handle_agent_message(agent_id, message):
  function handle_tool_selected_message (line 43) | def handle_tool_selected_message(agent_id, message):
  function handle_tool_error_message (line 57) | def handle_tool_error_message(agent_id, message):

FILE: tests/memory/single_agent_with_memory.py
  function retrieve_and_parse_pdf (line 15) | def retrieve_and_parse_pdf(url):

FILE: tests/memory/util.py
  function extract_text_from_directory (line 10) | def extract_text_from_directory(
  function extract_file_text (line 29) | def extract_file_text(file_path: str) -> str:

FILE: website/src/components/HomepageFeatures/index.js
  function Feature (line 38) | function Feature({Svg, title, description}) {
  function HomepageFeatures (line 52) | function HomepageFeatures() {

FILE: website/src/pages/index.js
  function HomepageHeader (line 11) | function HomepageHeader() {
  function Home (line 30) | function Home() {
Condensed preview: 186 files, each showing path, character count, and a content snippet.
[
  {
    "path": ".github/workflows/deploy-website.yaml",
    "chars": 1159,
    "preview": "name: Deploy BondAI Website\n\non:\n  push:\n    branches:\n      - main\n\njobs:\n  deploy:\n    runs-on: ubuntu-latest\n\n    ste"
  },
  {
    "path": ".github/workflows/deploy.yaml",
    "chars": 1609,
    "preview": "name: Deploy to PyPI and DockerHub\n\non:\n  push:\n    tags:\n      - 'v[0-9]+.[0-9]+.[0-9]+[a-zA-Z0-9]*'\n  workflow_dispatc"
  },
  {
    "path": ".gitignore",
    "chars": 3344,
    "preview": "*.DS_Store\nresponse_query_storage\ngmail-token.pickle\n.debug\n.memory\n.cache\nmisc\nui/.next\nui/node_modules\nui/agent-volume"
  },
  {
    "path": ".pre-commit-config.yaml",
    "chars": 91,
    "preview": "repos:\n  - repo: https://github.com/psf/black\n    rev: stable\n    hooks:\n      - id: black\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "chars": 1213,
    "preview": "# Contribution Instruction & Guidelines\n\nHello there! Any kind of contribution to **BondAI** is most welcome!\n\n- If you "
  },
  {
    "path": "LICENSE",
    "chars": 1052,
    "preview": "Copyright 2023 Kevin Rohling\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this softw"
  },
  {
    "path": "MANIFEST.in",
    "chars": 97,
    "preview": "include bondai/prompt/default_prompt_template.md\ninclude bondai/cli/onboarding_prompt_template.md"
  },
  {
    "path": "README.md",
    "chars": 6957,
    "preview": "<a href=\"https://bondai.dev\">\n<p align=\"center\">\n<img src=\"assets/bondai-logo.png\" alt=\"Description or Alt text\" style=\""
  },
  {
    "path": "bondai/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "bondai/agents/__init__.py",
    "chars": 1030,
    "preview": "from .conversational_agent import ConversationalAgent\nfrom .agent import Agent, DEFAULT_MESSAGE_PROMPT_TEMPLATE\nfrom .co"
  },
  {
    "path": "bondai/agents/agent.py",
    "chars": 24116,
    "preview": "import os\nimport uuid\nimport traceback\nfrom pydantic import BaseModel\nfrom datetime import datetime\nfrom typing import D"
  },
  {
    "path": "bondai/agents/compression/__init__.py",
    "chars": 171,
    "preview": "from .conversation_summarizer import summarize_conversation\nfrom .message_summarizer import summarize_messages\n\n__all__ "
  },
  {
    "path": "bondai/agents/compression/conversation_summarizer.py",
    "chars": 1242,
    "preview": "import os\nfrom typing import List\nfrom bondai.models import LLM\nfrom bondai.prompt import PromptBuilder, JinjaPromptBuil"
  },
  {
    "path": "bondai/agents/compression/message_summarizer.py",
    "chars": 3304,
    "preview": "import os\nfrom typing import List\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom bondai.models imp"
  },
  {
    "path": "bondai/agents/compression/prompts/conversation_summarizer_prompt_template.md",
    "chars": 776,
    "preview": "Read the entire conversation provided below and create a summary. Your task is to condense the key information and main "
  },
  {
    "path": "bondai/agents/compression/prompts/message_summarizer_prompt_template.md",
    "chars": 437,
    "preview": "Read the following conversation and summarize the final message:\n\n# Conversation\n{% for msg in previous_messages %}\n- {{"
  },
  {
    "path": "bondai/agents/conversation_member.py",
    "chars": 2028,
    "preview": "import uuid\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom typing import List, Callable\nfrom .messages i"
  },
  {
    "path": "bondai/agents/conversational_agent.py",
    "chars": 14727,
    "preview": "import os\nimport traceback\nimport json\nfrom datetime import datetime\nfrom typing import Dict, List, Callable\nfrom bondai"
  },
  {
    "path": "bondai/agents/group_chat/__init__.py",
    "chars": 498,
    "preview": "from .group_conversation import GroupConversation\nfrom .user_proxy import UserProxy\nfrom .group_conversation_config impo"
  },
  {
    "path": "bondai/agents/group_chat/group_conversation.py",
    "chars": 10037,
    "preview": "import uuid\nimport asyncio\nimport traceback\nfrom datetime import datetime\nfrom typing import Dict, List, Callable\nfrom b"
  },
  {
    "path": "bondai/agents/group_chat/group_conversation_config.py",
    "chars": 3976,
    "preview": "from typing import Dict, List, Set\nfrom abc import ABC, abstractmethod\nfrom bondai.agents import ConversationMember\n\n\ncl"
  },
  {
    "path": "bondai/agents/group_chat/user_proxy.py",
    "chars": 5754,
    "preview": "from termcolor import cprint\nfrom datetime import datetime\nfrom typing import List\nfrom bondai.util import EventMixin\nfr"
  },
  {
    "path": "bondai/agents/messages.py",
    "chars": 6402,
    "preview": "import uuid\nfrom abc import ABC\nfrom typing import List, Dict, Set\nfrom datetime import datetime\nfrom dataclasses import"
  },
  {
    "path": "bondai/agents/prompts/__init__.py",
    "chars": 256,
    "preview": "from .default_persona import (\n    DEFAULT_AGENT_NAME,\n    DEFAULT_CONVERSATIONAL_INSTRUCTIONS,\n    DEFAULT_CONVERSATION"
  },
  {
    "path": "bondai/agents/prompts/agent_message_prompt_template.md",
    "chars": 1336,
    "preview": "{%- if message_type == \"ToolUsageMessage\" %}\n# Message Timestamp\n{{ message.timestamp }}\n\n# Tool Name\nYou used the **{{ "
  },
  {
    "path": "bondai/agents/prompts/conversational_agent_system_prompt_template.md",
    "chars": 2458,
    "preview": "{%- if instructions %}\n# Instructions\n\n{{ instructions }}\n{%- endif %}\n\n\n# Your Persona\n\nYour Name is {{ name }}.\n{%- if"
  },
  {
    "path": "bondai/agents/prompts/default_persona.py",
    "chars": 2745,
    "preview": "from jinja2 import Template\n\nDEFAULT_AGENT_NAME = \"Mira\"\n\nDEFAULT_CONVERSATIONAL_INSTRUCTIONS = \"\"\"The user should alway"
  },
  {
    "path": "bondai/agents/prompts/react_agent_system_prompt_template.md",
    "chars": 1391,
    "preview": "# Instructions\n{%- if instructions %}\n{{ instructions }}\n{%- else %}\nYou are a powerful problem solving agent! \nYou have"
  },
  {
    "path": "bondai/agents/util.py",
    "chars": 4684,
    "preview": "import json\nimport inspect\nimport traceback\nfrom enum import Enum\nfrom typing import List, Dict, Callable\nfrom bondai.mo"
  },
  {
    "path": "bondai/api/__init__.py",
    "chars": 254,
    "preview": "from .client import BondAIAPIClient\nfrom .server import BondAIAPIServer\nfrom .api_user_proxy import APIUserProxy\nfrom .a"
  },
  {
    "path": "bondai/api/agent_wrapper.py",
    "chars": 2311,
    "preview": "from bondai import AGENT_STATE_RUNNING\nfrom .api_error import BondAIAPIError\n\n\nclass AgentWrapper:\n    def __init__(self"
  },
  {
    "path": "bondai/api/api_error.py",
    "chars": 42,
    "preview": "class BondAIAPIError(Exception):\n    pass\n"
  },
  {
    "path": "bondai/api/api_user_proxy.py",
    "chars": 2443,
    "preview": "import json\nfrom typing import Callable, List\nfrom flask_socketio import SocketIO\nfrom bondai.util import EventMixin\nfro"
  },
  {
    "path": "bondai/api/client.py",
    "chars": 4466,
    "preview": "import json\nimport requests\nfrom socketio import Client\nfrom bondai.util import EventMixin\nfrom bondai.agents import Age"
  },
  {
    "path": "bondai/api/routes.py",
    "chars": 3888,
    "preview": "from typing import List\nfrom flask import jsonify, request, abort\nfrom .settings import get_settings, set_settings\nfrom "
  },
  {
    "path": "bondai/api/server.py",
    "chars": 7314,
    "preview": "import os\nimport logging\nimport json\nfrom typing import Callable, List\nfrom datetime import datetime\nfrom dataclasses im"
  },
  {
    "path": "bondai/api/settings.py",
    "chars": 5683,
    "preview": "import os\nfrom itertools import chain\nfrom flask import request, jsonify\nfrom flask_restful import Resource\nfrom bondai."
  },
  {
    "path": "bondai/cli/__init__.py",
    "chars": 55,
    "preview": "from .cli import run_cli\n\n__all__ = [\n    \"run_cli\",\n]\n"
  },
  {
    "path": "bondai/cli/cli.py",
    "chars": 5613,
    "preview": "#!/usr/bin/env python3\nimport os\nimport argparse\nfrom termcolor import cprint\nfrom bondai.util import ModelLogger\nfrom b"
  },
  {
    "path": "bondai/cli/default_tools.py",
    "chars": 3439,
    "preview": "import os\nfrom termcolor import cprint\nfrom bondai.tools import DalleTool, PythonREPLTool, ShellTool\nfrom bondai.tools.a"
  },
  {
    "path": "bondai/cli/personas/__init__.py",
    "chars": 83,
    "preview": "from . import (\n    user_liaison_agent,\n)\n\n__all__ = [\n    \"user_liaison_agent\",\n]\n"
  },
  {
    "path": "bondai/cli/personas/user_liaison_agent.py",
    "chars": 1774,
    "preview": "NAME = \"BondAI\"\n\nPERSONA = (\n    \"- Friendly, approachable, and empathetic. \"\n    \"- Efficient and clear communicator, a"
  },
  {
    "path": "bondai/main.py",
    "chars": 123,
    "preview": "#!/usr/bin/env python3\n\nfrom bondai.cli import run_cli\n\n\ndef main():\n    run_cli()\n\n\nif __name__ == \"__main__\":\n    main"
  },
  {
    "path": "bondai/memory/__init__.py",
    "chars": 1447,
    "preview": "from .memory_manager import (\n    MemoryManager,\n    PersistentMemoryManager,\n    ConversationalMemoryManager,\n)\nfrom .a"
  },
  {
    "path": "bondai/memory/archival/__init__.py",
    "chars": 309,
    "preview": "from .datasources import ArchivalMemoryDataSource, PersistentArchivalMemoryDataSource\nfrom .tools import ArchivalMemoryI"
  },
  {
    "path": "bondai/memory/archival/datasources.py",
    "chars": 5499,
    "preview": "import os\nimport json\nimport numpy as np\nimport faiss\nfrom typing import List\nfrom abc import ABC, abstractmethod\nfrom b"
  },
  {
    "path": "bondai/memory/archival/tools.py",
    "chars": 1819,
    "preview": "from pydantic import BaseModel\nfrom bondai.tools import Tool\nfrom .datasources import ArchivalMemoryDataSource\n\nARCHIVAL"
  },
  {
    "path": "bondai/memory/conversation/__init__.py",
    "chars": 362,
    "preview": "from .datasources import (\n    ConversationMemoryDataSource,\n    PersistentConversationMemoryDataSource,\n)\nfrom .tools i"
  },
  {
    "path": "bondai/memory/conversation/datasources.py",
    "chars": 4439,
    "preview": "import os\nimport json\nfrom abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import List\nfrom bon"
  },
  {
    "path": "bondai/memory/conversation/tools.py",
    "chars": 2386,
    "preview": "from pydantic import BaseModel\nfrom datetime import datetime\nfrom typing import List\nfrom bondai.tools import Tool\nfrom "
  },
  {
    "path": "bondai/memory/core/__init__.py",
    "chars": 362,
    "preview": "from .datasources import (\n    CoreMemoryDataSource,\n    PersistentCoreMemoryDataSource,\n    InMemoryCoreMemoryDataSourc"
  },
  {
    "path": "bondai/memory/core/datasources.py",
    "chars": 2639,
    "preview": "import os\nimport json\nfrom abc import ABC, abstractmethod\nfrom typing import List, Dict\n\nDEFAULT_MEMORY_SECTIONS = {\n   "
  },
  {
    "path": "bondai/memory/core/tools.py",
    "chars": 2299,
    "preview": "from pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom .datasources import CoreMemory"
  },
  {
    "path": "bondai/memory/memory_manager.py",
    "chars": 4573,
    "preview": "import os\nfrom typing import Callable\nfrom bondai.prompt import JinjaPromptBuilder\nfrom bondai.util import load_local_re"
  },
  {
    "path": "bondai/memory/prompts/default_prompt_template.md",
    "chars": 3214,
    "preview": "# Memory System Overview\n\nOlder AI models had no concept of persistent memory; they were only able to access their initi"
  },
  {
    "path": "bondai/models/__init__.py",
    "chars": 113,
    "preview": "from .embedding_model import EmbeddingModel\nfrom .llm import LLM\n\n__all__ = [\n    \"EmbeddingModel\",\n    \"LLM\",\n]\n"
  },
  {
    "path": "bondai/models/embedding_model.py",
    "chars": 430,
    "preview": "from abc import ABC, abstractmethod\nfrom typing import List\n\n\nclass EmbeddingModel(ABC):\n    @property\n    @abstractmeth"
  },
  {
    "path": "bondai/models/llm.py",
    "chars": 877,
    "preview": "from abc import ABC, abstractmethod\nfrom typing import Dict, List, Callable\n\n\nclass LLM(ABC):\n    @property\n    @abstrac"
  },
  {
    "path": "bondai/models/openai/__init__.py",
    "chars": 805,
    "preview": "from .openai_llm import OpenAILLM\nfrom .openai_embedding_model import OpenAIEmbeddingModel\nfrom .openai_wrapper import ("
  },
  {
    "path": "bondai/models/openai/default_openai_connection_params.py",
    "chars": 6070,
    "preview": "import os\nfrom .env_vars import *\nfrom .openai_connection_params import OpenAIConnectionParams, OpenAIConnectionType\n\ngp"
  },
  {
    "path": "bondai/models/openai/env_vars.py",
    "chars": 1252,
    "preview": "OPENAI_API_KEY_ENV_VAR = \"OPENAI_API_KEY\"\nOPENAI_CONNECTION_TYPE_ENV_VAR = \"OPENAI_CONNECTION_TYPE\"\nAZURE_OPENAI_EMBEDDI"
  },
  {
    "path": "bondai/models/openai/openai_connection_params.py",
    "chars": 3173,
    "preview": "from .openai_models import OpenAIConnectionType\n\n\nclass OpenAIConnectionParams:\n    def __init__(\n        self,\n        "
  },
  {
    "path": "bondai/models/openai/openai_embedding_model.py",
    "chars": 1641,
    "preview": "from typing import List, Dict\nfrom bondai.models import EmbeddingModel\nfrom .openai_models import ModelConfig, OpenAIMod"
  },
  {
    "path": "bondai/models/openai/openai_llm.py",
    "chars": 3913,
    "preview": "from typing import Dict, List, Callable\nfrom bondai.models import LLM\nfrom bondai.util.caching import LLMCache\nfrom .ope"
  },
  {
    "path": "bondai/models/openai/openai_models.py",
    "chars": 2950,
    "preview": "from enum import Enum\n\n\nclass OpenAIConnectionType(Enum):\n    AZURE: str = \"azure\"\n    OPENAI: str = \"openai\"\n\n\nclass Op"
  },
  {
    "path": "bondai/models/openai/openai_wrapper.py",
    "chars": 8649,
    "preview": "import json\nimport tiktoken\nfrom typing import Dict, List, Callable\nfrom openai import OpenAI, AzureOpenAI\nfrom .openai_"
  },
  {
    "path": "bondai/prompt/__init__.py",
    "chars": 227,
    "preview": "from .prompt_builder import PromptBuilder\nfrom .default_prompt_builder import DefaultPromptBuilder\nfrom .jinja_prompt_bu"
  },
  {
    "path": "bondai/prompt/default_prompt_builder.py",
    "chars": 306,
    "preview": "from bondai.prompt import PromptBuilder\n\n\nclass DefaultPromptBuilder(PromptBuilder):\n    def __init__(self, prompt_templ"
  },
  {
    "path": "bondai/prompt/default_prompt_template.md",
    "chars": 1533,
    "preview": "# Introduction #\n\nYou are a powerful problem solving agent! \nYou have access to a set of tools that give you capabilitie"
  },
  {
    "path": "bondai/prompt/jinja_prompt_builder.py",
    "chars": 703,
    "preview": "import platform\nfrom datetime import datetime\nfrom jinja2 import Template\nfrom bondai.prompt import PromptBuilder\n\n\nclas"
  },
  {
    "path": "bondai/prompt/prompt_builder.py",
    "chars": 410,
    "preview": "from abc import ABC, abstractmethod\nfrom typing import Dict, Any\n\n\nclass PromptBuilder(ABC):\n    def __call__(self, **kw"
  },
  {
    "path": "bondai/tools/__init__.py",
    "chars": 560,
    "preview": "from .tool import Tool, EmptyParameters, InputParameters\nfrom .agent_tool import AgentTool\nfrom .dalle_tool import Dalle"
  },
  {
    "path": "bondai/tools/agent_tool.py",
    "chars": 1332,
    "preview": "from pydantic import BaseModel\nfrom bondai.tools import Tool\n\nTOOL_NAME = \"agent_tool\"\nTOOL_DESCRIPTION = (\n    \"This to"
  },
  {
    "path": "bondai/tools/alpaca_markets/__init__.py",
    "chars": 598,
    "preview": "from .env_vars import ALPACA_MARKETS_API_KEY_ENV_VAR, ALPACA_MARKETS_SECRET_KEY_ENV_VAR\nfrom .create_order import Create"
  },
  {
    "path": "bondai/tools/alpaca_markets/create_order.py",
    "chars": 3337,
    "preview": "import os\nfrom pydantic import BaseModel\nfrom bondai.tools import Tool\nfrom .response_formatter import format_order_resp"
  },
  {
    "path": "bondai/tools/alpaca_markets/env_vars.py",
    "chars": 122,
    "preview": "ALPACA_MARKETS_API_KEY_ENV_VAR = \"ALPACA_MARKETS_API_KEY\"\nALPACA_MARKETS_SECRET_KEY_ENV_VAR = \"ALPACA_MARKETS_SECRET_KEY"
  },
  {
    "path": "bondai/tools/alpaca_markets/get_account.py",
    "chars": 927,
    "preview": "import os\nfrom bondai.tools import Tool\nfrom .response_formatter import format_account_response\nfrom .env_vars import AL"
  },
  {
    "path": "bondai/tools/alpaca_markets/list_positions.py",
    "chars": 1010,
    "preview": "import os\nfrom bondai.tools.tool import Tool\nfrom .response_formatter import format_positions_response\nfrom .env_vars im"
  },
  {
    "path": "bondai/tools/alpaca_markets/response_formatter.py",
    "chars": 2160,
    "preview": "def format_order_response(response):\n    return f\"\"\"Order ID: {response.id}\nStatus: {response.status}\nFilled At: {respon"
  },
  {
    "path": "bondai/tools/bland_ai/__init__.py",
    "chars": 281,
    "preview": "from .bland_ai_tools import (\n    BlandAITool,\n    BLAND_AI_API_KEY_ENV_VAR,\n    BLAND_AI_VOICE_ID_ENV_VAR,\n    BLAND_AI"
  },
  {
    "path": "bondai/tools/bland_ai/bland_ai_tools.py",
    "chars": 5163,
    "preview": "import re\nimport os\nimport requests\nimport time\nfrom pydantic import BaseModel\nfrom bondai.tools import Tool\n\nTOOL_NAME "
  },
  {
    "path": "bondai/tools/conversational/__init__.py",
    "chars": 277,
    "preview": "from .conversational_tools import (\n    SEND_MESSAGE_TOOL_NAME,\n    SendMessageTool,\n    EXIT_CONVERSATION_TOOL_NAME,\n  "
  },
  {
    "path": "bondai/tools/conversational/conversational_tools.py",
    "chars": 1948,
    "preview": "from pydantic import BaseModel\nfrom bondai.tools import Tool\nfrom typing import Dict, Tuple\nfrom bondai.agents.messages "
  },
  {
    "path": "bondai/tools/dalle_tool.py",
    "chars": 2139,
    "preview": "import requests\nimport openai\nfrom pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom "
  },
  {
    "path": "bondai/tools/database/__init__.py",
    "chars": 107,
    "preview": "from .db_query import DatabaseQueryTool, PG_URI_ENV_VAR\n\n__all__ = [\"DatabaseQueryTool\", \"PG_URI_ENV_VAR\"]\n"
  },
  {
    "path": "bondai/tools/database/db_query.py",
    "chars": 5889,
    "preview": "import os\nimport psycopg2\nfrom pydantic import BaseModel\nfrom bondai.tools import Tool\nfrom bondai.models import LLM\nfro"
  },
  {
    "path": "bondai/tools/file/__init__.py",
    "chars": 189,
    "preview": "from .file_query import FileQueryTool\nfrom .file_write import FileWriteTool\nfrom .file_read import FileReadTool\n\n__all__"
  },
  {
    "path": "bondai/tools/file/file_query.py",
    "chars": 2773,
    "preview": "from pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom bondai.models import LLM, Embe"
  },
  {
    "path": "bondai/tools/file/file_read.py",
    "chars": 978,
    "preview": "from pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom bondai.util import extract_fil"
  },
  {
    "path": "bondai/tools/file/file_write.py",
    "chars": 1147,
    "preview": "from pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\n\nTOOL_NAME = \"file_write\"\nTOOL_DESC"
  },
  {
    "path": "bondai/tools/gmail/__init__.py",
    "chars": 142,
    "preview": "from .list_emails import ListEmailsTool\nfrom .query_emails import QueryEmailsTool\n\n__all__ = [\n    \"ListEmailsTool\",\n   "
  },
  {
    "path": "bondai/tools/gmail/list_emails.py",
    "chars": 3867,
    "preview": "import pickle\nfrom pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom googleapiclient."
  },
  {
    "path": "bondai/tools/gmail/query_emails.py",
    "chars": 3815,
    "preview": "import pickle\nimport base64\nfrom googleapiclient.discovery import build\nfrom typing import List, Dict\nfrom pydantic impo"
  },
  {
    "path": "bondai/tools/langchain_tool.py",
    "chars": 585,
    "preview": "from pydantic import BaseModel\nfrom bondai.tools import Tool, InputParameters\nfrom typing import Dict\n\n\nclass LangChainT"
  },
  {
    "path": "bondai/tools/python_repl_tool.py",
    "chars": 3181,
    "preview": "import io\nfrom contextlib import redirect_stdout, redirect_stderr\nfrom pydantic import BaseModel\nfrom typing import Dict"
  },
  {
    "path": "bondai/tools/response_query.py",
    "chars": 2567,
    "preview": "import uuid\nfrom pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom bondai.util import"
  },
  {
    "path": "bondai/tools/search/__init__.py",
    "chars": 165,
    "preview": "from .google_search import GoogleSearchTool\nfrom .duck_duck_go_search import DuckDuckGoSearchTool\n\n__all__ = [\n    \"Goog"
  },
  {
    "path": "bondai/tools/search/duck_duck_go_search.py",
    "chars": 1699,
    "preview": "from pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools.tool import Tool\nfrom duckduckgo_search import "
  },
  {
    "path": "bondai/tools/search/google_search.py",
    "chars": 2025,
    "preview": "import os\nfrom googleapiclient.discovery import build\nfrom pydantic import BaseModel\nfrom bondai.tools.tool import Tool\n"
  },
  {
    "path": "bondai/tools/shell_tool.py",
    "chars": 2431,
    "preview": "import threading\nimport subprocess\nimport shlex\nfrom queue import Queue\nfrom pydantic import BaseModel\nfrom typing impor"
  },
  {
    "path": "bondai/tools/task_completed_tool.py",
    "chars": 511,
    "preview": "from pydantic import BaseModel\nfrom bondai.tools import Tool\nfrom typing import Dict\n\n\nclass TaskCompletedToolParameters"
  },
  {
    "path": "bondai/tools/tool.py",
    "chars": 1839,
    "preview": "from pydantic import BaseModel\nfrom typing import Dict\n\n\nclass InputParameters(BaseModel):\n    input: str\n    thought: s"
  },
  {
    "path": "bondai/tools/vision/__init__.py",
    "chars": 84,
    "preview": "from .image_analysis_tool import ImageAnalysisTool\n\n__all__ = [\"ImageAnalysisTool\"]\n"
  },
  {
    "path": "bondai/tools/vision/image_analysis_tool.py",
    "chars": 2620,
    "preview": "import base64\nfrom pydantic import BaseModel\nfrom typing import Dict, Optional\nfrom bondai.tools import Tool\nfrom bondai"
  },
  {
    "path": "bondai/tools/website/__init__.py",
    "chars": 314,
    "preview": "from .extract_hyperlinks import WebsiteExtractHyperlinksTool\nfrom .html_query import WebsiteHtmlQueryTool\nfrom .query im"
  },
  {
    "path": "bondai/tools/website/download_file.py",
    "chars": 1290,
    "preview": "import requests\nfrom pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\n\nTOOL_NAME = \"downl"
  },
  {
    "path": "bondai/tools/website/extract_hyperlinks.py",
    "chars": 1106,
    "preview": "import requests\nfrom pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom bondai.util im"
  },
  {
    "path": "bondai/tools/website/html_query.py",
    "chars": 1861,
    "preview": "import requests\nfrom pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools.tool import Tool\nfrom bondai.ut"
  },
  {
    "path": "bondai/tools/website/query.py",
    "chars": 2399,
    "preview": "import requests\nfrom pydantic import BaseModel\nfrom typing import Dict\nfrom bondai.tools import Tool\nfrom bondai.util im"
  },
  {
    "path": "bondai/util/__init__.py",
    "chars": 735,
    "preview": "from .model_logger import ModelLogger\nfrom .misc import load_local_resource, format_print_string\nfrom .semantic_search i"
  },
  {
    "path": "bondai/util/caching/__init__.py",
    "chars": 136,
    "preview": "from .llm_cache import LLMCache, PersistentLLMCache, InMemoryLLMCache\n\n__all__ = [\"LLMCache\", \"PersistentLLMCache\", \"InM"
  },
  {
    "path": "bondai/util/caching/llm_cache.py",
    "chars": 2100,
    "preview": "import os\nimport json\nimport hashlib\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple, Dict, Optional\n\n\nclas"
  },
  {
    "path": "bondai/util/document_parser.py",
    "chars": 1344,
    "preview": "import os\nimport PyPDF2\nimport docx\nfrom typing import Dict\n\n\ndef extract_text_from_directory(directory: str) -> Dict[st"
  },
  {
    "path": "bondai/util/event_mixin.py",
    "chars": 1239,
    "preview": "from enum import Enum\nfrom typing import List, Callable\n\n\nclass EventMixin:\n    def __init__(self, allowed_events: List["
  },
  {
    "path": "bondai/util/misc.py",
    "chars": 658,
    "preview": "import os\nimport pkg_resources\n\n\ndef load_local_resource(local_file: str, resource: str) -> str:\n    current_dir = os.pa"
  },
  {
    "path": "bondai/util/model_logger.py",
    "chars": 932,
    "preview": "import os\nimport json\nfrom datetime import datetime\nfrom typing import Dict\n\n\ndef get_instance_dir(logging_dir: str) -> "
  },
  {
    "path": "bondai/util/runnable.py",
    "chars": 1196,
    "preview": "from abc import ABC\nfrom typing import List, Callable, Tuple\nimport threading\n\n\nclass Runnable(ABC):\n    def __init__(se"
  },
  {
    "path": "bondai/util/semantic_search.py",
    "chars": 4356,
    "preview": "import nltk\nimport faiss\nimport numpy as np\nfrom typing import List\nfrom bondai.models import EmbeddingModel\nfrom concur"
  },
  {
    "path": "bondai/util/web.py",
    "chars": 1031,
    "preview": "import requests\nfrom bs4 import BeautifulSoup\nfrom typing import List\n\nREQUEST_HEADERS = {\n    \"User-Agent\": \"Mozilla/5."
  },
  {
    "path": "docker/Dockerfile",
    "chars": 198,
    "preview": "FROM ubuntu:latest\n\nLABEL maintainer=\"kevin@kevinrohling.com\"\n\nRUN apt-get update && apt-get install -y \\\n    python3 \\\n"
  },
  {
    "path": "docker/docker-compose.yml",
    "chars": 271,
    "preview": "version: '3.7'\n\nservices:\n  bondai:\n    build:\n      context: .\n      dockerfile: Dockerfile\n    working_dir: /agent-vol"
  },
  {
    "path": "requirements.txt",
    "chars": 1649,
    "preview": "aiofiles==23.2.1\naiohttp==3.8.5\naiosignal==1.3.1\nalpaca-py==0.13.4\naniso8601==9.0.1\nannotated-types==0.5.0\nanyio==3.7.1\n"
  },
  {
    "path": "sample.env",
    "chars": 879,
    "preview": "# Always Required\nOPENAI_API_KEY=\n\n# Required to enable Google Search tool\nGOOGLE_API_KEY=\nGOOGLE_CSE_ID=\n\n# Required to"
  },
  {
    "path": "scripts/bondai",
    "chars": 122,
    "preview": "#!/usr/bin/env python3\n\nfrom bondai.cli import run_cli\n\n\ndef main():\n    run_cli()\n\nif __name__ == '__main__':\n    main("
  },
  {
    "path": "setup.py",
    "chars": 1154,
    "preview": "from setuptools import setup, find_packages\n\n# If you have a requirements.txt, you can read it to set the install_requir"
  },
  {
    "path": "tests/api-client/test_api_client.py",
    "chars": 1991,
    "preview": "from termcolor import cprint\nfrom bondai.api import BondAIAPIClient\n\n# Create the client\nclient = BondAIAPIClient()\nclie"
  },
  {
    "path": "tests/conversational/hierarchical_conversation.py",
    "chars": 2055,
    "preview": "from bondai.models.openai import get_total_cost, OpenAILLM, OpenAIModelNames\nfrom bondai.tools.file import FileWriteTool"
  },
  {
    "path": "tests/conversational/single_agent.py",
    "chars": 798,
    "preview": "from bondai.models.openai import get_total_cost, OpenAILLM, OpenAIModelNames\nfrom bondai.tools.file import FileWriteTool"
  },
  {
    "path": "tests/debug/test_error.py",
    "chars": 2883,
    "preview": "from openai import OpenAI\nfrom bondai.models.openai import DefaultOpenAIConnectionParams\n\nparams = {\n    \"temperature\": "
  },
  {
    "path": "tests/getting-started/example-1.py",
    "chars": 864,
    "preview": "from bondai.agents import Agent, AgentEventNames\nfrom bondai.tools import PythonREPLTool\nfrom bondai.tools.search import"
  },
  {
    "path": "tests/memory/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tests/memory/single_agent_with_memory.py",
    "chars": 1817,
    "preview": "from bondai.agents import ConversationalAgent, AgentEventNames\nfrom bondai.models.openai import OpenAIEmbeddingModel\nfro"
  },
  {
    "path": "tests/memory/util.py",
    "chars": 1634,
    "preview": "import os\nimport PyPDF2\nimport docx\nfrom typing import List\nfrom bondai.util import split_text\nfrom bondai.models import"
  },
  {
    "path": "tests/vision/single_agent_with_vision.py",
    "chars": 306,
    "preview": "from bondai.tools.vision import ImageAnalysisTool\nfrom bondai.agents import Agent\n\nagent = Agent(tools=[ImageAnalysisToo"
  },
  {
    "path": "website/.gitignore",
    "chars": 233,
    "preview": "# Dependencies\n/node_modules\n\n# Production\n/build\n\n# Generated files\n.docusaurus\n.cache-loader\n\n# Misc\n.DS_Store\n.env.lo"
  },
  {
    "path": "website/README.md",
    "chars": 770,
    "preview": "# Website\n\nThis website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.\n\n### I"
  },
  {
    "path": "website/babel.config.js",
    "chars": 89,
    "preview": "module.exports = {\n  presets: [require.resolve('@docusaurus/core/lib/babel/preset')],\n};\n"
  },
  {
    "path": "website/docs/agent-memory/agent-memory.md",
    "chars": 1411,
    "preview": "---\nsidebar_position: 5\n---\n\n# Memory Management\n\nMemory Management in BondAI is inspired by the tiered memory approach "
  },
  {
    "path": "website/docs/agent-memory/archival-memory.md",
    "chars": 4171,
    "preview": "---\nsidebar_position: 3\n---\n\n# Archival Memory\n\nArchival Memory in BondAI, inspired by the [MemGPT paper](https://arxiv."
  },
  {
    "path": "website/docs/agent-memory/conversation-memory.md",
    "chars": 4037,
    "preview": "---\nsidebar_position: 2\n---\n\n# Conversation Memory\n\nConversation Memory in BondAI, inspired by the [MemGPT paper](https:"
  },
  {
    "path": "website/docs/agent-memory/core-memory.md",
    "chars": 4216,
    "preview": "---\nsidebar_position: 1\n---\n\n# Core Memory\n\nCore Memory in BondAI, inspired by the [MemGPT paper](https://arxiv.org/pdf/"
  },
  {
    "path": "website/docs/agent-memory/memory-manager.md",
    "chars": 2109,
    "preview": "---\nsidebar_position: 4\n---\n\n# MemoryManager\n\nThe MemoryManager class in BondAI is designed to orchestrate the memory ma"
  },
  {
    "path": "website/docs/agents/agents.md",
    "chars": 1340,
    "preview": "---\nsidebar_position: 3\n---\n\n# Agents in BondAI\n\nAgents in BondAI, encompassing both [ConversationalAgent](./conversatio"
  },
  {
    "path": "website/docs/agents/conversational-agent.md",
    "chars": 4700,
    "preview": "---\nsidebar_position: 2\n---\n\n# Conversational Agents\n\n\n\n# ConversationalAgent\n**bondai.agents.ConversationalAgent**\n\nThe"
  },
  {
    "path": "website/docs/agents/react-agent.md",
    "chars": 5980,
    "preview": "---\nsidebar_position: 1\n---\n\n# ReAct Agents\n\nReAct Agents in BondAI are based on research findings in the [ReAct: Synerg"
  },
  {
    "path": "website/docs/api-spec/_category_.json",
    "chars": 99,
    "preview": "{\n  \"label\": \"API Specification\",\n  \"position\": 7,\n  \"link\": {\n    \"type\": \"generated-index\"\n  }\n}\n"
  },
  {
    "path": "website/docs/api-spec/add-agent-tool.md",
    "chars": 346,
    "preview": "---\nsidebar_position: 8\n---\n\n# Add Agent Tool\n\n`POST /agents/<agent_id>/tools`\n\nThis API adds a tool to the Agent so it "
  },
  {
    "path": "website/docs/api-spec/api-client.md",
    "chars": 2142,
    "preview": "---\nsidebar_position: 11\n---\n\n# Python API Client\n\nBondAI comes with a Python API client that can be used to communicate"
  },
  {
    "path": "website/docs/api-spec/create-agent.md",
    "chars": 2219,
    "preview": "---\nsidebar_position: 2\n---\n\n# Create Agent\n\n`POST /agents`\n\nThis API will create a Conversational Agent.\n\n**Response Bo"
  },
  {
    "path": "website/docs/api-spec/get-agent.md",
    "chars": 3488,
    "preview": "---\nsidebar_position: 5\n---\n\n# Get Agent State\n\n`GET /agents/<agent_id>`\n\nThis API returns the current state of an Agent"
  },
  {
    "path": "website/docs/api-spec/get-tools.md",
    "chars": 2087,
    "preview": "---\nsidebar_position: 7\n---\n\n# Get Tools\n\n`GET /tools`\n\nThis API returns the list of all of tools that BondAI has loaded"
  },
  {
    "path": "website/docs/api-spec/getting-started.md",
    "chars": 294,
    "preview": "---\nsidebar_position: 1\n---\n\n# Getting Started\n\nBondAI comes with a build in RESTful/WebSocket API server. You can start"
  },
  {
    "path": "website/docs/api-spec/list-agents.md",
    "chars": 3802,
    "preview": "---\nsidebar_position: 4\n---\n\n# List Agents\n\n`GET /agents`\n\nThis API returns a list of all active Agents.\n\n**Response Bod"
  },
  {
    "path": "website/docs/api-spec/remove-agent-tool.md",
    "chars": 218,
    "preview": "---\nsidebar_position: 9\n---\n\n# Remove Agent Tool\n\n`DELETE /agents/<agent_id>/tools/<tool_name>`\n\nThis API removes a tool"
  },
  {
    "path": "website/docs/api-spec/send-message.md",
    "chars": 284,
    "preview": "---\nsidebar_position: 3\n---\n\n# Send Message\n\n`POST /agents/<agent_id>/messages`\n\nThis API will send a message to an agen"
  },
  {
    "path": "website/docs/api-spec/stop-agent.md",
    "chars": 197,
    "preview": "---\nsidebar_position: 6\n---\n\n# Stop Agent\n\n`POST /agents/<agent_id>/stop`\n\nThis API will forcibly stop the specified Age"
  },
  {
    "path": "website/docs/api-spec/ws-events.md",
    "chars": 4989,
    "preview": "---\nsidebar_position: 10\n---\n\n# WebSocket Events\n\nThe BondAI API server will host a WebSocket endpoint on the specified "
  },
  {
    "path": "website/docs/azure.md",
    "chars": 2096,
    "preview": "---\nsidebar_position: 11\n---\n\n# Azure OpenAI Services\n\nBondAI has support for Azure OpenAI Services for all GPT-N models"
  },
  {
    "path": "website/docs/cli.md",
    "chars": 5867,
    "preview": "---\nsidebar_position: 9\n---\n\nimport googleLogo from './img/google-logo.png'\nimport alpacaMarketsLogo from './img/alpaca-"
  },
  {
    "path": "website/docs/docker.md",
    "chars": 1366,
    "preview": "---\nsidebar_position: 10\n---\n\n# Using Docker\n\n## BondAI Docker Image\n\nBondAI Docker images are available on [DockerHub h"
  },
  {
    "path": "website/docs/examples/_category_.json",
    "chars": 90,
    "preview": "{\n  \"label\": \"Examples\",\n  \"position\": 8,\n  \"link\": {\n    \"type\": \"generated-index\"\n  }\n}\n"
  },
  {
    "path": "website/docs/examples/api-client.md",
    "chars": 2166,
    "preview": "---\nsidebar_position: 4\n---\n\n# API Client\n\nThis example demonstrates how to use the BondAIAPIClient to communicate with "
  },
  {
    "path": "website/docs/examples/code-interpreter.md",
    "chars": 2903,
    "preview": "---\nsidebar_position: 3\n---\n\nimport gdpChart from './img/us_gdp_2000_2010.png'\n\n# Code Interpreter\n\nIn this example we d"
  },
  {
    "path": "website/docs/examples/home-automation.md",
    "chars": 10223,
    "preview": "---\nsidebar_position: 2\n---\n\n# Home Automation\n\nIn this example we demonstrate how BondAI is able to use the *ShellTool*"
  },
  {
    "path": "website/docs/examples/investor-agent.md",
    "chars": 2061,
    "preview": "---\nsidebar_position: 1\n---\n\n# Investor Agent\n\nIn this example we demonstrate BondAI's ability to use the Alpaca Market "
  },
  {
    "path": "website/docs/examples/online-research/metformin-research.md",
    "chars": 2926,
    "preview": "# Example Output\n\n*This output was produced by the [BondAI online research agent](../online-research/)*\n\n\n# Metformin an"
  },
  {
    "path": "website/docs/examples/online-research/online-research.md",
    "chars": 6411,
    "preview": "---\nsidebar_position: 4\n---\n\n# Online Research Agent\n\n**[View Agent Output](./metformin-research)**\n\nIn this example we "
  },
  {
    "path": "website/docs/getting-started.md",
    "chars": 3638,
    "preview": "---\nsidebar_position: 2\n---\n\n# Getting Started\n\nThere are 3 ways to use BondAI:\n\n1) 🛠️ **Command Line Interface (CLI)** "
  },
  {
    "path": "website/docs/intro.md",
    "chars": 3983,
    "preview": "---\nsidebar_position: 1\n---\n\nimport bondaiLogo from './img/bondai-logo.png'\nimport googleLogo from './img/google-logo.pn"
  },
  {
    "path": "website/docs/multi-agent-systems/examples.md",
    "chars": 5387,
    "preview": "---\nsidebar_position: 1\n---\n\n# Multi-Agent Architectures\n\n## Example 1: Flat Multi-Agent Architecture\n\nIn this example a"
  },
  {
    "path": "website/docs/multi-agent-systems/group-conversation.md",
    "chars": 2852,
    "preview": "---\nsidebar_position: 2\n---\n\n\n# GroupConversation\n\nThe GroupConversation class in BondAI facilitates the creation and ma"
  },
  {
    "path": "website/docs/multi-agent-systems/multi-agent-systems.md",
    "chars": 1834,
    "preview": "---\nsidebar_position: 4\n---\n\n# Multi-Agent Systems\n\nMulti-Agent Systems (MAS) in BondAI represent a sophisticated approa"
  },
  {
    "path": "website/docs/multi-agent-systems/team-conversation-config.md",
    "chars": 1281,
    "preview": "---\nsidebar_position: 3\n---\n\n\n# TeamConversationConfig\n\nThe TeamConversationConfig class in BondAI structures the conver"
  },
  {
    "path": "website/docs/tools/_category_.json",
    "chars": 87,
    "preview": "{\n  \"label\": \"Tools\",\n  \"position\": 6,\n  \"link\": {\n    \"type\": \"generated-index\"\n  }\n}\n"
  },
  {
    "path": "website/docs/tools/custom-tool.md",
    "chars": 5528,
    "preview": "---\nsidebar_position: 2\n---\n\n# Building Custom Tools\n\nBy building your own custom tools you can give BondAI the power to"
  },
  {
    "path": "website/docs/tools/getting-started.md",
    "chars": 4715,
    "preview": "---\nsidebar_position: 1\n---\n\nimport bondaiLogo from '../img/bondai-logo.png'\nimport googleLogo from '../img/google-logo."
  },
  {
    "path": "website/docusaurus.config.js",
    "chars": 3075,
    "preview": "// @ts-check\n// Note: type annotations allow type checking and IDEs autocompletion\n\nconst lightCodeTheme = require('pris"
  },
  {
    "path": "website/package.json",
    "chars": 1030,
    "preview": "{\n  \"name\": \"bondai-docs\",\n  \"version\": \"0.0.0\",\n  \"private\": true,\n  \"scripts\": {\n    \"docusaurus\": \"docusaurus\",\n    \""
  },
  {
    "path": "website/sidebars.js",
    "chars": 781,
    "preview": "/**\n * Creating a sidebar enables you to:\n - create an ordered group of docs\n - render a sidebar for each doc of that gr"
  },
  {
    "path": "website/src/components/HomepageFeatures/index.js",
    "chars": 1692,
    "preview": "import React from 'react';\nimport clsx from 'clsx';\nimport styles from './styles.module.css';\n\nconst FeatureList = [\n  {"
  },
  {
    "path": "website/src/components/HomepageFeatures/styles.module.css",
    "chars": 138,
    "preview": ".features {\n  display: flex;\n  align-items: center;\n  padding: 2rem 0;\n  width: 100%;\n}\n\n.featureSvg {\n  height: 200px;\n"
  },
  {
    "path": "website/src/css/custom.css",
    "chars": 1042,
    "preview": "/**\n * Any CSS included here will be global. The classic template\n * bundles Infima by default. Infima is a CSS framewor"
  },
  {
    "path": "website/src/pages/index.js",
    "chars": 1176,
    "preview": "import React, { useEffect } from 'react';\nimport { useHistory } from 'react-router-dom';\nimport clsx from 'clsx';\nimport"
  },
  {
    "path": "website/src/pages/index.module.css",
    "chars": 365,
    "preview": "/**\n * CSS files with the .module.css suffix will be treated as CSS modules\n * and scoped locally.\n */\n\n.heroBanner {\n  "
  },
  {
    "path": "website/src/pages/markdown-page.md",
    "chars": 118,
    "preview": "---\ntitle: Markdown page example\n---\n\n# Markdown page example\n\nYou don't need React to write simple standalone pages.\n"
  },
  {
    "path": "website/static/.nojekyll",
    "chars": 0,
    "preview": ""
  }
]

About this extraction

This page contains the full source code of the krohling/bondai GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 186 files (410.8 KB), approximately 98.1k tokens, and a symbol index with 537 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
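The manifest above lists one entry per extracted file, with its repository path, its size in characters, and a truncated preview of its contents. A minimal sketch of how such a manifest could be loaded and inspected in Python; the field names `path`, `chars`, and `preview` follow the listing above, while the local filename `bondai_manifest.json` is only an assumed example of where the JSON array has been saved:

```python
import json

# Minimal sketch: load a locally saved copy of the file manifest shown above.
# Assumes the JSON array of {"path", "chars", "preview"} entries was saved as
# "bondai_manifest.json" (an assumed filename, not part of the extraction).
with open("bondai_manifest.json", "r", encoding="utf-8") as f:
    manifest = json.load(f)

# Total extracted size in characters, summed from the per-file counts.
total_chars = sum(entry["chars"] for entry in manifest)
print(f"{len(manifest)} files, {total_chars} characters")

# Example: list only the documentation pages under website/docs/.
doc_pages = [e["path"] for e in manifest if e["path"].startswith("website/docs/")]
for path in doc_pages:
    print(path)
```

A filter like this can be useful for feeding only a relevant subset of the extraction (for example, just the documentation or just the `bondai/` package) into a context-limited model.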

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.
