Showing preview only (994K chars total). Download the full file or copy to clipboard to get everything.
Repository: RealHossie/Auto-GPT-Chinese
Branch: stable
Commit: f5c0338759b1
Files: 222
Total size: 925.1 KB
Directory structure:
gitextract_3dkehp_f/
├── .coveragerc
├── .devcontainer/
│ ├── Dockerfile
│ ├── devcontainer.json
│ └── docker-compose.yml
├── .dockerignore
├── .envrc
├── .flake8
├── .gitattributes
├── .github/
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE/
│ │ ├── 1.bug.yml
│ │ └── 2.feature.yml
│ ├── PULL_REQUEST_TEMPLATE.md
│ └── workflows/
│ ├── benchmarks.yml
│ ├── ci.yml
│ ├── docker-cache-clean.yml
│ ├── docker-ci.yml
│ ├── docker-release.yml
│ ├── documentation-release.yml
│ ├── pr-label.yml
│ ├── scripts/
│ │ ├── docker-ci-summary.sh
│ │ └── docker-release-summary.sh
│ └── sponsors_readme.yml
├── .gitignore
├── .isort.cfg
├── .pre-commit-config.yaml
├── .sourcery.yaml
├── BULLETIN.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── autogpt/
│ ├── __init__.py
│ ├── __main__.py
│ ├── agent/
│ │ ├── __init__.py
│ │ ├── agent.py
│ │ └── agent_manager.py
│ ├── app.py
│ ├── cli.py
│ ├── commands/
│ │ ├── __init__.py
│ │ ├── analyze_code.py
│ │ ├── audio_text.py
│ │ ├── command.py
│ │ ├── execute_code.py
│ │ ├── file_operations.py
│ │ ├── git_operations.py
│ │ ├── google_search.py
│ │ ├── image_gen.py
│ │ ├── improve_code.py
│ │ ├── task_statuses.py
│ │ ├── times.py
│ │ ├── twitter.py
│ │ ├── web_playwright.py
│ │ ├── web_requests.py
│ │ ├── web_selenium.py
│ │ └── write_tests.py
│ ├── config/
│ │ ├── __init__.py
│ │ ├── ai_config.py
│ │ └── config.py
│ ├── configurator.py
│ ├── js/
│ │ └── overlay.js
│ ├── json_utils/
│ │ ├── __init__.py
│ │ ├── json_fix_general.py
│ │ ├── json_fix_llm.py
│ │ ├── llm_response_format_1.json
│ │ └── utilities.py
│ ├── llm/
│ │ ├── __init__.py
│ │ ├── api_manager.py
│ │ ├── base.py
│ │ ├── chat.py
│ │ ├── llm_utils.py
│ │ ├── modelsinfo.py
│ │ ├── providers/
│ │ │ ├── __init__.py
│ │ │ └── openai.py
│ │ └── token_counter.py
│ ├── log_cycle/
│ │ ├── __init__.py
│ │ ├── json_handler.py
│ │ └── log_cycle.py
│ ├── logs.py
│ ├── main.py
│ ├── memory/
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── local.py
│ │ ├── milvus.py
│ │ ├── no_memory.py
│ │ ├── pinecone.py
│ │ ├── redismem.py
│ │ └── weaviate.py
│ ├── memory_management/
│ │ ├── store_memory.py
│ │ └── summary_memory.py
│ ├── models/
│ │ └── base_open_ai_plugin.py
│ ├── plugins.py
│ ├── processing/
│ │ ├── __init__.py
│ │ ├── html.py
│ │ └── text.py
│ ├── prompts/
│ │ ├── __init__.py
│ │ ├── generator.py
│ │ └── prompt.py
│ ├── setup.py
│ ├── singleton.py
│ ├── speech/
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── brian.py
│ │ ├── eleven_labs.py
│ │ ├── gtts.py
│ │ ├── macos_tts.py
│ │ └── say.py
│ ├── spinner.py
│ ├── url_utils/
│ │ ├── __init__.py
│ │ └── validators.py
│ ├── utils.py
│ └── workspace/
│ ├── __init__.py
│ └── workspace.py
├── azure.yaml.template
├── benchmark/
│ ├── __init__.py
│ └── benchmark_entrepreneur_gpt_with_difficult_user.py
├── codecov.yml
├── data/
│ └── .keep
├── data_ingestion.py
├── docker-compose.yml
├── docs/
│ ├── challenges/
│ │ ├── beat.md
│ │ ├── challenge_template.md
│ │ ├── introduction.md
│ │ ├── list.md
│ │ ├── memory/
│ │ │ ├── challenge_a.md
│ │ │ ├── challenge_b.md
│ │ │ └── introduction.md
│ │ └── submit.md
│ ├── configuration/
│ │ ├── imagegen.md
│ │ ├── memory.md
│ │ ├── search.md
│ │ └── voice.md
│ ├── index.md
│ ├── plugins.md
│ ├── setup.md
│ ├── testing.md
│ └── usage.md
├── main.py
├── mkdocs.yml
├── pyproject.toml
├── requirements.txt
├── run.bat
├── run.sh
├── run_continuous.bat
├── run_continuous.sh
├── scripts/
│ ├── __init__.py
│ ├── check_requirements.py
│ └── install_plugin_deps.py
├── tests/
│ ├── __init__.py
│ ├── conftest.py
│ ├── context.py
│ ├── integration/
│ │ ├── __init__.py
│ │ ├── agent_factory.py
│ │ ├── agent_utils.py
│ │ ├── cassettes/
│ │ │ ├── test_llm_utils/
│ │ │ │ ├── test_get_ada_embedding.yaml
│ │ │ │ └── test_get_ada_embedding_large_context.yaml
│ │ │ ├── test_local_cache/
│ │ │ │ └── test_get_relevant.yaml
│ │ │ ├── test_memory_management/
│ │ │ │ └── test_save_memory_trimmed_from_context_window.yaml
│ │ │ └── test_setup/
│ │ │ ├── test_generate_aiconfig_automatic_default.yaml
│ │ │ ├── test_generate_aiconfig_automatic_fallback.yaml
│ │ │ └── test_generate_aiconfig_automatic_typical.yaml
│ │ ├── challenges/
│ │ │ ├── __init__.py
│ │ │ ├── conftest.py
│ │ │ ├── information_retrieval/
│ │ │ │ └── test_information_retrieval_challenge_a.py
│ │ │ ├── memory/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cassettes/
│ │ │ │ │ ├── test_memory_challenge_a/
│ │ │ │ │ │ └── test_memory_challenge_a.yaml
│ │ │ │ │ └── test_memory_challenge_b/
│ │ │ │ │ └── test_memory_challenge_b.yaml
│ │ │ │ ├── test_memory_challenge_a.py
│ │ │ │ └── test_memory_challenge_b.py
│ │ │ └── utils.py
│ │ ├── conftest.py
│ │ ├── goal_oriented/
│ │ │ ├── __init__.py
│ │ │ ├── cassettes/
│ │ │ │ ├── test_browse_website/
│ │ │ │ │ └── test_browse_website.yaml
│ │ │ │ └── test_write_file/
│ │ │ │ └── test_write_file.yaml
│ │ │ ├── goal_oriented_tasks.md
│ │ │ ├── test_browse_website.py
│ │ │ └── test_write_file.py
│ │ ├── memory_tests.py
│ │ ├── milvus_memory_tests.py
│ │ ├── test_execute_code.py
│ │ ├── test_git_commands.py
│ │ ├── test_llm_utils.py
│ │ ├── test_local_cache.py
│ │ ├── test_memory_management.py
│ │ ├── test_setup.py
│ │ └── weaviate_memory_tests.py
│ ├── milvus_memory_test.py
│ ├── mocks/
│ │ ├── __init__.py
│ │ └── mock_commands.py
│ ├── test_agent.py
│ ├── test_agent_manager.py
│ ├── test_ai_config.py
│ ├── test_api_manager.py
│ ├── test_commands.py
│ ├── test_config.py
│ ├── test_image_gen.py
│ ├── test_logs.py
│ ├── test_prompt_generator.py
│ ├── test_token_counter.py
│ ├── test_utils.py
│ ├── test_workspace.py
│ ├── unit/
│ │ ├── __init__.py
│ │ ├── _test_json_parser.py
│ │ ├── models/
│ │ │ └── test_base_open_api_plugin.py
│ │ ├── test_browse_scrape_links.py
│ │ ├── test_browse_scrape_text.py
│ │ ├── test_chat.py
│ │ ├── test_commands.py
│ │ ├── test_file_operations.py
│ │ ├── test_get_self_feedback.py
│ │ ├── test_json_parser.py
│ │ ├── test_json_utils_llm.py
│ │ ├── test_llm_utils.py
│ │ ├── test_plugins.py
│ │ ├── test_spinner.py
│ │ ├── test_url_validation.py
│ │ └── test_web_selenium.py
│ ├── utils.py
│ └── vcr/
│ ├── __init__.py
│ ├── openai_filter.py
│ └── vcr_filter.py
└── tests.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .coveragerc
================================================
[run]
relative_files = true
================================================
FILE: .devcontainer/Dockerfile
================================================
# Use an official Python base image from the Docker Hub
FROM python:3.10
# Install browsers
RUN apt-get update && apt-get install -y \
chromium-driver firefox-esr \
ca-certificates
# Install utilities
RUN apt-get install -y curl jq wget git
# Declare working directory
WORKDIR /workspace/Auto-GPT
================================================
FILE: .devcontainer/devcontainer.json
================================================
{
"dockerComposeFile": "./docker-compose.yml",
"service": "auto-gpt",
"workspaceFolder": "/workspace/Auto-GPT",
"shutdownAction": "stopCompose",
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": "true",
"username": "vscode",
"userUid": "6942",
"userGid": "6942",
"upgradePackages": "true"
},
"ghcr.io/devcontainers/features/desktop-lite:1": {},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {
"version": "latest",
"ppa": "false"
}
},
// Configure tool-specific properties.
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {
"python.defaultInterpreterPath": "/usr/local/bin/python"
}
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "pip3 install --user -r requirements.txt",
// Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "vscode"
}
================================================
FILE: .devcontainer/docker-compose.yml
================================================
# To boot the app run the following:
# docker-compose run auto-gpt
version: '3.9'
services:
auto-gpt:
depends_on:
- redis
build:
dockerfile: .devcontainer/Dockerfile
context: ../
tty: true
environment:
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
REDIS_HOST: ${REDIS_HOST:-redis}
volumes:
- ../:/workspace/Auto-GPT
redis:
image: 'redis/redis-stack-server:latest'
================================================
FILE: .dockerignore
================================================
.*
*.template
*.yaml
*.yml
*.md
*.png
!BULLETIN.md
================================================
FILE: .envrc
================================================
# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT.
[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
================================================
FILE: .flake8
================================================
[flake8]
max-line-length = 88
select = E303, W293, W291, W292, E305, E231, E302
exclude =
.tox,
__pycache__,
*.pyc,
.env,
venv*/*,
.venv/*,
reports/*,
dist/*,
================================================
FILE: .gitattributes
================================================
# Exclude VCR cassettes from stats
tests/**/cassettes/**.y*ml linguist-generated
# Mark documentation as such
docs/**.md linguist-documentation
================================================
FILE: .github/FUNDING.yml
================================================
# These are supported funding model platforms
github: Torantulino
================================================
FILE: .github/ISSUE_TEMPLATE/1.bug.yml
================================================
name: Bug report 🐛
description: Create a bug report for Auto-GPT.
labels: ['status: needs triage']
body:
- type: markdown
attributes:
value: |
### ⚠️ Before you continue
* Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
* If you need help, you can ask in the [discussions] section or in [#tech-support]
* **Thoroughly search the [existing issues] before creating a new one**
* Read our [wiki page on Contributing]
[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
[discord]: https://discord.gg/autogpt
[discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
[existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
- type: checkboxes
attributes:
label: ⚠️ Search for existing issues first ⚠️
description: >
Please [search the history](https://github.com/Torantulino/Auto-GPT/issues)
to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues, and there is no existing issue for my problem
required: true
- type: markdown
attributes:
value: |
Please provide a searchable summary of the issue in the title above ⬆️.
⚠️ SUPER-busy repo, please help the volunteer maintainers.
The less time we spend here, the more time we spend building AutoGPT.
Please help us help you:
- Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
- Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
- Search for existing issues, "add comment" is tidier than "new issue"
- Ask on our Discord (https://discord.gg/autogpt)
- Provide relevant info:
- Provide commit-hash (`git rev-parse HEAD` gets it)
- If it's a pip/packages issue, provide pip version, python version
- If it's a crash, provide traceback.
- type: dropdown
attributes:
label: Which Operating System are you using?
description: >
Please select the operating system you were using to run Auto-GPT when this problem occurred.
options:
- Windows
- Linux
- MacOS
- Docker
- Devcontainer / Codespace
- Windows Subsystem for Linux (WSL)
- Other (Please specify in your problem)
validations:
required: true
- type: dropdown
attributes:
label: Which version of Auto-GPT are you using?
description: |
Please select which version of Auto-GPT you were using when this issue occurred.
If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code.
**If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**.
If installed with git you can run `git branch` to see which version of Auto-GPT you are running.
options:
- Latest Release
- Stable (branch)
- Master (branch)
validations:
required: true
- type: dropdown
attributes:
label: GPT-3 or GPT-4?
description: >
If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- GPT-3.5
- GPT-4
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce 🕹
description: |
**⚠️ Issues that we can't reproduce will be closed.**
- type: textarea
attributes:
label: Current behavior 😯
description: Describe what happens instead of the expected behavior.
- type: textarea
attributes:
label: Expected behavior 🤔
description: Describe what should happen.
- type: textarea
attributes:
label: Your prompt 📝
description: >
If applicable please provide the prompt you are using. Your prompt is stored in your `ai_settings.yaml` file.
value: |
```yaml
# Paste your prompt here
```
- type: textarea
attributes:
label: Your Logs 📒
description: |
Please include the log showing your error and the command that caused it, if applicable.
You can copy it from your terminal or from `logs/activity.log`.
This will help us understand your issue better!
<details>
<summary><i>Example</i></summary>
```log
INFO NEXT ACTION: COMMAND = execute_shell ARGUMENTS = {'command_line': 'some_command'}
INFO -=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=
Traceback (most recent call last):
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 619, in _interpret_response
self._interpret_response_line(
File "/home/anaconda3/lib/python3.9/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 8191 tokens, however you requested 10982 tokens (10982 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
```
</details>
value: |
```log
<insert your logs here>
```
================================================
FILE: .github/ISSUE_TEMPLATE/2.feature.yml
================================================
name: Feature request 🚀
description: Suggest a new idea for Auto-GPT!
labels: ['status: needs triage']
body:
- type: markdown
attributes:
value: |
First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing)
Please provide a searchable summary of the issue in the title above ⬆️.
- type: checkboxes
attributes:
label: Duplicates
description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues
required: true
- type: textarea
attributes:
label: Summary 💡
description: Describe how it should work.
- type: textarea
attributes:
label: Examples 🌈
description: Provide a link to other implementations, or screenshots of the expected behavior.
- type: textarea
attributes:
label: Motivation 🔦
description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
<!-- ⚠️ At the moment any non-essential commands are not being merged.
If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
We are expecting to ship plugin support within the week (PR #757).
Resources:
* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
-->
<!-- 📢 Announcement
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:
Focus on a single, specific change.
Do not include any unrelated or "extra" modifications.
Provide clear documentation and explanations of the changes made.
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing)
By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
### Background
<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->
### Changes
<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->
### Documentation
<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->
### Test Plan
<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->
### PR Quality Checklist
- [ ] My pull request is atomic and focuses on a single change.
- [ ] I have thoroughly tested my changes with multiple different prompts.
- [ ] I have considered potential risks and mitigations for my changes.
- [ ] I have documented my changes clearly and comprehensively.
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->
================================================
FILE: .github/workflows/benchmarks.yml
================================================
name: Run Benchmarks
on:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
env:
python-version: '3.10'
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Python ${{ env.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: benchmark
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
python benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py
================================================
FILE: .github/workflows/ci.yml
================================================
name: Python CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master, stable ]
concurrency:
group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
jobs:
lint:
runs-on: ubuntu-latest
env:
min-python-version: "3.10"
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.min-python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Lint with flake8
run: flake8
- name: Check black formatting
run: black . --check
if: success() || failure()
- name: Check isort formatting
run: isort . --check
if: success() || failure()
test:
permissions:
# Gives the action the necessary permissions for publishing new
# comments in pull requests.
pull-requests: write
# Gives the action the necessary permissions for pushing data to the
# python-coverage-comment-action branch, and for editing existing
# comments (to avoid publishing multiple comments in the same PR)
contents: write
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11"]
steps:
- name: Check out repository
uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run unittest tests with coverage
run: |
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
env:
CI: true
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
================================================
FILE: .github/workflows/docker-cache-clean.yml
================================================
name: Purge Docker CI cache
on:
schedule:
- cron: 20 4 * * 1,4
env:
BASE_BRANCH: master
IMAGE_NAME: auto-gpt
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
build_type: ${{ matrix.build-type }}
prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
================================================
FILE: .github/workflows/docker-ci.yml
================================================
name: Docker CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master, stable ]
concurrency:
group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
env:
IMAGE_NAME: auto-gpt
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- if: runner.debug
run: |
ls -al
du -hs *
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-${{ matrix.build-type }}
cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref_type }}
build_type: ${{ matrix.build-type }}
prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
# Docker setup needs fixing before this is going to work: #1843
test:
runs-on: ubuntu-latest
needs: build
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=dev # include pytest
tags: ${{ env.IMAGE_NAME }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=docker-dev
cache-to: type=gha,scope=docker-dev,mode=max
- id: test
name: Run tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
test_output=$(
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
)
test_failure=$?
echo "$test_output"
cat << $EOF >> $GITHUB_STEP_SUMMARY
# Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
\`\`\`
$test_output
\`\`\`
$EOF
================================================
FILE: .github/workflows/docker-release.yml
================================================
name: Docker Release
on:
release:
types: [ published, edited ]
workflow_dispatch:
inputs:
no_cache:
type: boolean
description: 'Build from scratch, without using cached layers'
env:
IMAGE_NAME: auto-gpt
DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Log in to Docker hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
- id: build
name: Build image
uses: docker/build-push-action@v3
with:
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release
cache-to: type=gha,scope=docker-release,mode=max
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref_type }}
inputs_no_cache: ${{ inputs.no_cache }}
prod_branch: stable
dev_branch: master
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
================================================
FILE: .github/workflows/documentation-release.yml
================================================
name: Docs
on:
push:
branches: [ stable ]
paths:
- 'docs/**'
- 'mkdocs.yml'
- '.github/workflows/documentation-release.yml'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
permissions:
contents: write
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Python 3
uses: actions/setup-python@v4
with:
python-version: 3.x
- name: Set up workflow cache
uses: actions/cache@v3
with:
key: ${{ github.ref }}
path: .cache
- run: pip install mkdocs-material
- run: mkdocs gh-deploy --force
================================================
FILE: .github/workflows/pr-label.yml
================================================
name: "Pull Request auto-label"
on:
# So that PRs touching the same files as the push are updated
push:
branches: [ master ]
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
pull_request_target:
types: [ opened, synchronize ]
concurrency:
group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
cancel-in-progress: true
jobs:
conflicts:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Update PRs with conflict labels
uses: eps1lon/actions-label-merge-conflict@releases/2.x
with:
dirtyLabel: "conflicts"
#removeOnDirtyLabel: "PR: ready to ship"
repoToken: "${{ secrets.GITHUB_TOKEN }}"
commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
size:
if: ${{ github.event_name == 'pull_request_target' }}
permissions:
issues: write
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: codelytv/pr-size-labeler@v1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
xs_label: 'size/xs'
xs_max_size: 2
s_label: 'size/s'
s_max_size: 10
m_label: 'size/m'
m_max_size: 50
l_label: 'size/l'
l_max_size: 200
xl_label: 'size/xl'
message_if_xl: >
This PR exceeds the recommended size of 200 lines.
Please make sure you are NOT addressing multiple issues with one PR.
Note this PR might be rejected due to its size
================================================
FILE: .github/workflows/scripts/docker-ci-summary.sh
================================================
#!/bin/bash
# Emit a Markdown build summary for the Docker CI workflow; the caller
# redirects stdout to $GITHUB_STEP_SUMMARY.
# Expects env vars exported by the workflow step: IMAGE_NAME, base_branch,
# current_ref, commit_hash, compare_url_template, repository, source_url,
# build_type, push_forced_label, event_name, event_ref, new_commits_json,
# github_context_json, vars_json, job_env_json — TODO confirm against
# docker-ci.yml, which is not visible here.

# First (and only) entry of the image-inspect array: full image metadata JSON.
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')

# Build GitHub compare URLs by substituting into the {base}/{head} template.
head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template)
ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template)

# NOTE(review): bash does not expand the delimiter word after `<<`, so the
# heredoc below is delimited by the literal string "$EOF" (both here and on
# the closing line). The random value generated here is therefore never used
# as the delimiter; the heredoc still works because both markers are the same
# literal text, and the unquoted delimiter keeps $-expansion enabled in the
# body (which this script relies on).
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)

cat << $EOF
# Docker Build summary 🔨
**Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
**Build type:** \`$build_type\`
**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
## Image details
**Tags:**
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
<details>
<summary><h3>Layers</h3></summary>
| Age | Size | Created by instruction |
| --------- | ------ | ---------------------- |
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
| grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
| cut -f-3 `# yeet Comment column`\
| sed 's/ ago//' `# fix Layer age`\
| sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
| sed 's/\$/\\$/g' `# escape variable and shell expansions`\
| sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
| column -t -s$'\t' -o' | ' `# align columns and add separator`\
| sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
</details>
<details>
<summary><h3>ENV</h3></summary>
| Variable | Value |
| -------- | -------- |
$(jq -r \
'.Config.Env
| map(
split("=")
| "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
)
| map("| \(.) |")
| .[]' <<< $meta
)
</details>
<details>
<summary>Raw metadata</summary>
\`\`\`JSON
$meta
\`\`\`
</details>
## Build details
**Build trigger:** $push_forced_label $event_name \`$event_ref\`
<details>
<summary><code>github</code> context</summary>
\`\`\`JSON
$github_context_json
\`\`\`
</details>
### Source
**HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url)
**Diff with previous HEAD:** $head_compare_url
#### New commits
$(jq -r 'map([
"**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**",
.message,
(if .committer.name != .author.name then "\n> <sub>**Committer:** \(.committer.name) <\(.committer.email)></sub>" else "" end),
"<sub>**Timestamp:** \(.timestamp)</sub>"
] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json)
### Job environment
#### \`vars\` context:
\`\`\`JSON
$vars_json
\`\`\`
#### \`env\` context:
\`\`\`JSON
$job_env_json
\`\`\`
$EOF
================================================
FILE: .github/workflows/scripts/docker-release-summary.sh
================================================
#!/bin/bash
# Emit a Markdown summary for the Docker release build; the caller redirects
# stdout to $GITHUB_STEP_SUMMARY.
# Expects env vars exported by the workflow step: IMAGE_NAME, ref_type,
# current_ref, commit_hash, repository, source_url, event_name,
# inputs_no_cache, github_context_json, vars_json, job_env_json — TODO
# confirm against docker-release.yml, which is not fully visible here.

# First (and only) entry of the image-inspect array: full image metadata JSON.
meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')

# NOTE(review): bash does not expand the delimiter word after `<<`, so the
# heredoc below is delimited by the literal string "$EOF"; the random value
# generated here is never actually used as the delimiter. It still works
# because both markers are the same literal text, and the unquoted delimiter
# keeps $-expansion enabled inside the body.
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)

cat << $EOF
# Docker Release Build summary 🚀🔨
**Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
**Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
## Image details
**Tags:**
$(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
<details>
<summary><h3>Layers</h3></summary>
| Age | Size | Created by instruction |
| --------- | ------ | ---------------------- |
$(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
| grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
| cut -f-3 `# yeet Comment column`\
| sed 's/ ago//' `# fix Layer age`\
| sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
| sed 's/\$/\\$/g' `# escape variable and shell expansions`\
| sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
| column -t -s$'\t' -o' | ' `# align columns and add separator`\
| sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
</details>
<details>
<summary><h3>ENV</h3></summary>
| Variable | Value |
| -------- | -------- |
$(jq -r \
'.Config.Env
| map(
split("=")
| "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
)
| map("| \(.) |")
| .[]' <<< $meta
)
</details>
<details>
<summary>Raw metadata</summary>
\`\`\`JSON
$meta
\`\`\`
</details>
## Build details
**Build trigger:** $event_name \`$current_ref\`
| Parameter | Value |
| -------------- | ------------ |
| \`no_cache\` | \`$inputs_no_cache\` |
<details>
<summary><code>github</code> context</summary>
\`\`\`JSON
$github_context_json
\`\`\`
</details>
### Job environment
#### \`vars\` context:
\`\`\`JSON
$vars_json
\`\`\`
#### \`env\` context:
\`\`\`JSON
$job_env_json
\`\`\`
$EOF
================================================
FILE: .github/workflows/sponsors_readme.yml
================================================
name: Generate Sponsors README
on:
workflow_dispatch:
schedule:
- cron: '0 */12 * * *'
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout 🛎️
uses: actions/checkout@v3
- name: Generate Sponsors 💖
uses: JamesIves/github-sponsors-readme-action@v1
with:
token: ${{ secrets.README_UPDATER_PAT }}
file: 'README.md'
minimum: 2500
maximum: 99999
- name: Deploy to GitHub Pages 🚀
uses: JamesIves/github-pages-deploy-action@v4
with:
branch: master
folder: '.'
token: ${{ secrets.README_UPDATER_PAT }}
================================================
FILE: .gitignore
================================================
## Original ignores
autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
logs
*.log
*.mp3
mem.sqlite3
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
plugins/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
site/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.direnv/
.env
.venv
env/
venv*/
ENV/
env.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
llama-*
vicuna-*
# mac
.DS_Store
openai/
# news
CURRENT_BULLETIN.md
================================================
FILE: .isort.cfg
================================================
[settings]
profile = black
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
================================================
FILE: .pre-commit-config.yaml
================================================
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
args: ['--maxkb=500']
- id: check-byte-order-marker
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: debug-statements
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
language_version: python3.10
- repo: https://github.com/psf/black
rev: 23.3.0
hooks:
- id: black
language_version: python3.10
- repo: local
hooks:
- id: pytest-check
name: pytest-check
entry: pytest --cov=autogpt --without-integration --without-slow-integration
language: system
pass_filenames: false
always_run: true
================================================
FILE: .sourcery.yaml
================================================
# 🪄 This is your project's Sourcery configuration file.
# You can use it to get Sourcery working in the way you want, such as
# ignoring specific refactorings, skipping directories in your project,
# or writing custom rules.
# 📚 For a complete reference to this file, see the documentation at
# https://docs.sourcery.ai/Configuration/Project-Settings/
# This file was auto-generated by Sourcery on 2023-02-25 at 21:07.
version: '1' # The schema version of this config file
ignore: # A list of paths or files which Sourcery will ignore.
- .git
- venv
- .venv
- build
- dist
- env
- .env
- .tox
rule_settings:
enable:
- default
- gpsg
disable: [] # A list of rule IDs Sourcery will never suggest.
rule_types:
- refactoring
- suggestion
- comment
python_version: '3.10' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
# rules: # A list of custom rules Sourcery will include in its analysis.
# - id: no-print-statements
# description: Do not use print statements in the test directory.
# pattern: print(...)
# language: python
# replacement:
# condition:
# explanation:
# paths:
# include:
# - test
# exclude:
# - conftest.py
# tests: []
# tags: []
# rule_tags: {} # Additional rule tags.
# metrics:
# quality_threshold: 25.0
# github:
# labels: []
# ignore_labels:
# - sourcery-ignore
# request_review: author
# sourcery_branch: sourcery/{base_branch}
# clone_detection:
# min_lines: 3
# min_duplicates: 2
# identical_clones_only: false
# proxy:
# url:
# ssl_certs_file:
# no_ssl_verify: false
================================================
FILE: BULLETIN.md
================================================
# 持续更新中
Auto-GPT 0.3.0最大更新为自动定义AI名称,角色,任务等,也可以进入手动模式
对插件(Plugin)的支持也在逐渐强大,可玩性更强
我正在逐步修改汉化0.3.0稳定版本,如果你喜欢我的工作,请给我一个star,谢谢!关注我的频道www.youtube.com/@hossie
# 官方网站与文档站 📰📖
使用说明与最新信息 *https://agpt.co*
相关文档 *https://docs.agpt.co*
# 🚀 最新更新 🚀
1. 基础命令汉化
2. 将一些Prompt转化为中文更好理解内容
3. 已完成部分插件汉化
# 🚀 未来更新 🚀
1. 简单的插件汉化
2. 优化已有汉化内容
3. 跟踪官方更新
# ⚠️ `send_tweet` 已经被废弃, 0.4.0版本中将彻底删除 ⚠️
Twitter 功能将通过Plugin完成 [Plugin support 🔌]
================================================
FILE: CODE_OF_CONDUCT.md
================================================
# Code of Conduct for Auto-GPT
## 1. Purpose
The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
## 2. Scope
This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
## 3. Our Standards
We encourage the following behavior:
* Being respectful and considerate to others
* Actively seeking diverse perspectives
* Providing constructive feedback and assistance
* Demonstrating empathy and understanding
We discourage the following behavior:
* Harassment or discrimination of any kind
* Disrespectful, offensive, or inappropriate language or content
* Personal attacks or insults
* Unwarranted criticism or negativity
## 4. Reporting and Enforcement
If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.
Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.
## 5. Acknowledgements
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
## 6. Contact
If you have any questions or concerns, please contact the project maintainers.
================================================
FILE: CONTRIBUTING.md
================================================
This document now lives at https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
================================================
FILE: Dockerfile
================================================
# 'dev' or 'release' container build
ARG BUILD_TYPE=dev

# Use an official Python base image from the Docker Hub
FROM python:3.10-slim AS autogpt-base

# Install browsers
# chromium-driver / firefox-esr — presumably for the web-browsing commands
# (selenium/playwright); confirm against autogpt/commands/web_*.py
RUN apt-get update && apt-get install -y \
chromium-driver firefox-esr \
ca-certificates

# Install utilities
RUN apt-get install -y curl jq wget git

# Set environment variables
# PIP_NO_CACHE_DIR keeps the image small; PYTHONUNBUFFERED streams logs
# immediately; PYTHONDONTWRITEBYTECODE avoids baking .pyc files in.
ENV PIP_NO_CACHE_DIR=yes \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1

# Install the required python packages globally
ENV PATH="$PATH:/root/.local/bin"
COPY requirements.txt .

# Set the entrypoint
ENTRYPOINT ["python", "-m", "autogpt"]

# dev build -> include everything
FROM autogpt-base as autogpt-dev
RUN pip install --no-cache-dir -r requirements.txt
WORKDIR /app
# ONBUILD: the copy runs when a downstream image builds FROM this stage.
ONBUILD COPY . ./

# release build -> include bare minimum
# The sed strips every requirements.txt line from the marker comment onward,
# so dev-only dependencies are excluded from the release image.
FROM autogpt-base as autogpt-release
RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
pip install --no-cache-dir -r requirements.txt
WORKDIR /app
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts

# Final stage selected by BUILD_TYPE ('dev' by default).
FROM autogpt-${BUILD_TYPE} AS auto-gpt
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2023 Toran Bruce Richards
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# Auto-GPT中文版
### Youtube频道:https://www.youtube.com/@Hossie
##
<hr/>
<h2 align="center"> 💖 大家好 💖</h2>
<p align="center">
这个项目是基于Auto-GPT原作者的0.3.0Stable版本,应大家要求,我快速制作了中文的汉化版本,基本运行顺利,还有些小瑕疵,会不断更新修改,欢迎大家关注,还请大家多支持!下面的内容就不过多翻译了,保持与原作者一致!
如果大家希望支持我,还请给我点个小星星,谢谢!也可以通过微信与我们交流
<p align="center">
<a href="https://raw.githubusercontent.com/RealHossie/Auto-GPT-Chinese/master/docs/imgs/wechat.png">
<img src="https://raw.githubusercontent.com/RealHossie/Auto-GPT-Chinese/master/docs/imgs/wechat.png" alt="微信">
</a>
</p>
</div>
</br>
</br>
<p align="center">
<a href="https://star-history.com/#Realhossie/auto-gpt-chinese&Date">
<img src="https://api.star-history.com/svg?repos=Realhossie/auto-gpt-chinese&type=Date" alt="Star History Chart">
</a>
</p>
## 🚀 Features
- 🌐 Internet access for searches and information gathering
- 💾 Long-term and short-term memory management
- 🧠 GPT-4 instances for text generation
- 🔗 Access to popular websites and platforms
- 🗃️ File storage and summarization with GPT-3.5
- 🔌 Extensibility with Plugins
## Quickstart
1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys)
2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest)
3. Follow the [installation instructions][docs/setup]
4. Configure any additional features you want, or install some [plugins][docs/plugins]
5. [Run][docs/usage] the app
Please see the [documentation][docs] for full setup instructions and configuration options.
[docs]: https://docs.agpt.co/
## 📖 Documentation
* [⚙️ Setup][docs/setup]
* [💻 Usage][docs/usage]
* [🔌 Plugins][docs/plugins]
* Configuration
* [🔍 Web Search](https://docs.agpt.co/configuration/search/)
* [🧠 Memory](https://docs.agpt.co/configuration/memory/)
* [🗣️ Voice (TTS)](https://docs.agpt.co/configuration/voice/)
* [🖼️ Image Generation](https://docs.agpt.co/configuration/imagegen/)
[docs/setup]: https://docs.agpt.co/setup/
[docs/usage]: https://docs.agpt.co/usage/
[docs/plugins]: https://docs.agpt.co/plugins/
================================================
FILE: autogpt/__init__.py
================================================
import os
import random
import sys

from dotenv import load_dotenv

# Make test and CI runs deterministic by pinning the PRNG seed whenever the
# package is imported under pytest or in a CI environment.
if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
    print("Setting random seed to 42")
    random.seed(42)

# Load the users .env file into environment variables
load_dotenv(verbose=True, override=True)

# Remove the helper from the package namespace so it is not re-exported
# via `from autogpt import *`.
del load_dotenv
================================================
FILE: autogpt/__main__.py
================================================
"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.cli
if __name__ == "__main__":
autogpt.cli.main()
================================================
FILE: autogpt/agent/__init__.py
================================================
from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager
__all__ = ["Agent", "AgentManager"]
================================================
FILE: autogpt/agent/agent.py
================================================
from colorama import Fore, Style
from autogpt.app import execute_command, get_command
from autogpt.config import Config
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
from autogpt.llm.token_counter import count_string_tokens
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
from autogpt.workspace import Workspace
class Agent:
    """Agent class for interacting with Auto-GPT.

    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        system_prompt: The system prompt is the initial prompt that defines
            everything the AI needs to know to achieve its task successfully.
            Currently, the dynamic and customizable information in the system
            prompt are ai_name, description and goals.
        triggering_prompt: The last sentence the AI will see before answering.
            For Auto-GPT, this prompt is:
            Determine which next command to use, and respond using the format
            specified above:
            The triggering prompt is not part of the system prompt because
            between the system prompt and the triggering prompt we have
            contextual information that can distract the AI and make it forget
            that its goal is to find the next task to achieve.
            SYSTEM PROMPT
            CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
            TRIGGERING PROMPT
            The triggering prompt reminds the AI about its short term meta task
            (defining the next task).
    """

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        command_registry,
        config,
        system_prompt,
        triggering_prompt,
        workspace_directory,
    ):
        cfg = Config()
        self.ai_name = ai_name
        self.memory = memory
        # Initial memory necessary to avoid hallucination about a missing past.
        self.summary_memory = "我被生成完毕."
        self.last_memory_index = 0
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.command_registry = command_registry
        self.config = config
        self.system_prompt = system_prompt
        self.triggering_prompt = triggering_prompt
        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)

    def start_interaction_loop(self):
        """Run the think -> authorize -> execute loop until the user exits or
        the continuous-mode limit is reached."""
        cfg = Config()
        loop_count = 0
        command_name = None
        arguments = None
        user_input = ""

        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "持续模式次数已到达: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break

            # Send message to AI, get response
            with Spinner("琢磨中... "):
                assistant_reply = chat_with_ai(
                    self,
                    self.system_prompt,
                    self.triggering_prompt,
                    self.full_message_history,
                    self.memory,
                    cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
            for plugin in cfg.plugins:
                if not plugin.can_handle_post_planning():
                    continue
                assistant_reply_json = plugin.post_planning(self, assistant_reply_json)

            # Print Assistant thoughts
            if assistant_reply_json != {}:
                validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
                # Get command name and arguments
                try:
                    print_assistant_thoughts(
                        self.ai_name, assistant_reply_json, cfg.speak_mode
                    )
                    command_name, arguments = get_command(assistant_reply_json)
                    if cfg.speak_mode:
                        say_text(f"我想要执行 {command_name}")

                    arguments = self._resolve_pathlike_command_args(arguments)
                except Exception as e:
                    logger.error("Error: \n", str(e))

            if not cfg.continuous_mode and self.next_action_count == 0:
                # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Prompt the user to authorize, give feedback, or exit.
                self.user_input = ""
                logger.typewriter_log(
                    "下一步: ",
                    Fore.CYAN,
                    f"命令 = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"参数 = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                logger.info(
                    "输入 'y' 授权执行命令, 'y -N' 执行N步持续模式, 's' 执行自我反馈命令 或"
                    "'n' 退出程序, 或直接输入反馈 "
                    f"{self.ai_name}..."
                )
                while True:
                    if cfg.chat_messages_enabled:
                        console_input = clean_input("等待你的反馈中...")
                    else:
                        console_input = clean_input(
                            Fore.MAGENTA + "输入:" + Style.RESET_ALL
                        )
                    if console_input.lower().strip() == cfg.authorise_key:
                        user_input = "生成下一个命令的JSON"
                        break
                    elif console_input.lower().strip() == "s":
                        # Ask the model to critique its own thoughts before
                        # the user decides whether to proceed.
                        logger.typewriter_log(
                            "-=-=-=-=-=-=-= 思考, 推理, 计划与反思将会被AI助手校验 -=-=-=-=-=-=-=",
                            Fore.GREEN,
                            "",
                        )
                        thoughts = assistant_reply_json.get("thoughts", {})
                        self_feedback_resp = self.get_self_feedback(
                            thoughts, cfg.fast_llm_model
                        )
                        logger.typewriter_log(
                            f"自我反馈: {self_feedback_resp}",
                            Fore.YELLOW,
                            "",
                        )
                        # A reply starting with the authorise key counts as
                        # self-approval; otherwise feed the critique back in.
                        if self_feedback_resp[0].lower().strip() == cfg.authorise_key:
                            user_input = "生成下一个命令的JSON"
                        else:
                            user_input = self_feedback_resp
                        break
                    elif console_input.lower().strip() == "":
                        logger.warn("错误的输入格式.")
                        continue
                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                        # 'y -N': authorize N further steps without prompting.
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            user_input = "生成下一个命令的JSON"
                        except ValueError:
                            logger.warn(
                                "错误的输入格式. 请输入 'y -n' n 代表"
                                " 持续模式的步数."
                            )
                            continue
                        break
                    elif console_input.lower() == cfg.exit_key:
                        user_input = "EXIT"
                        break
                    else:
                        # Anything else is free-form human feedback.
                        user_input = console_input
                        command_name = "human_feedback"
                        break

            if user_input == "生成下一个命令的JSON":
                logger.typewriter_log(
                    "-=-=-=-=-=-=-= 命令被用户批准执行 -=-=-=-=-=-=-=",
                    Fore.MAGENTA,
                    "",
                )
            elif user_input in ("EXIT", "exit", "退出"):
                # BUGFIX: was `user_input == "EXIT" or "exit" or "退出"`, which
                # is always truthy ("exit" is a non-empty string), so this
                # branch fired for ANY non-approval input — human feedback and
                # continuous mode both exited immediately.
                logger.info("Exiting...")
                break
            else:
                # Print command
                logger.typewriter_log(
                    "下一步: ",
                    Fore.CYAN,
                    f"命令 = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" 参数 = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                # BUGFIX: the sentinel assigned above is "human_feedback"; the
                # old comparison against "人类反馈" never matched, so feedback
                # was wrongly routed into execute_command().
                result = f"人类反馈: {user_input}"
            else:
                for plugin in cfg.plugins:
                    if not plugin.can_handle_pre_command():
                        continue
                    command_name, arguments = plugin.pre_command(
                        command_name, arguments
                    )
                command_result = execute_command(
                    self.command_registry,
                    command_name,
                    arguments,
                    self.config.prompt_generator,
                )
                result = f"Command {command_name} returned: " f"{command_result}"

                # Guard the context window: discard oversized command output.
                result_tlength = count_string_tokens(
                    str(command_result), cfg.fast_llm_model
                )
                memory_tlength = count_string_tokens(
                    str(self.summary_memory), cfg.fast_llm_model
                )
                if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
                    result = f"Failure: command {command_name} returned too much output. \
                        Do not execute this command again with the same arguments."

                for plugin in cfg.plugins:
                    if not plugin.can_handle_post_command():
                        continue
                    result = plugin.post_command(command_name, result)
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            # Check if there's a result from the command append it to the message
            # history
            if result is not None:
                self.full_message_history.append(create_chat_message("system", result))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    create_chat_message("system", "无法执行命令")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "无法执行命令"
                )

    def _resolve_pathlike_command_args(self, command_args):
        """Rewrite path-like command arguments to absolute workspace paths."""
        if "directory" in command_args and command_args["directory"] in {"", "/"}:
            # Empty or root directory means "the workspace root".
            command_args["directory"] = str(self.workspace.root)
        else:
            for pathlike in ["filename", "directory", "clone_path"]:
                if pathlike in command_args:
                    command_args[pathlike] = str(
                        self.workspace.get_path(command_args[pathlike])
                    )
        return command_args

    def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
        """Generates a feedback response based on the provided thoughts dictionary.
        This method takes in a dictionary of thoughts containing keys such as "reasoning",
        "plan", "thoughts", and "criticism". It combines these elements into a single
        feedback message and uses the create_chat_completion() function to generate a
        response based on the input message.
        Args:
            thoughts (dict): A dictionary containing thought elements like reasoning,
            plan, thoughts, and criticism.
        Returns:
            str: A feedback response generated using the provided thoughts dictionary.
        """
        ai_role = self.config.ai_role
        # BUGFIX: corrected "日过" -> "如果" in the prompt below (typo that
        # garbled the "if these cannot achieve the role's goals" clause).
        feedback_prompt = f"下面是来自我的消息,我是一个AI助手,角色为: {ai_role}. 请评估提供的思考,推理,计划与反思. 如果这些内容可以准确的完成该角色的任务,请回复字母'Y'后面加上一个空格,然后请解释为什么是最优方案. 如果这些信息无法实现该角色的目标, 请提供一句或多句解释一下具体的问题与建议的解决方案."
        # NOTE(review): the keys below are Chinese; confirm they match the keys
        # actually produced in assistant_reply_json["thoughts"], else these
        # lookups silently return "".
        reasoning = thoughts.get("推理", "")
        plan = thoughts.get("计划", "")
        thought = thoughts.get("思考", "")
        criticism = thoughts.get("反思", "")
        feedback_thoughts = thought + reasoning + plan + criticism
        return create_chat_completion(
            [{"role": "user", "content": feedback_prompt + feedback_thoughts}],
            llm_model,
        )
================================================
FILE: autogpt/agent/agent_manager.py
================================================
"""Agent manager for managing GPT agents"""
from __future__ import annotations
from typing import List
from autogpt.config.config import Config
from autogpt.llm import Message, create_chat_completion
from autogpt.singleton import Singleton
class AgentManager(metaclass=Singleton):
    """Agent manager for managing GPT agents"""

    def __init__(self):
        # Monotonically increasing key; see create_agent for why this is not
        # len(self.agents).
        self.next_key = 0
        self.agents = {}  # key -> (task, full_message_history, model)
        self.cfg = Config()

    # Create new GPT agent
    # TODO: Centralise use of create_chat_completion() to globally enforce token limit

    def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
        """Create a new agent and return its key together with its first reply.

        Args:
            task: The task to perform
            prompt: The prompt to use
            model: The model to use

        Returns:
            A (key, agent_reply) tuple: the key of the new agent and the
            agent's first response (after plugin post-processing).
            (FIX: the old docstring claimed only the key was returned.)
        """
        messages: List[Message] = [
            {"role": "user", "content": prompt},
        ]
        # Let plugins inject extra messages before the first completion.
        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages):
                messages.extend(plugin_messages)
        # Start GPT instance
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.append({"role": "assistant", "content": agent_reply})

        # Concatenate replies from all on_instruction-capable plugins.
        plugins_reply = ""
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"

        if plugins_reply:
            messages.append({"role": "assistant", "content": plugins_reply})

        key = self.next_key
        # This is done instead of len(agents) to make keys unique even if agents
        # are deleted
        self.next_key += 1

        self.agents[key] = (task, messages, model)

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return key, agent_reply

    def message_agent(self, key: str | int, message: str) -> str:
        """Send a message to an agent and return its response

        Args:
            key: The key of the agent to message
            message: The message to send to the agent

        Returns:
            The agent's response
        """
        task, messages, model = self.agents[int(key)]

        # Add user message to message history before sending to agent
        messages.append({"role": "user", "content": message})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_pre_instruction():
                continue
            if plugin_messages := plugin.pre_instruction(messages):
                messages.extend(plugin_messages)

        # Start GPT instance
        agent_reply = create_chat_completion(
            model=model,
            messages=messages,
        )

        messages.append({"role": "assistant", "content": agent_reply})

        # NOTE(review): unlike create_agent, this seeds plugins_reply with
        # agent_reply, so when any plugin produces output the agent's reply is
        # stored twice in the history (once alone, once inside plugins_reply).
        # Preserved as-is; confirm whether this duplication is intentional.
        plugins_reply = agent_reply
        for i, plugin in enumerate(self.cfg.plugins):
            if not plugin.can_handle_on_instruction():
                continue
            if plugin_result := plugin.on_instruction(messages):
                sep = "\n" if i else ""
                plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
        # Update full message history
        if plugins_reply:
            messages.append({"role": "assistant", "content": plugins_reply})

        for plugin in self.cfg.plugins:
            if not plugin.can_handle_post_instruction():
                continue
            agent_reply = plugin.post_instruction(agent_reply)

        return agent_reply

    def list_agents(self) -> list[tuple[str | int, str]]:
        """Return a list of all agents

        Returns:
            A list of tuples of the form (key, task)
        """
        # Return a list of agent keys and their tasks
        return [(key, task) for key, (task, _, _) in self.agents.items()]

    def delete_agent(self, key: str | int) -> bool:
        """Delete an agent from the agent manager

        Args:
            key: The key of the agent to delete

        Returns:
            True if successful, False otherwise (unknown key). Note: a
            non-numeric string key still raises ValueError from int().
        """
        try:
            del self.agents[int(key)]
            return True
        except KeyError:
            return False
================================================
FILE: autogpt/app.py
================================================
""" Command and Control """
import json
from typing import Dict, List, NoReturn, Union
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
from autogpt.url_utils.validators import validate_url
CFG = Config()  # module-wide config (speak mode, model names read below)
AGENT_MANAGER = AgentManager()  # single shared registry of spawned GPT agents
def is_valid_int(value: str) -> bool:
    """Tell whether *value* parses as a base-10 integer.

    Args:
        value (str): The candidate string.

    Returns:
        bool: True when int(value) succeeds, False otherwise.
    """
    try:
        int(value)
    except ValueError:
        return False
    return True
def get_command(response_json: Dict):
    """Parse the response and return the command name and arguments.

    Args:
        response_json (json): The response from the AI

    Returns:
        tuple: The command name and arguments

    Raises:
        json.decoder.JSONDecodeError: If the response is not valid JSON
        Exception: If any other error occurs
    """
    try:
        # Validate the overall shape first: probing a non-dict with `in`
        # could raise TypeError (e.g. for ints) and return a generic error
        # instead of the intended "not dictionary" message.
        if not isinstance(response_json, dict):
            return "Error:", f"'response_json' object is not dictionary {response_json}"
        if "command" not in response_json:
            return "Error:", "缺少 'command' object in JSON"
        command = response_json["command"]
        if not isinstance(command, dict):
            return "Error:", "'command' object is not a dictionary"
        if "name" not in command:
            return "Error:", "缺少 'name' field in 'command' object"
        command_name = command["name"]
        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})
        return command_name, arguments
    except json.decoder.JSONDecodeError:
        return "Error:", "Invalid JSON"
    # All other errors, return "Error: + error message"
    except Exception as e:
        return "Error:", str(e)
def map_command_synonyms(command_name: str):
    """Map known hallucinated command names to their real equivalents.

    The model sometimes invents near-miss command names; translate those
    back to the canonical command, leaving unknown names untouched.
    """
    canonical = {
        "write_file": "write_to_file",
        "create_file": "write_to_file",
        "search": "google",
    }
    return canonical.get(command_name, command_name)
def execute_command(
    command_registry: CommandRegistry,
    command_name: str,
    arguments,
    prompt: PromptGenerator,
):
    """Execute the command and return the result.

    Args:
        command_registry: Registry of registered Command objects
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command
        prompt: Prompt generator holding legacy (non-registry) commands

    Returns:
        str: The result of the command
    """
    try:
        registered = command_registry.commands.get(command_name)
        if registered:
            # Registry hit: invoke it directly with the provided arguments.
            return registered(**arguments)

        # TODO: Remove commands below after they are moved to the command registry.
        command_name = map_command_synonyms(command_name.lower())
        if command_name == "memory_add":
            return get_memory(CFG).add(arguments["string"])
        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again

        # Fall back to the legacy commands attached to the prompt generator.
        for legacy in prompt.commands:
            if command_name in (legacy["label"].lower(), legacy["name"].lower()):
                return legacy["function"](**arguments)
        return (
            f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
            " list for available commands and only respond in the specified JSON"
            " format."
        )
    except Exception as e:
        return f"错误: {str(e)}"
@command(
    "get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
)
@validate_url
def get_text_summary(url: str, question: str) -> str:
    """Scrape a page and summarize its text with respect to a question.

    Args:
        url (str): The url to scrape
        question (str): The question to summarize the text for

    Returns:
        str: The summary of the text
    """
    page_text = scrape_text(url)
    return f""" "结果" : {summarize_text(url, page_text, question)}"""
@command("get_hyperlinks", "Get text summary", '"url": "<url>"')
@validate_url
def get_hyperlinks(url: str) -> Union[str, List[str]]:
    """Scrape a page and return the hyperlinks found on it.

    Args:
        url (str): The url to scrape

    Returns:
        str or list: The hyperlinks on the page
    """
    # Delegate directly to the scraper; it handles error reporting.
    links = scrape_links(url)
    return links
@command(
    "start_agent",
    "启动GPT助手",
    '"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
)
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
    """Spawn a new GPT agent, introduce it, and send it its first prompt.

    Args:
        name (str): The name of the agent
        task (str): The task of the agent
        prompt (str): The prompt for the agent
        model (str): The model to use for the agent

    Returns:
        str: The new agent's key and its first response
    """
    # Underscores read poorly when spoken aloud, so use a spaced variant.
    voice_name = name.replace("_", " ")

    first_message = f"""你是 {name}. 回答 with: "收到"."""
    agent_intro = f"{voice_name} 在这儿呢, 听从领导指挥!"

    # Announce the new agent when speech is enabled.
    if CFG.speak_mode:
        say_text(agent_intro, 1)

    key, ack = AGENT_MANAGER.create_agent(task, first_message, model)
    if CFG.speak_mode:
        say_text(f"你好 {voice_name}. 你的任务如下. {task}.")

    # Deliver the actual task prompt and capture the first reply.
    agent_response = AGENT_MANAGER.message_agent(key, prompt)
    return f"Agent {name} 生成key {key}. 首次反馈: {agent_response}"
@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
def message_agent(key: str, message: str) -> str:
    """Forward a message to the agent identified by a numeric key."""
    # Reject keys that are not integers before touching the manager.
    if not is_valid_int(key):
        return "无效的key, 必须为数字."

    agent_response = AGENT_MANAGER.message_agent(int(key), message)

    # Speak the reply when speech mode is on.
    if CFG.speak_mode:
        say_text(agent_response, 1)
    return agent_response
@command("list_agents", "List GPT Agents", "")
def list_agents() -> str:
    """Render every known agent as a newline-separated listing.

    Returns:
        str: A list of all agents
    """
    lines = [f"{agent_key}: {agent_task}" for agent_key, agent_task in AGENT_MANAGER.list_agents()]
    return "List of agents:\n" + "\n".join(lines)
@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
def delete_agent(key: str) -> str:
    """Delete the agent stored under *key* and report the outcome.

    Args:
        key (str): The key of the agent to delete

    Returns:
        str: A message indicating whether the agent was deleted or not
    """
    if AGENT_MANAGER.delete_agent(key):
        return f"Agent {key} deleted."
    return f"Agent {key} does not exist."
================================================
FILE: autogpt/cli.py
================================================
"""Main script for the autogpt package."""
import click
# CLI entry point. Every option below is forwarded POSITIONALLY to
# autogpt.main.run_auto_gpt, so the parameter order of main() must stay in
# sync with run_auto_gpt's signature.
@click.group(invoke_without_command=True)
@click.option("-c", "--continuous", is_flag=True, help="启动持续模式")
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="跳过重新输入命令环节",
)
@click.option(
    "--ai-settings",
    "-C",
    help="指定使用哪个ai_settings.yaml文件, 同时自动跳过重新输入命令.",
)
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="定义持续模式中的持续次数",
)
@click.option("--speak", is_flag=True, help="开启语音模式")
@click.option("--debug", is_flag=True, help="开启Debug模式")
@click.option("--gpt3only", is_flag=True, help="开启GPT3.5模式")
@click.option("--gpt4only", is_flag=True, help="开启GPT4模式")
@click.option(
    "--use-memory",
    "-m",
    "memory_type",
    type=str,
    help="定义使用那种记忆后台",
)
@click.option(
    "-b",
    "--browser-name",
    help="指定使用哪个 Web 浏览器来使用 Selenium 抓取网络内容.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="危险: 允许Auto-GPT自动下载文件.",
)
@click.option(
    "--skip-news",
    is_flag=True,
    help="指定是否在启动时不输出最新新闻.",
)
@click.option(
    # TODO: this is a hidden option for now, necessary for integration testing.
    # We should make this public once we're ready to roll out agent specific workspaces.
    "--workspace-directory",
    "-w",
    type=click.Path(),
    hidden=True,
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
    help="为第三方插件安装外部依赖库.",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
    workspace_directory: str,
    install_plugin_deps: bool,
) -> None:
    """
    Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.

    Start an Auto-GPT assistant.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    from autogpt.main import run_auto_gpt

    # Only launch when invoked without a subcommand (plain `autogpt`);
    # any subcommand registered on this group handles itself.
    if ctx.invoked_subcommand is None:
        run_auto_gpt(
            continuous,
            continuous_limit,
            ai_settings,
            skip_reprompt,
            speak,
            debug,
            gpt3only,
            gpt4only,
            memory_type,
            browser_name,
            allow_downloads,
            skip_news,
            workspace_directory,
            install_plugin_deps,
        )


if __name__ == "__main__":
    main()
================================================
FILE: autogpt/commands/__init__.py
================================================
================================================
FILE: autogpt/commands/analyze_code.py
================================================
"""Code evaluation module."""
from __future__ import annotations
from autogpt.commands.command import command
from autogpt.llm import call_ai_function
@command(
    "analyze_code",
    "分析代码",
    '"code": "<full_code_string>"',
)
def analyze_code(code: str) -> list[str]:
    """Ask the LLM for improvement suggestions on a piece of code.

    Parameters:
        code (str): Code to be evaluated.

    Returns:
        A result string from create chat completion: a list of suggestions
        to improve the code.
    """
    # The AI "implements" this signature; we only supply the description.
    return call_ai_function(
        "def analyze_code(code: str) -> list[str]:",
        [code],
        "分析给出的代码并列出一系列优化建议。",
    )
================================================
FILE: autogpt/commands/audio_text.py
================================================
"""Commands for converting audio to text."""
import json
import requests
from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()  # shared config: HuggingFace model name and API token used below
@command(
    "read_audio_from_file",
    "转换音频至文本",
    '"文件名": "<filename>"',
    CFG.huggingface_audio_to_text_model,
    "Configure huggingface_audio_to_text_model.",
)
def read_audio_from_file(filename: str) -> str:
    """Transcribe an audio file to text.

    Args:
        filename (str): The path to the audio file

    Returns:
        str: The text from the audio
    """
    # Read the raw bytes and delegate the actual transcription.
    with open(filename, "rb") as audio_file:
        return read_audio(audio_file.read())
def read_audio(audio: bytes) -> str:
    """Transcribe raw audio bytes via the HuggingFace inference API.

    Args:
        audio (bytes): The audio to convert

    Returns:
        str: The text from the audio

    Raises:
        ValueError: If no HuggingFace API token is configured.
    """
    model = CFG.huggingface_audio_to_text_model
    api_token = CFG.huggingface_api_token
    if api_token is None:
        raise ValueError(
            "你需要在配置文件中配置你的Hugging Face API token."
        )

    # POST the audio payload straight to the model's inference endpoint.
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{model}",
        headers={"Authorization": f"Bearer {api_token}"},
        data=audio,
    )

    text = json.loads(response.content.decode("utf-8"))["text"]
    return f"音频中说道: {text}"
================================================
FILE: autogpt/commands/command.py
================================================
import functools
import importlib
import inspect
from typing import Any, Callable, Optional
# Unique identifier for auto-gpt commands: the @command decorator sets this
# attribute to True on wrapper functions so CommandRegistry.import_commands
# can discover them via getattr.
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
class Command:
    """A single invokable command.

    Attributes:
        name (str): The name of the command.
        description (str): A brief description of what the command does.
        signature (str): The signature of the function that the command executes. Defaults to None.
    """

    def __init__(
        self,
        name: str,
        description: str,
        method: Callable[..., Any],
        signature: str = "",
        enabled: bool = True,
        disabled_reason: Optional[str] = None,
    ):
        self.name = name
        self.description = description
        self.method = method
        # Fall back to the callable's real signature when none is supplied.
        self.signature = signature or str(inspect.signature(self.method))
        self.enabled = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args, **kwargs) -> Any:
        # Disabled commands report why instead of executing.
        if self.enabled:
            return self.method(*args, **kwargs)
        return f"命令 '{self.name}' 被禁止: {self.disabled_reason}"

    def __str__(self) -> str:
        return f"{self.name}: {self.description}, args: {self.signature}"
class CommandRegistry:
"""
The CommandRegistry class is a manager for a collection of Command objects.
It allows the registration, modification, and retrieval of Command objects,
as well as the scanning and loading of command plugins from a specified
directory.
"""
def __init__(self):
self.commands = {}
def _import_module(self, module_name: str) -> Any:
return importlib.import_module(module_name)
def _reload_module(self, module: Any) -> Any:
return importlib.reload(module)
def register(self, cmd: Command) -> None:
self.commands[cmd.name] = cmd
def unregister(self, command_name: str):
if command_name in self.commands:
del self.commands[command_name]
else:
raise KeyError(f"命令 '{command_name}' 在注册表中未找到.")
def reload_commands(self) -> None:
"""Reloads all loaded command plugins."""
for cmd_name in self.commands:
cmd = self.commands[cmd_name]
module = self._import_module(cmd.__module__)
reloaded_module = self._reload_module(module)
if hasattr(reloaded_module, "register"):
reloaded_module.register(self)
def get_command(self, name: str) -> Callable[..., Any]:
return self.commands[name]
def call(self, command_name: str, **kwargs) -> Any:
if command_name not in self.commands:
raise KeyError(f"文件 '{command_name}' 在注册表中未找到.")
command = self.commands[command_name]
return command(**kwargs)
def command_prompt(self) -> str:
"""
Returns a string representation of all registered `Command` objects for use in a prompt
"""
commands_list = [
f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
]
return "\n".join(commands_list)
def import_commands(self, module_name: str) -> None:
"""
Imports the specified Python module containing command plugins.
This method imports the associated module and registers any functions or
classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
as `Command` objects. The registered `Command` objects are then added to the
`commands` dictionary of the `CommandRegistry` object.
Args:
module_name (str): The name of the module to import for command plugins.
"""
module = importlib.import_module(module_name)
for attr_name in dir(module):
attr = getattr(module, attr_name)
# Register decorated functions
if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
attr, AUTO_GPT_COMMAND_IDENTIFIER
):
self.register(attr.command)
# Register command classes
elif (
inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
):
cmd_instance = attr()
self.register(cmd_instance)
def command(
    name: str,
    description: str,
    signature: str = "",
    enabled: bool = True,
    disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
    """Decorator that attaches a Command object to an ordinary function."""

    def decorator(func: Callable[..., Any]) -> Command:
        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            return func(*args, **kwargs)

        # Expose the Command and mark the wrapper so import_commands finds it.
        wrapper.command = Command(
            name=name,
            description=description,
            method=func,
            signature=signature,
            enabled=enabled,
            disabled_reason=disabled_reason,
        )
        setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)
        return wrapper

    return decorator
================================================
FILE: autogpt/commands/execute_code.py
================================================
"""Execute code in a Docker container"""
import os
import subprocess
from pathlib import Path
import docker
from docker.errors import ImageNotFound
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger
CFG = Config()  # shared config: workspace path and shell-execution permissions
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
def execute_python_file(filename: str) -> str:
    """Execute a Python file in a Docker container and return the output.

    Fix: the log/subprocess f-strings had lost their `{filename}`
    interpolation (they contained a literal placeholder), so the in-container
    branch ran a nonsense command and logs omitted the file name.

    Args:
        filename (str): The name of the file to execute

    Returns:
        str: The output of the file
    """
    logger.info(f"执行文件中 '{filename}'")

    # Only .py files may be executed.
    if not filename.endswith(".py"):
        return "错误: 无效的文件类型. 只允许 .py文件."

    if not os.path.isfile(filename):
        return f"错误: 文件 '{filename}' 不存在."

    # Already inside a container: run the interpreter directly.
    if we_are_running_in_a_docker_container():
        result = subprocess.run(
            f"python {filename}", capture_output=True, encoding="utf8", shell=True
        )
        if result.returncode == 0:
            return result.stdout
        else:
            return f"错误: {result.stderr}"

    try:
        client = docker.from_env()
        # You can replace this with the desired Python image/version
        # You can find available Python images on Docker Hub:
        # https://hub.docker.com/_/python
        image_name = "python:3-alpine"
        try:
            client.images.get(image_name)
            logger.warn(f"Image '{image_name}' found locally")
        except ImageNotFound:
            logger.info(
                f"Image '{image_name}' not found locally, pulling from Docker Hub"
            )
            # Use the low-level API to stream the pull response
            low_level_client = docker.APIClient()
            for line in low_level_client.pull(image_name, stream=True, decode=True):
                # Print the status and progress, if available
                status = line.get("status")
                progress = line.get("progress")
                if status and progress:
                    logger.info(f"{status}: {progress}")
                elif status:
                    logger.info(status)
        # Mount the workspace read-only and run the file relative to it.
        container = client.containers.run(
            image_name,
            f"python {Path(filename).relative_to(CFG.workspace_path)}",
            volumes={
                CFG.workspace_path: {
                    "bind": "/workspace",
                    "mode": "ro",
                }
            },
            working_dir="/workspace",
            stderr=True,
            stdout=True,
            detach=True,
        )
        container.wait()
        logs = container.logs().decode("utf-8")
        container.remove()
        return logs
    except docker.errors.DockerException as e:
        logger.warn(
            "在容器中无法执行脚本. 如果你还没有安装,请安装Docker https://docs.docker.com/get-docker/"
        )
        return f"错误: {str(e)}"
    except Exception as e:
        return f"错误: {str(e)}"
@command(
    "execute_shell",
    "Execute Shell Command, non-interactive commands only",
    '"command_line": "<command_line>"',
    CFG.execute_local_commands,
    "You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config. Do not attempt to bypass the restriction.",
)
def execute_shell(command_line: str) -> str:
    """Run a shell command synchronously and capture its output.

    Args:
        command_line (str): The command line to execute

    Returns:
        str: Combined STDOUT/STDERR of the command
    """
    previous_dir = Path.cwd()
    # Run inside the workspace unless we are already under it.
    if not previous_dir.is_relative_to(CFG.workspace_path):
        os.chdir(CFG.workspace_path)

    logger.info(
        f"执行命令 '{command_line}' in working directory '{os.getcwd()}'"
    )

    completed = subprocess.run(command_line, capture_output=True, shell=True)
    output = f"STDOUT:\n{completed.stdout}\nSTDERR:\n{completed.stderr}"

    # Restore the caller's working directory.
    os.chdir(previous_dir)
    return output
@command(
    "execute_shell_popen",
    "Execute Shell Command, non-interactive commands only",
    '"command_line": "<command_line>"',
    CFG.execute_local_commands,
    "You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config. Do not attempt to bypass the restriction.",
)
def execute_shell_popen(command_line) -> str:
    """Execute a shell command with Popen and returns an english description
    of the event and the process id.

    Args:
        command_line (str): The command line to execute

    Returns:
        str: Description of the fact that the process started and its id
    """
    current_dir = os.getcwd()
    # Change dir into workspace if necessary. Use a real path-prefix test
    # (matching execute_shell) instead of naive substring containment,
    # which mis-classifies paths like /tmp/workspace2 vs /tmp/workspace.
    if not Path(current_dir).is_relative_to(CFG.workspace_path):
        os.chdir(CFG.workspace_path)

    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
    )

    # Fire-and-forget: suppress the child's output entirely.
    do_not_show_output = subprocess.DEVNULL
    process = subprocess.Popen(
        command_line, shell=True, stdout=do_not_show_output, stderr=do_not_show_output
    )

    # Change back to whatever the prior working dir was
    os.chdir(current_dir)
    return f"Subprocess started with PID:'{str(process.pid)}'"
def we_are_running_in_a_docker_container() -> bool:
    """Detect whether this process runs inside a Docker container.

    Returns:
        bool: True when the marker file /.dockerenv is present.
    """
    # Docker creates /.dockerenv in the root of every container filesystem.
    return Path("/.dockerenv").exists()
================================================
FILE: autogpt/commands/file_operations.py
================================================
"""File operations for AutoGPT"""
from __future__ import annotations
import hashlib
import os
import os.path
from typing import Dict, Generator, Literal, Tuple
import charset_normalizer
import requests
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size
CFG = Config()  # shared config: supplies workspace_path and file_logger_path
# The three operation kinds recorded in the file-operations log.
Operation = Literal["write", "append", "delete"]
def text_checksum(text: str) -> str:
    """Get the hex checksum for the given text."""
    # MD5 is used only as a change-detection fingerprint, not for security.
    digest = hashlib.md5(text.encode("utf-8"))
    return digest.hexdigest()
def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str | None]]:
    """Parse the file-operations log and yield its entries.

    Each yielded tuple is (operation, path, checksum); checksum is None for
    deletes and for legacy write/append entries recorded without one.

    Args:
        log_path: Path to the operations log file; a missing file yields nothing.

    Yields:
        (operation, path, checksum) tuples in log order.
    """
    try:
        log = open(log_path, "r", encoding="utf-8")
    except FileNotFoundError:
        return
    # Close the handle even if the consumer abandons the generator early
    # (the original only closed after full iteration).
    with log:
        for line in log:
            line = line.replace("File Operation Logger", "").strip()
            if not line:
                continue
            operation, tail = line.split(": ", maxsplit=1)
            operation = operation.strip()
            if operation in ("write", "append"):
                try:
                    path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
                except ValueError:
                    # No " #checksum" suffix present.
                    path, checksum = tail.strip(), None
                yield (operation, path, checksum)
            elif operation == "delete":
                yield (operation, tail.strip(), None)
def file_operations_state(log_path: str) -> Dict:
    """Replay the operations log and return the resulting file state.

    Applies each logged write/append by storing the file's checksum and
    removes entries for logged deletes, producing a map of every file the
    log still considers present.

    Returns:
        A dictionary mapping file paths to their checksums.

    Raises:
        FileNotFoundError: If CFG.file_logger_path is not found.
        ValueError: If the log file content is not in the expected format.
    """
    tracked: Dict = {}
    for operation, path, checksum in operations_from_log(log_path):
        if operation in ("write", "append"):
            tracked[path] = checksum
        elif operation == "delete":
            # NOTE(review): a delete for an untracked path raises KeyError,
            # exactly as before.
            del tracked[path]
    return tracked
def is_duplicate_operation(
    operation: Operation, filename: str, checksum: str | None = None
) -> bool:
    """Check whether the requested operation would be a no-op.

    Args:
        operation: The operation to check for
        filename: The name of the file to check for
        checksum: The checksum of the contents to be written

    Returns:
        True if the operation has already been performed on the file
    """
    state = file_operations_state(CFG.file_logger_path)
    if operation == "delete":
        # Deleting a file the log no longer tracks is redundant.
        return filename not in state
    if operation == "write":
        # Re-writing identical content (same checksum) is redundant.
        return state.get(filename) == checksum
    return False
def log_operation(operation: str, filename: str, checksum: str | None = None) -> None:
    """Append an entry to the file-operations log (file_logger.txt).

    Fix: the entry's f-string had lost its `{filename}` interpolation (it
    recorded a literal placeholder), which broke round-tripping through
    operations_from_log / is_duplicate_operation.

    Args:
        operation: The operation to log
        filename: The name of the file the operation was performed on
        checksum: The checksum of the contents to be written
    """
    log_entry = f"{operation}: {filename}"
    if checksum is not None:
        log_entry += f" #{checksum}"
    logger.debug(f"Logging file operation: {log_entry}")
    # should_log=False prevents the log write itself from being logged.
    append_to_file(CFG.file_logger_path, f"{log_entry}\n", should_log=False)
def split_file(
    content: str, max_length: int = 4000, overlap: int = 0
) -> Generator[str, None, None]:
    """
    Split text into chunks of a specified maximum length with a specified overlap
    between chunks.

    :param content: The input text to be split into chunks
    :param max_length: The maximum length of each chunk,
        default is 4000 (about 1k token)
    :param overlap: The number of overlapping characters between chunks,
        default is no overlap
    :return: A generator yielding chunks of text
    """
    total = len(content)
    cursor = 0
    while cursor < total:
        window_end = cursor + max_length
        if window_end + overlap < total:
            # Interior chunk. NOTE(review): the slice stops one character
            # short of window_end + overlap — preserved exactly as-is.
            piece = content[cursor : window_end + overlap - 1]
        else:
            # Final chunk: take everything that remains.
            piece = content[cursor:total]
        # Stop once the remainder was already fully covered by the overlap.
        if len(piece) <= overlap:
            break
        yield piece
        cursor += max_length - overlap
@command("read_file", "Read file", '"filename": "<filename>"')
def read_file(filename: str) -> str:
    """Read a file, auto-detecting its encoding, and return the contents.

    Args:
        filename (str): The name of the file to read

    Returns:
        str: The contents of the file, or an error message on failure
    """
    try:
        # charset_normalizer picks the most likely encoding for the file.
        best_match = charset_normalizer.from_path(filename).best()
        logger.debug(f"Read file '(unknown)' with encoding '{best_match.encoding}'")
        return str(best_match)
    except Exception as err:
        return f"错误: {err}"
def ingest_file(
    filename: str, memory, max_length: int = 4000, overlap: int = 200
) -> None:
    """
    Ingest a file by reading its content, splitting it into chunks with a specified
    maximum length and overlap, and adding the chunks to the memory storage.

    Fix: log and memory strings had lost their `{filename}` interpolation
    (they contained a literal placeholder); the actual filename is now
    interpolated so ingestion can be traced and recalled per file.

    :param filename: The name of the file to ingest
    :param memory: An object with an add() method to store the chunks in memory
    :param max_length: The maximum length of each chunk, default is 4000
    :param overlap: The number of overlapping characters between chunks, default is 200
    """
    try:
        logger.info(f"操作文件中 {filename}")
        content = read_file(filename)
        content_length = len(content)
        logger.info(f"文件长度: {content_length} 字符")

        chunks = list(split_file(content, max_length=max_length, overlap=overlap))
        num_chunks = len(chunks)
        for i, chunk in enumerate(chunks):
            logger.info(f"注入块 {i + 1} / {num_chunks} 到记忆中")
            memory_to_add = (
                f"文件名: {filename}\n" f"内容块#{i + 1}/{num_chunks}: {chunk}"
            )
            memory.add(memory_to_add)

        logger.info(f"注入完成 {num_chunks} 块 from {filename}.")
    except Exception as err:
        # Best-effort: ingestion failures are logged, never raised.
        logger.info(f"注入文件出现错误 '{filename}': {err}")
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
def write_to_file(filename: str, text: str) -> str:
    """Write text to a file, skipping writes that would change nothing.

    Args:
        filename (str): The name of the file to write to
        text (str): The text to write to the file

    Returns:
        str: A message indicating success or failure
    """
    checksum = text_checksum(text)
    # A write with the same checksum as the last logged one is a no-op.
    if is_duplicate_operation("write", filename, checksum):
        return "错误: 文件已经更新."
    try:
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, "w", encoding="utf-8") as handle:
            handle.write(text)
        log_operation("write", filename, checksum)
        return "文件写入成功."
    except Exception as err:
        return f"错误: {err}"
@command(
    "append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
)
def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
    """Append text to a file.

    Args:
        filename (str): The name of the file to append to
        text (str): The text to append to the file
        should_log (bool): Should log output

    Returns:
        str: A message indicating success or failure
    """
    try:
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, "a", encoding="utf-8") as handle:
            handle.write(text)

        if should_log:
            # Checksum reflects the file's full post-append contents.
            with open(filename, "r", encoding="utf-8") as handle:
                post_checksum = text_checksum(handle.read())
            log_operation("append", filename, checksum=post_checksum)

        return "文本追加成功."
    except Exception as err:
        return f"错误: {err}"
@command("delete_file", "Delete file", '"filename": "<filename>"')
def delete_file(filename: str) -> str:
    """Delete a file.

    Args:
        filename (str): The name of the file to delete

    Returns:
        str: A message indicating success or failure
    """
    # Skip deletes already recorded in the operations log.
    if is_duplicate_operation("delete", filename):
        return "错误: 文件已经删除."
    try:
        os.remove(filename)
        log_operation("delete", filename)
        return "文件删除成功."
    except Exception as err:
        return f"错误: {err}"
@command("list_files", "List Files in Directory", '"directory": "<directory>"')
def list_files(directory: str) -> list[str]:
    """Recursively list non-hidden files under a directory.

    Args:
        directory (str): The directory to search in

    Returns:
        list[str]: File paths expressed relative to the workspace root
    """
    found_files = []
    for root, _, files in os.walk(directory):
        for entry in files:
            # Skip hidden files (dotfiles).
            if entry.startswith("."):
                continue
            found_files.append(
                os.path.relpath(os.path.join(root, entry), CFG.workspace_path)
            )
    return found_files
@command(
    "download_file",
    "Download File",
    '"url": "<url>", "filename": "<filename>"',
    CFG.allow_downloads,
    "错误: 你没有下载到本地的权限.",
)
def download_file(url, filename):
    """Download a file from a URL to a local path, with retries and progress.

    Fix: the success message had lost its `{filename}` interpolation (it
    reported a literal placeholder instead of the saved file's name).

    Args:
        url (str): URL of the file to download
        filename (str): Filename to save the file as

    Returns:
        str: A success message with the downloaded size, or an error message
    """
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
        with Spinner(message) as spinner:
            session = requests.Session()
            # Retry transient gateway errors with exponential backoff.
            retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
            adapter = HTTPAdapter(max_retries=retry)
            session.mount("http://", adapter)
            session.mount("https://", adapter)

            total_size = 0
            downloaded_size = 0

            with session.get(url, allow_redirects=True, stream=True) as r:
                r.raise_for_status()
                total_size = int(r.headers.get("Content-Length", 0))
                downloaded_size = 0

                with open(filename, "wb") as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
                        downloaded_size += len(chunk)

                        # Update the progress message
                        progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
                        spinner.update_message(f"{message} {progress}")

            return f'文件下载到本地成功,文件名: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
    except requests.HTTPError as err:
        return f"文件下载过程中遇到HTTP错误: {err}"
    except Exception as err:
        return f"错误: {err}"
================================================
FILE: autogpt/commands/git_operations.py
================================================
"""Git operations for autogpt"""
from git.repo import Repo
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.url_utils.validators import validate_url
CFG = Config()  # shared config: GitHub credentials used for authenticated clones
@command(
    "clone_repository",
    "Clone Repository",
    '"url": "<repository_url>", "clone_path": "<clone_path>"',
    CFG.github_username and CFG.github_api_key,
    "配置 github_username 和 github_api_key.",
)
@validate_url
def clone_repository(url: str, clone_path: str) -> str:
    """Clone a GitHub repository locally.

    Args:
        url (str): The URL of the repository to clone.
        clone_path (str): The path to clone the repository to.

    Returns:
        str: The result of the clone operation.
    """
    # Inject basic-auth credentials right after the scheme separator ("//").
    parts = url.split("//")
    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(parts)
    try:
        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
    except Exception as e:
        return f"错误: {str(e)}"
    return f"""Cloned {url} to {clone_path}"""
================================================
FILE: autogpt/commands/google_search.py
================================================
"""Google search command for Autogpt."""
from __future__ import annotations
import json
from duckduckgo_search import ddg
from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()  # shared config: google_api_key / custom_search_engine_id used below
@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
    """Search DuckDuckGo and return the results as a JSON string.

    Args:
        query (str): The search query.
        num_results (int): The number of results to return.

    Returns:
        str: The results of the search.
    """
    # Empty queries and empty result sets both serialize to "[]".
    if not query:
        return json.dumps([])

    hits = ddg(query, max_results=num_results)
    if not hits:
        return json.dumps([])

    collected = [hit for hit in hits]
    rendered = json.dumps(collected, ensure_ascii=False, indent=4)
    return safe_google_results(rendered)
@command(
    "google",
    "Google Search",
    '"query": "<query>"',
    bool(CFG.google_api_key),
    "配置 google_api_key.",
)
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
    """Return the results of a Google search using the official Google API

    Args:
        query (str): The search query.
        num_results (int): The number of results to return.

    Returns:
        str: The results of the search.
    """
    # Imported lazily so googleapiclient is only required when the official
    # API path is actually enabled (google_api_key configured).
    from googleapiclient.discovery import build
    from googleapiclient.errors import HttpError

    try:
        # Get the Google API key and Custom Search Engine ID from the config file
        api_key = CFG.google_api_key
        custom_search_engine_id = CFG.custom_search_engine_id

        # Initialize the Custom Search API service
        service = build("customsearch", "v1", developerKey=api_key)

        # Send the search query and retrieve the results
        result = (
            service.cse()
            .list(q=query, cx=custom_search_engine_id, num=num_results)
            .execute()
        )

        # Extract the search result items from the response
        search_results = result.get("items", [])

        # Create a list of only the URLs from the search results
        search_results_links = [item["link"] for item in search_results]

    except HttpError as e:
        # Handle errors in the API call
        error_details = json.loads(e.content.decode())

        # Check if the error is related to an invalid or missing API key
        if error_details.get("error", {}).get(
            "code"
        ) == 403 and "invalid API key" in error_details.get("error", {}).get(
            "message", ""
        ):
            return "错误: 你提供的Google API key是错误的或者缺失."
        else:
            return f"错误: {e}"

    # google_result can be a list or a string depending on the search results

    # Return the list of search result URLs
    return safe_google_results(search_results_links)
def safe_google_results(results: str | list) -> str:
"""
Return the results of a google search in a safe format.
Args:
results (str | list): The search results.
Returns:
str: The results of the search.
"""
if isinstance(results, list):
safe_message = json.dumps(
[result.encode("utf-8", "ignore") for result in results]
)
else:
safe_message = results.encode("utf-8", "ignore").decode("utf-8")
return safe_message
================================================
FILE: autogpt/commands/image_gen.py
================================================
""" Image Generation Module for AutoGPT."""
import io
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.logs import logger
CFG = Config()
@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
def generate_image(prompt: str, size: int = 256) -> str:
"""Generate an image from a prompt.
Args:
prompt (str): The prompt to use
size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace)
Returns:
str: The filename of the image
"""
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
# DALL-E
if CFG.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename, size)
# HuggingFace
elif CFG.image_provider == "huggingface":
return generate_image_with_hf(prompt, filename)
# SD WebUI
elif CFG.image_provider == "sdwebui":
return generate_image_with_sd_webui(prompt, filename, size)
return "没有配置图片功能"
def generate_image_with_hf(prompt: str, filename: str) -> str:
    """Generate an image with HuggingFace's inference API.

    Args:
        prompt (str): The prompt to use
        filename (str): The filename to save the image to

    Returns:
        str: A confirmation message containing the saved filename

    Raises:
        ValueError: If no HuggingFace API token is configured.
    """
    API_URL = (
        f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
    )
    if CFG.huggingface_api_token is None:
        raise ValueError(
            "你需要再配置文件中配置Hugging Face API token."
        )
    headers = {
        "Authorization": f"Bearer {CFG.huggingface_api_token}",
        # Ask the API for a fresh generation instead of a cached image.
        "X-Use-Cache": "false",
    }
    response = requests.post(
        API_URL,
        headers=headers,
        json={
            "inputs": prompt,
        },
    )
    image = Image.open(io.BytesIO(response.content))
    logger.info(f"图片生成基于下面prompt:{prompt}")
    image.save(filename)
    # Fix: the f-string previously had no placeholder and returned a
    # literal instead of the path the image was saved to.
    return f"Saved to disk:{filename}"
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
    """Generate an image with DALL-E.

    Args:
        prompt (str): The prompt to use
        filename (str): The filename to save the image to
        size (int): The size of the image (snapped to 256/512/1024)

    Returns:
        str: A confirmation message containing the saved filename
    """
    # DALL-E only accepts three square sizes; snap to the closest one.
    if size not in [256, 512, 1024]:
        closest = min([256, 512, 1024], key=lambda x: abs(x - size))
        logger.info(
            f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
        )
        size = closest
    response = openai.Image.create(
        prompt=prompt,
        n=1,
        size=f"{size}x{size}",
        response_format="b64_json",
        api_key=CFG.openai_api_key,
    )
    logger.info(f"图片生成基于下面prompt:{prompt}")
    # The API returns base64-encoded image data; write it out verbatim.
    image_data = b64decode(response["data"][0]["b64_json"])
    with open(filename, mode="wb") as png:
        png.write(image_data)
    # Fix: the f-string previously had no placeholder and returned a
    # literal instead of the path the image was saved to.
    return f"Saved to disk:{filename}"
def generate_image_with_sd_webui(
    prompt: str,
    filename: str,
    size: int = 512,
    negative_prompt: str = "",
    extra: dict | None = None,
) -> str:
    """Generate an image with a Stable Diffusion WebUI instance.

    Args:
        prompt (str): The prompt to use
        filename (str): The filename to save the image to
        size (int, optional): The width/height of the image. Defaults to 512.
        negative_prompt (str, optional): The negative prompt to use. Defaults to "".
        extra (dict, optional): Extra parameters merged into the API payload.
            Defaults to None (was a mutable `{}` default, shared across calls).

    Returns:
        str: A confirmation message containing the saved filename
    """
    # Create a session and set the basic auth if needed
    s = requests.Session()
    if CFG.sd_webui_auth:
        username, password = CFG.sd_webui_auth.split(":")
        s.auth = (username, password or "")
    # Generate the image. Fix: post through the session so the basic-auth
    # credentials configured above are actually sent (the original called
    # requests.post directly and never used the session).
    response = s.post(
        f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
        json={
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "sampler_index": "DDIM",
            "steps": 20,
            "cfg_scale": 7.0,
            "width": size,
            "height": size,
            "n_iter": 1,
            **(extra or {}),
        },
    )
    logger.info(f"图片生成基于下面prompt:{prompt}")
    # Save the first returned (base64-encoded) image to disk.
    response = response.json()
    b64 = b64decode(response["images"][0].split(",", 1)[0])
    image = Image.open(io.BytesIO(b64))
    image.save(filename)
    # Fix: the f-string previously had no placeholder and returned a
    # literal instead of the path the image was saved to.
    return f"Saved to disk:{filename}"
================================================
FILE: autogpt/commands/improve_code.py
================================================
from __future__ import annotations
import json
from autogpt.commands.command import command
from autogpt.llm import call_ai_function
@command(
    "improve_code",
    "获取优化代码",
    '"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
)
def improve_code(suggestions: list[str], code: str) -> str:
    """
    Ask the LLM to apply the given improvement suggestions to the code.

    Parameters:
        suggestions (list): A list of suggestions around what needs to be improved.
        code (str): Code to be improved.

    Returns:
        A result string from create chat completion. Improved code in response.
    """
    # The AI "implements" this signature; suggestions are serialized to JSON
    # so they arrive as a single string argument.
    function_string = (
        "def generate_improved_code(suggestions: list[str], code: str) -> str:"
    )
    description_string = (
        "基于优化建议提供优化后的代码,不要做其他修改."
    )
    return call_ai_function(
        function_string, [json.dumps(suggestions), code], description_string
    )
================================================
FILE: autogpt/commands/task_statuses.py
================================================
"""Task Statuses module."""
from __future__ import annotations
from typing import NoReturn
from autogpt.commands.command import command
from autogpt.logs import logger
@command(
    "task_complete",
    "Task Complete (Shutdown)",
    '"reason": "<reason>"',
)
def task_complete(reason: str) -> NoReturn:
    """
    Log the shutdown reason and terminate the program.

    Parameters:
        reason (str): The reason for shutting down.

    Raises:
        SystemExit: Always; this function never returns normally.
    """
    import sys  # local import keeps this fix self-contained

    logger.info(title="关闭中...\n", message=reason)
    # sys.exit() instead of quit(): quit() is an interactive helper added
    # by the site module and may not exist (e.g. under `python -S`);
    # sys.exit() raises the same SystemExit reliably.
    sys.exit()
================================================
FILE: autogpt/commands/times.py
================================================
from datetime import datetime
def get_datetime() -> str:
    """Return a human-readable string with the current date and time.

    Returns:
        str: "当前日期与时间: " followed by the local time formatted as
            YYYY-MM-DD HH:MM:SS.
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return "当前日期与时间: " + timestamp
================================================
FILE: autogpt/commands/twitter.py
================================================
"""A module that contains a command to send a tweet."""
import os
import tweepy
from autogpt.commands.command import command
@command(
    "send_tweet",
    "发Tweet",
    '"tweet_text": "<tweet_text>"',
)
def send_tweet(tweet_text: str) -> str:
    """
    Post a tweet using credentials taken from the environment.

    Args:
        tweet_text (str): Text to be tweeted.

    Returns:
        A result message describing the outcome of sending the tweet.
    """
    # Credentials come from the TW_* environment variables.
    auth = tweepy.OAuthHandler(
        os.environ.get("TW_CONSUMER_KEY"),
        os.environ.get("TW_CONSUMER_SECRET"),
    )
    auth.set_access_token(
        os.environ.get("TW_ACCESS_TOKEN"),
        os.environ.get("TW_ACCESS_TOKEN_SECRET"),
    )
    api = tweepy.API(auth)
    try:
        api.update_status(tweet_text)
        return "Tweet发送成功!"
    except tweepy.TweepyException as e:
        # NOTE(review): tweepy v4 exceptions may not expose `.reason` —
        # confirm against the pinned tweepy version.
        return f"发送tweet错误: {e.reason}"
================================================
FILE: autogpt/commands/web_playwright.py
================================================
"""Web scraping commands using Playwright"""
from __future__ import annotations
from autogpt.logs import logger
try:
from playwright.sync_api import sync_playwright
except ImportError:
logger.info(
"Playwright not installed. Please install it with 'pip install playwright' to use."
)
from bs4 import BeautifulSoup
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
def scrape_text(url: str) -> str:
    """Fetch *url* with a headless Chromium browser and return its text.

    Args:
        url (str): The URL to scrape text from

    Returns:
        str: The page text with scripts/styles removed, or an error string.
    """
    with sync_playwright() as playwright:
        browser = playwright.chromium.launch()
        page = browser.new_page()
        try:
            page.goto(url)
            soup = BeautifulSoup(page.content(), "html.parser")
            # Drop non-visible content before extracting text.
            for tag in soup(["script", "style"]):
                tag.extract()
            raw = soup.get_text()
            stripped_lines = (ln.strip() for ln in raw.splitlines())
            pieces = (
                part.strip() for ln in stripped_lines for part in ln.split(" ")
            )
            text = "\n".join(piece for piece in pieces if piece)
        except Exception as e:
            text = f"Error: {str(e)}"
        finally:
            # Always release the browser process.
            browser.close()
    return text
def scrape_links(url: str) -> str | list[str]:
    """Fetch *url* with a headless Chromium browser and return its links.

    Args:
        url (str): The URL to scrape links from

    Returns:
        Union[str, List[str]]: The formatted links, or an error string.
    """
    with sync_playwright() as playwright:
        browser = playwright.chromium.launch()
        page = browser.new_page()
        try:
            page.goto(url)
            soup = BeautifulSoup(page.content(), "html.parser")
            # Drop non-visible content before collecting hyperlinks.
            for tag in soup(["script", "style"]):
                tag.extract()
            formatted_links = format_hyperlinks(extract_hyperlinks(soup, url))
        except Exception as e:
            formatted_links = f"Error: {str(e)}"
        finally:
            # Always release the browser process.
            browser.close()
    return formatted_links
================================================
FILE: autogpt/commands/web_requests.py
================================================
"""Browse a webpage and summarize it using the LLM model"""
from __future__ import annotations
import requests
from bs4 import BeautifulSoup
from requests import Response
from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url
CFG = Config()
session = requests.Session()
session.headers.update({"User-Agent": CFG.user_agent})
@validate_url
def get_response(
    url: str, timeout: int = 10
) -> tuple[None, str] | tuple[Response, None]:
    """Fetch *url* with the shared HTTP session.

    Args:
        url (str): The URL to get the response from
        timeout (int): The timeout for the HTTP request

    Returns:
        tuple[None, str] | tuple[Response, None]: Either (response, None)
        on success or (None, error message) on failure.

    Raises:
        ValueError: If the URL is invalid
        requests.exceptions.RequestException: If the HTTP request fails
    """
    try:
        response = session.get(url, timeout=timeout)
    except ValueError as ve:
        # Invalid URL format
        return None, f"Error: {str(ve)}"
    except requests.exceptions.RequestException as re:
        # Connection errors, timeouts, etc.
        return None, f"Error: {str(re)}"
    # Surface 4xx/5xx statuses as an error result rather than raising.
    if response.status_code >= 400:
        return None, f"Error: HTTP {str(response.status_code)} error"
    return response, None
def scrape_text(url: str) -> str:
    """Scrape the visible text from a webpage.

    Args:
        url (str): The URL to scrape text from

    Returns:
        str: The scraped text, or an error message.
    """
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
        return "Error: Could not get response"
    soup = BeautifulSoup(response.text, "html.parser")
    # Drop non-visible content before extracting text.
    for tag in soup(["script", "style"]):
        tag.extract()
    raw_text = soup.get_text()
    stripped = (line.strip() for line in raw_text.splitlines())
    pieces = (part.strip() for line in stripped for part in line.split(" "))
    return "\n".join(piece for piece in pieces if piece)
def scrape_links(url: str) -> str | list[str]:
    """Scrape the hyperlinks from a webpage.

    Args:
        url (str): The URL to scrape links from

    Returns:
        str | list[str]: The formatted links, or an error message.
    """
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
        return "Error: Could not get response"
    soup = BeautifulSoup(response.text, "html.parser")
    # Drop non-visible content before collecting hyperlinks.
    for tag in soup(["script", "style"]):
        tag.extract()
    return format_hyperlinks(extract_hyperlinks(soup, url))
def create_message(chunk, question):
    """Build a user-role chat message asking the LLM to answer *question*
    from *chunk*, falling back to summarizing the chunk."""
    prompt = (
        f'"""{chunk}""" Using the above text, answer the following'
        f' question: "{question}" -- if the question cannot be answered using the'
        " text, summarize the text."
    )
    return {"role": "user", "content": prompt}
================================================
FILE: autogpt/commands/web_selenium.py
================================================
"""Selenium web scraping module."""
from __future__ import annotations
import logging
from pathlib import Path
from sys import platform
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
import autogpt.processing.text as summary
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url
FILE_DIR = Path(__file__).parent.parent
CFG = Config()
@command(
    "browse_website",
    "浏览网页",
    '"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
)
@validate_url
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
    """Browse a website and return the answer and links to the user

    Args:
        url (str): The url of the website to browse
        question (str): The question asked by the user

    Returns:
        Tuple[str, WebDriver]: The answer plus links, and the webdriver
        (None in place of the driver when scraping fails).
    """
    try:
        driver, text = scrape_text_with_selenium(url)
    except WebDriverException as e:
        # WebDriver errors are long and noisy; keep only the first line.
        msg = e.msg.split("\n")[0]
        return f"Error: {msg}", None
    add_header(driver)
    summary_text = summary.summarize_text(url, text, question, driver)
    # Keep at most the first five links.
    links = scrape_links_with_selenium(driver, url)[:5]
    close_browser(driver)
    return f"从网页中获取的回答: {summary_text} \n \n 链接: {links}", driver
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
    """Scrape text from a website using selenium.

    Launches the browser configured in CFG.selenium_web_browser, loads the
    page, and returns both the live webdriver (for follow-up calls such as
    link scraping) and the page's visible text.

    Args:
        url (str): The url of the website to scrape

    Returns:
        Tuple[WebDriver, str]: The webdriver and the text scraped from the website
    """
    # Selenium is very chatty; suppress everything below CRITICAL.
    logging.getLogger("selenium").setLevel(logging.CRITICAL)
    options_available = {
        "chrome": ChromeOptions,
        "safari": SafariOptions,
        "firefox": FirefoxOptions,
    }
    # Instantiate the options class matching the configured browser.
    options = options_available[CFG.selenium_web_browser]()
    # Present a normal desktop user agent; some sites block headless defaults.
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
    )
    if CFG.selenium_web_browser == "firefox":
        if CFG.selenium_headless:
            options.headless = True
            options.add_argument("--disable-gpu")
        # webdriver_manager downloads/caches a matching geckodriver.
        driver = webdriver.Firefox(
            executable_path=GeckoDriverManager().install(), options=options
        )
    elif CFG.selenium_web_browser == "safari":
        # Requires a bit more setup on the users end
        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
        driver = webdriver.Safari(options=options)
    else:
        # Default: Chrome/Chromium.
        if platform == "linux" or platform == "linux2":
            # Flags commonly needed to run Chrome inside containers/CI.
            options.add_argument("--disable-dev-shm-usage")
            options.add_argument("--remote-debugging-port=9222")
            options.add_argument("--no-sandbox")
        if CFG.selenium_headless:
            options.add_argument("--headless=new")
            options.add_argument("--disable-gpu")
        # Prefer a system-installed chromedriver; otherwise download one.
        chromium_driver_path = Path("/usr/bin/chromedriver")
        driver = webdriver.Chrome(
            executable_path=chromium_driver_path
            if chromium_driver_path.exists()
            else ChromeDriverManager().install(),
            options=options,
        )
    driver.get(url)
    # Wait (up to 10s) until the body element exists before reading the DOM.
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.TAG_NAME, "body"))
    )
    # Get the HTML content directly from the browser's DOM
    page_source = driver.execute_script("return document.body.outerHTML;")
    soup = BeautifulSoup(page_source, "html.parser")
    # Remove non-visible content before extracting text.
    for script in soup(["script", "style"]):
        script.extract()
    text = soup.get_text()
    # Normalize whitespace: strip each line, split on spaces, drop empties.
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    text = "\n".join(chunk for chunk in chunks if chunk)
    return driver, text
def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]:
    """Collect and format the hyperlinks on the driver's current page.

    Args:
        driver (WebDriver): The webdriver whose current page is scanned
        url (str): Base URL used when resolving the page's links

    Returns:
        List[str]: The formatted links scraped from the website
    """
    soup = BeautifulSoup(driver.page_source, "html.parser")
    # Drop non-visible content before collecting hyperlinks.
    for tag in soup(["script", "style"]):
        tag.extract()
    return format_hyperlinks(extract_hyperlinks(soup, url))
def close_browser(driver: WebDriver) -> None:
    """Shut down the webdriver and its browser process.

    Args:
        driver (WebDriver): The webdriver to close

    Returns:
        None
    """
    driver.quit()
def add_header(driver: WebDriver) -> None:
    """Inject the bundled overlay.js script into the current page.

    Args:
        driver (WebDriver): The webdriver to use to add the header

    Returns:
        None
    """
    try:
        # overlay.js ships alongside the package under autogpt/js/.
        overlay_script = (Path(FILE_DIR) / "js" / "overlay.js").read_text()
        driver.execute_script(overlay_script)
    except Exception as e:
        # Best-effort: a missing/failing overlay should not abort browsing.
        print(f"Error executing overlay.js: {e}")
================================================
FILE: autogpt/commands/write_tests.py
================================================
"""A module that contains a function to generate test cases for the submitted code."""
from __future__ import annotations
import json
from autogpt.commands.command import command
from autogpt.llm import call_ai_function
@command(
"write_tests",
"写入测试",
'"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
)
def write_tests(code: str, focus: list[str]) -> str:
"""
A function that takes in code and focus topics and returns a response from create
chat completion api call.
Parameters:
focus (list): A list of suggestions around what needs to be improved.
code (str): Code for test cases to be generated against.
Returns:
A result string from create chat completion. Test cases for the submitted code
in response.
"""
function_string = (
"def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
)
args = [code, json.dumps(focus)]
description_string = (
"如果需要,为现有代码生成测试用例,重点关注特定领域。"
)
return call_ai_function(function_string, args, description_string)
================================================
FILE: autogpt/config/__init__.py
================================================
"""
This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config, check_openai_api_key
__all__ = [
"check_openai_api_key",
"AIConfig",
"Config",
]
================================================
FILE: autogpt/config/ai_config.py
================================================
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the AIConfig class object that contains the configuration
"""
from __future__ import annotations
import os
import platform
from pathlib import Path
from typing import Any, Optional, Type
import distro
import yaml
from autogpt.prompts.generator import PromptGenerator
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
class AIConfig:
    """
    Holds the user-supplied configuration for a single AI run.

    Attributes:
        ai_name (str): The name of the AI.
        ai_role (str): The description of the AI's role.
        ai_goals (list): The list of objectives the AI is supposed to complete.
        api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
    """
    def __init__(
        self,
        ai_name: str = "",
        ai_role: str = "",
        ai_goals: list | None = None,
        api_budget: float = 0.0,
    ) -> None:
        """
        Initialize a class instance.

        Parameters:
            ai_name (str): The name of the AI.
            ai_role (str): The description of the AI's role.
            ai_goals (list): The list of objectives the AI is supposed to complete.
            api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
        Returns:
            None
        """
        if ai_goals is None:
            ai_goals = []
        self.ai_name = ai_name
        self.ai_role = ai_role
        self.ai_goals = ai_goals
        self.api_budget = api_budget
        # Populated later (see construct_full_prompt / agent setup).
        self.prompt_generator = None
        self.command_registry = None
    @staticmethod
    def load(config_file: str = SAVE_FILE) -> "AIConfig":
        """
        Load ai_name, ai_role, ai_goals and api_budget from a yaml file if it
        exists, else return an AIConfig with default (empty) parameters.

        Parameters:
            config_file (str): The path to the config yaml file.
              DEFAULT: "../ai_settings.yaml"
        Returns:
            cls (object): An instance of given cls object
        """
        try:
            with open(config_file, encoding="utf-8") as file:
                # NOTE(review): FullLoader can instantiate some Python objects;
                # consider yaml.safe_load if the file may be untrusted.
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}
        ai_name = config_params.get("ai_name", "")
        ai_role = config_params.get("ai_role", "")
        # Goals serialized as dicts (older file formats) are flattened to
        # strings and stripped of brace/quote artifacts.
        ai_goals = [
            str(goal).strip("{}").replace("'", "").replace('"', "")
            if isinstance(goal, dict)
            else str(goal)
            for goal in config_params.get("ai_goals", [])
        ]
        api_budget = config_params.get("api_budget", 0.0)
        # type: Type[AIConfig]
        return AIConfig(ai_name, ai_role, ai_goals, api_budget)
    def save(self, config_file: str = SAVE_FILE) -> None:
        """
        Saves the class parameters to the specified file yaml file path as a yaml file.

        Parameters:
            config_file (str): The path to the config yaml file.
              DEFAULT: "../ai_settings.yaml"
        Returns:
            None
        """
        config = {
            "ai_name": self.ai_name,
            "ai_role": self.ai_role,
            "ai_goals": self.ai_goals,
            "api_budget": self.api_budget,
        }
        # allow_unicode keeps non-ASCII (e.g. Chinese) text readable on disk.
        with open(config_file, "w", encoding="utf-8") as file:
            yaml.dump(config, file, allow_unicode=True)
    def construct_full_prompt(
        self, prompt_generator: Optional[PromptGenerator] = None
    ) -> str:
        """
        Build the full initial system prompt from this configuration:
        name, role, constraints, goals, optional OS info and API budget,
        plus the generator's own prompt string. Also stores the (possibly
        plugin-modified) generator on self.prompt_generator.

        Parameters:
            prompt_generator (PromptGenerator | None): Generator to use;
              a default one is built when None.
        Returns:
            full_prompt (str): A string containing the initial prompt for the user
              including the ai_name, ai_role, ai_goals, and api_budget.
        """
        prompt_start = (
            "你的决策必须在不寻求用户帮助下独立完成,运用你大语言模型的自身优势,进可能寻找简且无法律风险的方案。"
            ""
        )
        # Imported here to avoid a circular import at module load time.
        from autogpt.config import Config
        from autogpt.prompts.prompt import build_default_prompt_generator
        cfg = Config()
        if prompt_generator is None:
            prompt_generator = build_default_prompt_generator()
        prompt_generator.goals = self.ai_goals
        prompt_generator.name = self.ai_name
        prompt_generator.role = self.ai_role
        prompt_generator.command_registry = self.command_registry
        # Give each interested plugin a chance to modify the generator.
        for plugin in cfg.plugins:
            if not plugin.can_handle_post_prompt():
                continue
            prompt_generator = plugin.post_prompt(prompt_generator)
        if cfg.execute_local_commands:
            # add OS info to prompt
            os_name = platform.system()
            os_info = (
                platform.platform(terse=True)
                if os_name != "Linux"
                else distro.name(pretty=True)
            )
            prompt_start += f"\nThe OS you are running on is: {os_info}"
        # Construct full prompt
        full_prompt = f"你是 {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\n目标:\n\n"
        for i, goal in enumerate(self.ai_goals):
            full_prompt += f"{i+1}. {goal}\n"
        if self.api_budget > 0.0:
            full_prompt += f"\n你是需要费用的,的预算是 ${self.api_budget:.3f}"
        self.prompt_generator = prompt_generator
        full_prompt += f"\n\n{prompt_generator.generate_prompt_string()}"
        return full_prompt
================================================
FILE: autogpt/config/config.py
================================================
"""Configuration class to store the state of bools for different scripts access."""
import os
from typing import List
import openai
import yaml
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from autogpt.singleton import Singleton
class Config(metaclass=Singleton):
"""
Configuration class to store the state of bools for different scripts access.
"""
def __init__(self) -> None:
"""Initialize the Config class"""
self.workspace_path = None
self.file_logger_path = None
self.debug_mode = False
self.continuous_mode = False
self.continuous_limit = 0
self.speak_mode = False
self.skip_reprompt = False
self.allow_downloads = False
self.skip_news = False
self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
self.exit_key = os.getenv("EXIT_KEY", "n")
disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES")
if disabled_command_categories:
self.disabled_command_categories = disabled_command_categories.split(",")
else:
self.disabled_command_categories = []
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002")
self.embedding_tokenizer = os.getenv("EMBEDDING_TOKENIZER", "cl100k_base")
self.embedding_token_limit = int(os.getenv("EMBEDDING_TOKEN_LIMIT", 8191))
self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
self.browse_spacy_language_model = os.getenv(
"BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
)
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.temperature = float(os.getenv("TEMPERATURE", "0"))
self.use_azure = os.getenv("USE_AZURE") == "True"
self.execute_local_commands = (
os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
)
self.restrict_to_workspace = (
os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
)
if self.use_azure:
self.load_azure_config()
openai.api_type = self.openai_api_type
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
self.use_mac_os_tts = False
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"
self.use_brian_tts = False
self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
self.github_api_key = os.getenv("GITHUB_API_KEY")
self.github_username = os.getenv("GITHUB_USERNAME")
self.google_api_key = os.getenv("GOOGLE_API_KEY")
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
self.pinecone_region = os.getenv("PINECONE_ENV")
self.weaviate_host = os.getenv("WEAVIATE_HOST")
self.weaviate_port = os.getenv("WEAVIATE_PORT")
self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
self.use_weaviate_embedded = (
os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
)
# milvus or zilliz cloud configuration.
self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
self.milvus_username = os.getenv("MILVUS_USERNAME")
self.milvus_password = os.getenv("MILVUS_PASSWORD")
self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"
self.image_provider = os.getenv("IMAGE_PROVIDER")
self.image_size = int(os.getenv("IMAGE_SIZE", 256))
self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
self.huggingface_image_model = os.getenv(
"HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
)
self.huggingface_audio_to_text_model = os.getenv(
"HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
)
self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")
# Selenium browser settings
self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"
# User agent header to use when making HTTP requests
# Some websites might just completely deny request with an error code if
# no user agent was found.
self.user_agent = os.getenv(
"USER_AGENT",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
)
self.redis_host = os.getenv("REDIS_HOST", "localhost")
self.redis_port = os.getenv("REDIS_PORT", "6379")
self.redis_password = os.getenv("REDIS_PASSWORD", "")
self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
# Note that indexes must be created on db 0 in redis, this is not configurable.
self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
self.plugins: List[AutoGPTPluginTemplate] = []
self.plugins_openai = []
plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
if plugins_allowlist:
self.plugins_allowlist = plugins_allowlist.split(",")
else:
self.plugins_allowlist = []
plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
if plugins_denylist:
self.plugins_denylist = plugins_denylist.split(",")
else:
self.plugins_denylist = []
def get_azure_deployment_id_for_model(self, model: str) -> str:
"""
Returns the relevant deployment id for the model specified.
Parameters:
model(str): The model to map to the deployment id.
Returns:
The matching deployment id if found, otherwise an empty string.
"""
if model == self.fast_llm_model:
return self.azure_model_to_deployment_id_map[
"fast_llm_model_deployment_id"
] # type: ignore
elif model == self.smart_llm_model:
return self.azure_model_to_deployment_id_map[
"smart_llm_model_deployment_id"
] # type: ignore
elif model == "text-embedding-ada-002":
return self.azure_model_to_deployment_id_map[
"embedding_model_deployment_id"
] # type: ignore
else:
return ""
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
"""
Loads the configuration parameters for Azure hosting from the specified file
path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
Returns:
None
"""
with open(config_file) as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)
self.openai_api_type = config_params.get("azure_api_type") or "azure"
self.openai_api_base = config_params.get("azure_api_base") or ""
self.openai_api_version = (
config_params.get("azure_api_version") or "2023-03-15-preview"
)
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
# --- Mutators -------------------------------------------------------------
# Thin setter methods used by the CLI configurator (autogpt/configurator.py)
# to override values that were initially read from the environment.

def set_continuous_mode(self, value: bool) -> None:
    """Set the continuous mode value."""
    self.continuous_mode = value

def set_continuous_limit(self, value: int) -> None:
    """Set the continuous limit value."""
    self.continuous_limit = value

def set_speak_mode(self, value: bool) -> None:
    """Set the speak mode value."""
    self.speak_mode = value

def set_fast_llm_model(self, value: str) -> None:
    """Set the fast LLM model value."""
    self.fast_llm_model = value

def set_smart_llm_model(self, value: str) -> None:
    """Set the smart LLM model value."""
    self.smart_llm_model = value

def set_fast_token_limit(self, value: int) -> None:
    """Set the fast token limit value."""
    self.fast_token_limit = value

def set_smart_token_limit(self, value: int) -> None:
    """Set the smart token limit value."""
    self.smart_token_limit = value

def set_embedding_model(self, value: str) -> None:
    """Set the model to use for creating embeddings."""
    self.embedding_model = value

def set_embedding_tokenizer(self, value: str) -> None:
    """Set the tokenizer to use when creating embeddings."""
    self.embedding_tokenizer = value

def set_embedding_token_limit(self, value: int) -> None:
    """Set the token limit for creating embeddings."""
    self.embedding_token_limit = value

def set_browse_chunk_max_length(self, value: int) -> None:
    """Set the browse_website command chunk max length value."""
    self.browse_chunk_max_length = value

def set_openai_api_key(self, value: str) -> None:
    """Set the OpenAI API key value."""
    self.openai_api_key = value

def set_elevenlabs_api_key(self, value: str) -> None:
    """Set the ElevenLabs API key value."""
    self.elevenlabs_api_key = value

def set_elevenlabs_voice_1_id(self, value: str) -> None:
    """Set the ElevenLabs Voice 1 ID value."""
    self.elevenlabs_voice_1_id = value

def set_elevenlabs_voice_2_id(self, value: str) -> None:
    """Set the ElevenLabs Voice 2 ID value."""
    self.elevenlabs_voice_2_id = value

def set_google_api_key(self, value: str) -> None:
    """Set the Google API key value."""
    self.google_api_key = value

def set_custom_search_engine_id(self, value: str) -> None:
    """Set the custom search engine id value."""
    self.custom_search_engine_id = value

def set_pinecone_api_key(self, value: str) -> None:
    """Set the Pinecone API key value."""
    self.pinecone_api_key = value

def set_pinecone_region(self, value: str) -> None:
    """Set the Pinecone region value."""
    self.pinecone_region = value

def set_debug_mode(self, value: bool) -> None:
    """Set the debug mode value."""
    self.debug_mode = value

def set_plugins(self, value: list) -> None:
    """Set the plugins value."""
    self.plugins = value

def set_temperature(self, value: int) -> None:
    """Set the temperature value."""
    # NOTE(review): annotated as int, but sampling temperature is
    # conventionally a float (0.0-2.0) — confirm against callers.
    self.temperature = value

def set_memory_backend(self, name: str) -> None:
    """Set the memory backend name."""
    self.memory_backend = name
def check_openai_api_key() -> None:
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    cfg = Config()
    if cfg.openai_api_key:
        # Key is configured; nothing to do.
        return
    # No key configured: tell the user where to set it, then abort.
    print(
        Fore.RED
        + "请在.env文件中配置你的OpenAI API Key."
        + Fore.RESET
    )
    print("你可以从这里获取你的key https://platform.openai.com/account/api-keys")
    exit(1)
================================================
FILE: autogpt/configurator.py
================================================
"""Configurator module."""
import click
from colorama import Back, Fore, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory import get_supported_memory_backends
CFG = Config()
def create_config(
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
    skip_news: bool,
) -> None:
    """Updates the config object with the given arguments.
    Args:
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
        ai_settings_file (str): The path to the ai_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
        gpt3only (bool): Whether to enable GPT3.5 only mode
        gpt4only (bool): Whether to enable GPT4 only mode
        memory_type (str): The type of memory backend to use
        browser_name (str): The name of the browser to use when using selenium to scrape the web
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
        skip_news (bool): Whether to suppress the output of latest news on startup
    Raises:
        click.UsageError: If --continuous-limit is given without --continuous.
    """
    # Reset the mode flags so repeated invocations start from a known state.
    CFG.set_debug_mode(False)
    CFG.set_continuous_mode(False)
    CFG.set_speak_mode(False)
    if debug:
        logger.typewriter_log("Debug模式: ", Fore.GREEN, "开启")
        CFG.set_debug_mode(True)
    if continuous:
        logger.typewriter_log("持续模式: ", Fore.RED, "开启")
        logger.typewriter_log(
            "警告: ",
            Fore.RED,
            "不推荐使用持续模式. 此模式存在风险,可能会让你的AI持续执行下去"
            "并执行没有被你授权的指令动作"
            "使用需谨慎,自负风险。",
        )
        CFG.set_continuous_mode(True)
        if continuous_limit:
            logger.typewriter_log(
                "持续限额: ", Fore.GREEN, f"{continuous_limit}"
            )
            CFG.set_continuous_limit(continuous_limit)
    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")
    if speak:
        logger.typewriter_log("语音模式: ", Fore.GREEN, "开启")
        CFG.set_speak_mode(True)
    if gpt3only:
        logger.typewriter_log("GPT3.5模式: ", Fore.GREEN, "开启")
        # Force both model slots to the fast (GPT-3.5) model.
        CFG.set_smart_llm_model(CFG.fast_llm_model)
    if gpt4only:
        logger.typewriter_log("GPT4模式: ", Fore.GREEN, "开启")
        # Force both model slots to the smart (GPT-4) model.
        CFG.set_fast_llm_model(CFG.smart_llm_model)
    if memory_type:
        supported_memory = get_supported_memory_backends()
        chosen = memory_type
        if chosen not in supported_memory:
            # Fix: original message said "一下" (a moment); "以下" (the
            # following) is the intended word.
            logger.typewriter_log(
                "只支持以下记忆后台模式: ",
                Fore.RED,
                f"{supported_memory}",
            )
            logger.typewriter_log("默认至: ", Fore.YELLOW, CFG.memory_backend)
        else:
            CFG.memory_backend = chosen
    if skip_reprompt:
        logger.typewriter_log("跳过重新指令: ", Fore.GREEN, "开启")
        CFG.skip_reprompt = True
    if ai_settings_file:
        file = ai_settings_file
        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("文件校验失败", Fore.RED, message)
            logger.double_check()
            exit(1)
        logger.typewriter_log("使用AI配置文件:", Fore.GREEN, file)
        CFG.ai_settings_file = file
        # A custom settings file implies the user does not want re-prompting.
        CFG.skip_reprompt = True
    if browser_name:
        CFG.selenium_web_browser = browser_name
    if allow_downloads:
        logger.typewriter_log("本地下载:", Fore.GREEN, "开启")
        logger.typewriter_log(
            "警告: ",
            Fore.YELLOW,
            f"{Back.LIGHTYELLOW_EX}Auto-GPT将开启下载并存储文件至你的本地电脑中。{Back.RESET} "
            + "建议您仔细监控它下载的任何文件。",
        )
        logger.typewriter_log(
            "警告: ",
            Fore.YELLOW,
            f"{Back.RED + Style.BRIGHT}请始终记住,永远不要打开您不确定的文件!{Style.RESET_ALL}",
        )
        CFG.allow_downloads = True
    if skip_news:
        CFG.skip_news = True
================================================
FILE: autogpt/js/overlay.js
================================================
// Full-screen overlay injected into pages driven by AutoGPT's browser
// automation, so anyone watching the session can see the page is being
// analyzed rather than interactively used.
const overlay = document.createElement('div');
Object.assign(overlay.style, {
    position: 'fixed',
    zIndex: 999999, // sit above any page content
    top: 0,
    left: 0,
    width: '100%',
    height: '100%',
    background: 'rgba(0, 0, 0, 0.7)', // dim the page underneath
    color: '#fff',
    fontSize: '24px',
    fontWeight: 'bold',
    display: 'flex',
    justifyContent: 'center',
    alignItems: 'center',
});
const textContent = document.createElement('div');
Object.assign(textContent.style, {
    textAlign: 'center',
});
textContent.textContent = 'AutoGPT Analyzing Page';
overlay.appendChild(textContent);
document.body.append(overlay);
// Prevent the page from scrolling underneath the overlay.
document.body.style.overflow = 'hidden';
// Animate a trailing "..." (0 to 3 dots) once per second as a liveness cue.
let dotCount = 0;
setInterval(() => {
    textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
    dotCount = (dotCount + 1) % 4;
}, 1000);
================================================
FILE: autogpt/json_utils/__init__.py
================================================
================================================
FILE: autogpt/json_utils/json_fix_general.py
================================================
"""This module contains functions to fix JSON strings using general programmatic approaches, suitable for addressing
common JSON formatting issues."""
from __future__ import annotations
import contextlib
import json
import re
from typing import Optional
from autogpt.config import Config
from autogpt.json_utils.utilities import extract_char_position
from autogpt.logs import logger
CFG = Config()
def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
    """Fix invalid escape sequences in JSON strings.
    Args:
        json_to_load (str): The JSON string.
        error_message (str): The error message from the JSONDecodeError
            exception.
    Returns:
        str: The JSON string with invalid escape sequences fixed.
    """
    # Repeatedly delete the character at the reported error position until
    # the string parses or the decoder reports a different class of error.
    while error_message.startswith("Invalid \\escape"):
        bad_escape_location = extract_char_position(error_message)
        head = json_to_load[:bad_escape_location]
        tail = json_to_load[bad_escape_location + 1 :]
        json_to_load = head + tail
        try:
            json.loads(json_to_load)
        except json.JSONDecodeError as e:
            logger.debug("json loads error - fix invalid escape", e)
            error_message = str(e)
        else:
            return json_to_load
    return json_to_load
def balance_braces(json_string: str) -> Optional[str]:
    """
    Balance the braces in a JSON string.
    Args:
        json_string (str): The JSON string.
    Returns:
        str: The JSON string with braces balanced, or None if the balanced
        string still does not parse as JSON.
    """
    open_braces_count = json_string.count("{")
    close_braces_count = json_string.count("}")
    # Too few closing braces: append until balanced.
    while open_braces_count > close_braces_count:
        json_string += "}"
        close_braces_count += 1
    # Too many closing braces: remove ONE trailing brace per iteration.
    # (The previous rstrip("}") stripped ALL trailing braces at once while
    # the counter only decremented by one, over-trimming valid strings.)
    while close_braces_count > open_braces_count:
        json_string = json_string.rstrip()
        if json_string.endswith("}"):
            json_string = json_string[:-1]
        close_braces_count -= 1
    with contextlib.suppress(json.JSONDecodeError):
        json.loads(json_string)
        return json_string
    # Falls through (returning None) when the result is still invalid JSON.
def add_quotes_to_property_names(json_string: str) -> str:
    """
    Add quotes to property names in a JSON string.
    Args:
        json_string (str): The JSON string.
    Returns:
        str: The JSON string with quotes added to property names.
    Raises:
        json.JSONDecodeError: If the corrected string is still not valid JSON.
    """
    def replace_func(match: re.Match) -> str:
        return f'"{match[1]}":'

    # Bare identifiers immediately followed by ':' are unquoted property
    # names; already-quoted names are not matched ('"' breaks the \w+: run).
    property_name_pattern = re.compile(r"(\w+):")
    corrected_json_string = property_name_pattern.sub(replace_func, json_string)
    # Validate the result; a JSONDecodeError propagates to the caller
    # (the previous `except e: raise e` was an identical no-op re-raise).
    json.loads(corrected_json_string)
    return corrected_json_string
def correct_json(json_to_load: str) -> str:
    """
    Correct common JSON errors.
    Args:
        json_to_load (str): The JSON string.
    Returns:
        str: The (possibly) corrected JSON string; returned unchanged when no
        applicable fix succeeds.
    """
    try:
        logger.debug("json", json_to_load)
        json.loads(json_to_load)
        # Already valid: return as-is.
        return json_to_load
    except json.JSONDecodeError as e:
        logger.debug("json loads error", e)
        error_message = str(e)
        # Dispatch on the decoder's error message: each branch targets one
        # failure class, and fixes are applied cumulatively in order.
        if error_message.startswith("Invalid \\escape"):
            json_to_load = fix_invalid_escape(json_to_load, error_message)
        if error_message.startswith(
            "Expecting property name enclosed in double quotes"
        ):
            json_to_load = add_quotes_to_property_names(json_to_load)
            try:
                json.loads(json_to_load)
                return json_to_load
            except json.JSONDecodeError as e:
                logger.debug("json loads error - add quotes", e)
                error_message = str(e)
        # Last resort: brace balancing; returns None if still unparseable.
        if balanced_str := balance_braces(json_to_load):
            return balanced_str
    return json_to_load
================================================
FILE: autogpt/json_utils/json_fix_llm.py
================================================
"""This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance
of the ChatGPT API or LLM models."""
from __future__ import annotations
import contextlib
import json
from typing import Any, Dict
from colorama import Fore
from regex import regex
from autogpt.config import Config
from autogpt.json_utils.json_fix_general import correct_json
from autogpt.llm import call_ai_function
from autogpt.logs import logger
from autogpt.speech import say_text
JSON_SCHEMA = """
{
"command": {
"name": "command name",
"args": {
"arg name": "value"
}
},
"thoughts":
{
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user"
}
}
"""
CFG = Config()
def auto_fix_json(json_string: str, schema: str) -> str:
    """Fix the given JSON string to make it parseable and fully compliant with
    the provided schema using GPT-3.
    Args:
        json_string (str): The JSON string to fix.
        schema (str): The schema to use to fix the JSON.
    Returns:
        str: The fixed JSON string, or the literal string "failed" when the
        model's output still does not parse.
    """
    # Try to fix the JSON using GPT:
    function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
    args = [f"'''{json_string}'''", f"'''{schema}'''"]
    description_string = (
        "This function takes a JSON string and ensures that it"
        " is parseable and fully compliant with the provided schema. If an object"
        " or field specified in the schema isn't contained within the correct JSON,"
        " it is omitted. The function also escapes any double quotes within JSON"
        " string values to ensure that they are valid. If the JSON string contains"
        " any None or NaN values, they are replaced with null before being parsed."
    )
    # If it doesn't already start with a "`", add one:
    # (fencing the payload helps the model treat it as literal JSON)
    if not json_string.startswith("`"):
        json_string = "```json\n" + json_string + "\n```"
    result_string = call_ai_function(
        function_string, args, description_string, model=CFG.fast_llm_model
    )
    logger.debug("------------ JSON FIX ATTEMPT ---------------")
    logger.debug(f"Original JSON: {json_string}")
    logger.debug("-----------")
    logger.debug(f"Fixed JSON: {result_string}")
    logger.debug("----------- END OF FIX ATTEMPT ----------------")
    try:
        json.loads(result_string)  # just check the validity
        return result_string
    except json.JSONDecodeError:  # noqa: E722
        # Get the call stack:
        # import traceback
        # call_stack = traceback.format_exc()
        # print(f"Failed to fix JSON: '{json_string}' "+call_stack)
        return "failed"
def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
    """Fix the given JSON string to make it parseable and fully compliant with two techniques.
    Args:
        assistant_reply (str): The raw LLM reply that should contain JSON.
    Returns:
        Dict[Any, Any]: The parsed JSON object, or {} if every technique fails.
    """
    assistant_reply = assistant_reply.strip()
    # Strip a markdown code fence if the model wrapped its JSON in one.
    if assistant_reply.startswith("```json"):
        assistant_reply = assistant_reply[7:]
    if assistant_reply.endswith("```"):
        assistant_reply = assistant_reply[:-3]
    try:
        return json.loads(assistant_reply)  # just check the validity
    except json.JSONDecodeError:  # noqa: E722
        pass
    # Some replies are prefixed with a bare "json " tag; drop it and retry.
    if assistant_reply.startswith("json "):
        assistant_reply = assistant_reply[5:]
        assistant_reply = assistant_reply.strip()
    try:
        return json.loads(assistant_reply)  # just check the validity
    except json.JSONDecodeError:  # noqa: E722
        pass
    # Parse and print Assistant response
    assistant_reply_json = fix_and_parse_json(assistant_reply)
    logger.debug("Assistant reply JSON: %s", str(assistant_reply_json))
    if assistant_reply_json == {}:
        # Fallback: locate the outermost {...} span and parse that.
        assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
            assistant_reply
        )
    logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json))
    if assistant_reply_json != {}:
        return assistant_reply_json
    logger.error(
        "Error: The following AI output couldn't be converted to a JSON:\n",
        assistant_reply,
    )
    if CFG.speak_mode:
        say_text("I have received an invalid JSON response from the OpenAI API.")
    return {}
def fix_and_parse_json(
    json_to_load: str, try_to_fix_with_gpt: bool = True
) -> Dict[Any, Any]:
    """Fix and parse JSON string
    Args:
        json_to_load (str): The JSON string.
        try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT.
            Defaults to True.
    Returns:
        str or dict[Any, Any]: The parsed JSON.
    Raises:
        Exception: Re-raises the parse error when try_to_fix_with_gpt is False
        (via try_ai_fix).
    """
    # Attempt 1: drop tabs (a frequent LLM artifact) and parse directly.
    with contextlib.suppress(json.JSONDecodeError):
        json_to_load = json_to_load.replace("\t", "")
        return json.loads(json_to_load)
    # Attempt 2: apply the programmatic fixes from json_fix_general.
    with contextlib.suppress(json.JSONDecodeError):
        json_to_load = correct_json(json_to_load)
        return json.loads(json_to_load)
    # Let's do something manually:
    # sometimes GPT responds with something BEFORE the braces:
    # "I'm sorry, I don't understand. Please try again."
    # {"text": "I'm sorry, I don't understand. Please try again.",
    #  "confidence": 0.0}
    # So let's try to find the first brace and then parse the rest
    # of the string
    try:
        brace_index = json_to_load.index("{")
        maybe_fixed_json = json_to_load[brace_index:]
        last_brace_index = maybe_fixed_json.rindex("}")
        maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1]
        return json.loads(maybe_fixed_json)
    except (json.JSONDecodeError, ValueError) as e:
        # ValueError covers .index/.rindex failing when no braces exist.
        return try_ai_fix(try_to_fix_with_gpt, e, json_to_load)
def try_ai_fix(
    try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
) -> Dict[Any, Any]:
    """Try to fix the JSON with the AI
    Args:
        try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI.
        exception (Exception): The exception that was raised.
        json_to_load (str): The JSON string to load.
    Raises:
        exception: If try_to_fix_with_gpt is False.
    Returns:
        str or dict[Any, Any]: The parsed JSON, or {} when the AI fix fails.
    """
    if not try_to_fix_with_gpt:
        # Caller opted out of the AI repair; surface the original error.
        raise exception
    if CFG.debug_mode:
        logger.warn(
            "Warning: Failed to parse AI output, attempting to fix."
            "\n If you see this warning frequently, it's likely that"
            " your prompt is confusing the AI. Try changing it up"
            " slightly."
        )
    # Now try to fix this up using the ai_functions
    ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA)
    if ai_fixed_json != "failed":
        # auto_fix_json already validated this string, so loads cannot fail.
        return json.loads(ai_fixed_json)
    # This allows the AI to react to the error message,
    # which usually results in it correcting its ways.
    # logger.error("Failed to fix AI output, telling the AI.")
    return {}
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
    """Locate the outermost {...} object in *json_string* and parse it.

    Args:
        json_string (str): Raw text that should contain a JSON object.
    Returns:
        Dict[Any, Any]: The parsed object, or {} when no object is found or
        parsing fails.
    """
    if CFG.speak_mode and CFG.debug_mode:
        say_text(
            "I have received an invalid JSON response from the OpenAI API. "
            "Trying to fix it now."
        )
    logger.error("Attempting to fix JSON by finding outermost brackets\n")
    try:
        # Recursive pattern ((?R), a third-party `regex` feature) matching a
        # balanced-brace span.
        json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
        json_match = json_pattern.search(json_string)
        if not json_match:
            return {}
        # Extract the valid JSON object from the string
        json_string = json_match.group(0)
        logger.typewriter_log(
            title="Apparently json was fixed.", title_color=Fore.GREEN
        )
        if CFG.speak_mode and CFG.debug_mode:
            say_text("Apparently json was fixed.")
    except (json.JSONDecodeError, ValueError):
        if CFG.debug_mode:
            logger.error(f"Error: Invalid JSON: {json_string}\n")
        if CFG.speak_mode:
            say_text("Didn't work. I will have to ignore this response then.")
        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
        # BUG FIX: this previously fell through to fix_and_parse_json({}),
        # which crashes — fix_and_parse_json calls .replace() on its argument
        # and expects a string, not a dict. Return the empty object directly.
        return {}
    return fix_and_parse_json(json_string)
================================================
FILE: autogpt/json_utils/llm_response_format_1.json
================================================
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"properties": {
"text": {"type": "string"},
"reasoning": {"type": "string"},
"plan": {"type": "string"},
"criticism": {"type": "string"},
"speak": {"type": "string"}
},
"required": ["text", "reasoning", "plan", "criticism", "speak"],
"additionalProperties": false
},
"command": {
"type": "object",
"properties": {
"name": {"type": "string"},
"args": {
"type": "object"
}
},
"required": ["name", "args"],
"additionalProperties": false
}
},
"required": ["thoughts", "command"],
"additionalProperties": false
}
================================================
FILE: autogpt/json_utils/utilities.py
================================================
"""Utilities for the json_fixes package."""
import json
import os.path
import re
from jsonschema import Draft7Validator
from autogpt.config import Config
from autogpt.logs import logger
# Process-wide config (Singleton); gates the debug output in validate_json.
CFG = Config()
# Name (sans extension) of the bundled schema file used to validate replies.
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
def extract_char_position(error_message: str) -> int:
    """Extract the character position from the JSONDecodeError message.
    Args:
        error_message (str): The error message from the JSONDecodeError
            exception.
    Returns:
        int: The character position.
    Raises:
        ValueError: If the message contains no "(char N)" marker.
    """
    # JSONDecodeError messages end with "... (char N)"; capture N.
    match = re.search(r"\(char (\d+)\)", error_message)
    if match is None:
        raise ValueError("Character position not found in the error message.")
    return int(match.group(1))
def validate_json(json_object: object, schema_name: str) -> dict | None:
    """Validate *json_object* against the named Draft-7 JSON schema.

    Args:
        json_object: The already-parsed JSON data to validate.
        schema_name: Name of the schema file (without ".json") located next
            to this module.
    Returns:
        The same json_object, regardless of validation outcome (errors are
        only logged, not raised).
    """
    scheme_file = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
    with open(scheme_file, "r") as f:
        schema = json.load(f)
    validator = Draft7Validator(schema)
    # Sort errors by path so the log output is deterministic and readable.
    if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
        logger.error("The JSON object is invalid.")
        if CFG.debug_mode:
            logger.error(
                json.dumps(json_object, indent=4)
            )  # Replace 'json_object' with the variable containing the JSON data
            logger.error("The following issues were found:")
            for error in errors:
                logger.error(f"Error: {error.message}")
    else:
        logger.debug("The JSON object is valid.")
    return json_object
def validate_json_string(json_string: str, schema_name: str) -> dict | None:
    """Parse *json_string* and validate it against the named schema.

    Args:
        json_string: The raw JSON text to parse.
        schema_name: Name of the schema file (without ".json") to validate
            against.
    Returns:
        The parsed object when json_string loads, otherwise None.
    """
    try:
        json_loaded = json.loads(json_string)
        return validate_json(json_loaded, schema_name)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort "return None on
        # any failure" behavior without trapping those.
        return None
def is_string_valid_json(json_string: str, schema_name: str) -> bool:
    """Return True when *json_string* parses and validates against the schema.

    Args:
        json_string: The raw JSON text to check.
        schema_name: Name of the schema file (without ".json").
    """
    parsed = validate_json_string(json_string, schema_name)
    return parsed is not None
================================================
FILE: autogpt/llm/__init__.py
================================================
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import (
ChatModelInfo,
ChatModelResponse,
EmbeddingModelInfo,
EmbeddingModelResponse,
LLMResponse,
Message,
ModelInfo,
)
from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context
from autogpt.llm.llm_utils import (
call_ai_function,
chunked_tokens,
create_chat_completion,
get_ada_embedding,
)
from autogpt.llm.modelsinfo import COSTS
from autogpt.llm.token_counter import count_message_tokens, count_string_tokens
# Public re-export surface of the autogpt.llm package.
__all__ = [
    "ApiManager",
    "Message",
    "ModelInfo",
    "ChatModelInfo",
    "EmbeddingModelInfo",
    "LLMResponse",
    "ChatModelResponse",
    "EmbeddingModelResponse",
    "create_chat_message",
    "generate_context",
    "chat_with_ai",
    "call_ai_function",
    "create_chat_completion",
    "get_ada_embedding",
    "chunked_tokens",
    "COSTS",
    "count_message_tokens",
    "count_string_tokens",
]
================================================
FILE: autogpt/llm/api_manager.py
================================================
from __future__ import annotations
import openai
from autogpt.config import Config
from autogpt.llm.modelsinfo import COSTS
from autogpt.logs import logger
from autogpt.singleton import Singleton
class ApiManager(metaclass=Singleton):
    """Tracks OpenAI API usage (token counts and cost) against an optional
    user-defined budget. Singleton: one shared instance per process."""

    def __init__(self):
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0
        self.total_budget = 0

    def reset(self):
        """Reset all usage counters and the budget to zero."""
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0
        self.total_budget = 0.0

    def create_chat_completion(
        self,
        messages: list,  # type: ignore
        model: str | None = None,
        temperature: float | None = None,
        max_tokens: int | None = None,
        deployment_id=None,
    ) -> str:
        """
        Create a chat completion and update the cost.
        Args:
            messages (list): The list of messages to send to the API.
            model (str): The model to use for the API call.
            temperature (float): The temperature to use for the API call.
            max_tokens (int): The maximum number of tokens for the API call.
            deployment_id: Azure deployment id, if calling an Azure endpoint.
        Returns:
            str: The AI's response.
        """
        cfg = Config()
        if temperature is None:
            temperature = cfg.temperature
        # Build the request once; the optional Azure deployment_id is the
        # only difference between the two previously-duplicated branches.
        request_kwargs = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "api_key": cfg.openai_api_key,
        }
        if deployment_id is not None:
            request_kwargs["deployment_id"] = deployment_id
        response = openai.ChatCompletion.create(**request_kwargs)
        logger.debug(f"Response: {response}")
        prompt_tokens = response.usage.prompt_tokens
        completion_tokens = response.usage.completion_tokens
        self.update_cost(prompt_tokens, completion_tokens, model)
        return response

    def update_cost(self, prompt_tokens, completion_tokens, model):
        """
        Update the total cost, prompt tokens, and completion tokens.
        Args:
            prompt_tokens (int): The number of tokens used in the prompt.
            completion_tokens (int): The number of tokens used in the completion.
            model (str): The model used for the API call.
        """
        self.total_prompt_tokens += prompt_tokens
        self.total_completion_tokens += completion_tokens
        # COSTS holds per-1000-token prices, hence the /1000.
        self.total_cost += (
            prompt_tokens * COSTS[model]["prompt"]
            + completion_tokens * COSTS[model]["completion"]
        ) / 1000
        logger.debug(f"Total running cost: ${self.total_cost:.3f}")

    def set_total_budget(self, total_budget):
        """
        Sets the total user-defined budget for API calls.
        Args:
            total_budget (float): The total budget for API calls.
        """
        self.total_budget = total_budget

    def get_total_prompt_tokens(self):
        """
        Get the total number of prompt tokens.
        Returns:
            int: The total number of prompt tokens.
        """
        return self.total_prompt_tokens

    def get_total_completion_tokens(self):
        """
        Get the total number of completion tokens.
        Returns:
            int: The total number of completion tokens.
        """
        return self.total_completion_tokens

    def get_total_cost(self):
        """
        Get the total cost of API calls.
        Returns:
            float: The total cost of API calls.
        """
        return self.total_cost

    def get_total_budget(self):
        """
        Get the total user-defined budget for API calls.
        Returns:
            float: The total budget for API calls.
        """
        return self.total_budget
================================================
FILE: autogpt/llm/base.py
================================================
from dataclasses import dataclass, field
from typing import List, Optional, TypedDict
class Message(TypedDict):
    """OpenAI Message object containing a role and the message content"""

    role: str
    content: str


@dataclass
class ModelInfo:
    """Struct for model information.

    Would be lovely to eventually get this directly from APIs, but needs to be
    scraped from websites for now.
    """

    name: str
    prompt_token_cost: float
    completion_token_cost: float
    max_tokens: int


@dataclass
class ChatModelInfo(ModelInfo):
    """Struct for chat model information."""


@dataclass
class EmbeddingModelInfo(ModelInfo):
    """Struct for embedding model information."""

    embedding_dimensions: int


@dataclass
class LLMResponse:
    """Standard response struct for a response from an LLM model."""

    model_info: ModelInfo
    prompt_tokens_used: int = 0
    completion_tokens_used: int = 0


@dataclass
class EmbeddingModelResponse(LLMResponse):
    """Standard response struct for a response from an embedding model."""

    embedding: List[float] = field(default_factory=list)

    def __post_init__(self):
        # The embeddings endpoint reports no completion tokens; a nonzero
        # value here indicates the response was mis-categorized.
        if self.completion_tokens_used:
            raise ValueError("Embeddings should not have completion tokens used.")


@dataclass
class ChatModelResponse(LLMResponse):
    """Standard response struct for a response from an LLM model."""

    # Fixed annotation: the default is None, so the field is Optional[str],
    # not str.
    content: Optional[str] = None
================================================
FILE: autogpt/llm/chat.py
================================================
import time
from random import shuffle
from openai.error import RateLimitError
from autogpt.config import Config
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message
from autogpt.llm.llm_utils import create_chat_completion
from autogpt.llm.token_counter import count_message_tokens
from autogpt.logs import logger
from autogpt.memory_management.store_memory import (
save_memory_trimmed_from_context_window,
)
from autogpt.memory_management.summary_memory import (
get_newly_trimmed_messages,
update_running_summary,
)
cfg = Config()
def create_chat_message(role, content) -> Message:
    """
    Create a chat message with the given role and content.
    Args:
        role (str): The role of the message sender, e.g., "system", "user", or "assistant".
        content (str): The content of the message.
    Returns:
        dict: A dictionary containing the role and content of the message.
    """
    message = {"role": role, "content": content}
    return message
def generate_context(prompt, relevant_memory, full_message_history, model):
    """Seed the chat context with the system prompt and the current time.

    Returns a 4-tuple: (index of the most recent history message not yet
    added, tokens used by the seed context, index at which history messages
    get inserted, the context list itself).
    """
    current_context = [
        create_chat_message("system", prompt),
        create_chat_message(
            "system", f"当前的日期和时间是 {time.strftime('%c')}"
        ),
    ]
    # History is walked backwards starting from the most recent message;
    # new history entries are inserted right after the system messages.
    next_message_to_add_index = len(full_message_history) - 1
    insertion_index = len(current_context)
    # Tokens consumed by the seed context alone.
    current_tokens_used = count_message_tokens(current_context, model)
    context_bundle = (
        next_message_to_add_index,
        current_tokens_used,
        insertion_index,
        current_context,
    )
    return context_bundle
# TODO: Change debug from hardcode to argument
def chat_with_ai(
    agent, prompt, user_input, full_message_history, permanent_memory, token_limit
):
    """Interact with the OpenAI API, sending the prompt, user input, message history,
    and permanent memory."""
    # Retry loop: a RateLimitError waits 10 seconds and restarts the whole
    # context build + completion (see the except clause at the bottom).
    while True:
        try:
            """
            Interact with the OpenAI API, sending the prompt, user input,
            message history, and permanent memory.
            Args:
                prompt (str): The prompt explaining the rules to the AI.
                user_input (str): The input from the user.
                full_message_history (list): The list of all messages sent between the
                    user and the AI.
                permanent_memory (Obj): The memory object containing the permanent
                    memory.
                token_limit (int): The maximum number of tokens allowed in the API call.
            Returns:
                str: The AI's response.
            """
            model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
            # Reserve 1000 tokens for the response
            logger.debug(f"Token限额: {token_limit}")
            send_token_limit = token_limit - 1000
            # if len(full_message_history) == 0:
            #     relevant_memory = ""
            # else:
            #     recent_history = full_message_history[-5:]
            #     shuffle(recent_history)
            #     relevant_memories = permanent_memory.get_relevant(
            #         str(recent_history), 5
            #     )
            #     if relevant_memories:
            #         shuffle(relevant_memories)
            #     relevant_memory = str(relevant_memories)
            # Relevant-memory retrieval is currently disabled (see the
            # commented block above); an empty string is passed instead.
            relevant_memory = ""
            logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
            (
                next_message_to_add_index,
                current_tokens_used,
                insertion_index,
                current_context,
            ) = generate_context(prompt, relevant_memory, full_message_history, model)
            # while current_tokens_used > 2500:
            #     # remove memories until we are under 2500 tokens
            #     relevant_memory = relevant_memory[:-1]
            #     (
            #         next_message_to_add_index,
            #         current_tokens_used,
            #         insertion_index,
            #         current_context,
            #     ) = generate_context(
            #         prompt, relevant_memory, full_message_history, model
            #     )
            current_tokens_used += count_message_tokens(
                [create_chat_message("user", user_input)], model
            )  # Account for user input (appended later)
            current_tokens_used += 500  # Account for memory (appended later) TODO: The final memory may be less than 500 tokens
            # Add Messages until the token limit is reached or there are no more messages to add.
            while next_message_to_add_index >= 0:
                # print (f"CURRENT TOKENS USED: {current_tokens_used}")
                message_to_add = full_message_history[next_message_to_add_index]
                tokens_to_add = count_message_tokens([message_to_add], model)
                if current_tokens_used + tokens_to_add > send_token_limit:
                    # save_memory_trimmed_from_context_window(
                    #     full_message_history,
                    #     next_message_to_add_index,
                    #     permanent_memory,
                    # )
                    break
                # Add the most recent message to the start of the current context,
                # after the two system prompts.
                current_context.insert(
                    insertion_index, full_message_history[next_message_to_add_index]
                )
                # Count the currently used tokens
                current_tokens_used += tokens_to_add
                # Move to the next most recent message in the full message history
                next_message_to_add_index -= 1
            # Insert Memories
            if len(full_message_history) > 0:
                # Fold messages trimmed from the context window into the
                # agent's running summary so their content is not lost.
                (
                    newly_trimmed_messages,
                    agent.last_memory_index,
                ) = get_newly_trimmed_messages(
                    full_message_history=full_message_history,
                    current_context=current_context,
                    last_memory_index=agent.last_memory_index,
                )
                agent.summary_memory = update_running_summary(
                    current_memory=agent.summary_memory,
                    new_events=newly_trimmed_messages,
                )
                current_context.insert(insertion_index, agent.summary_memory)
            api_manager = ApiManager()
            # inform the AI about its remaining budget (if it has one)
            if api_manager.get_total_budget() > 0.0:
                remaining_budget = (
                    api_manager.get_total_budget() - api_manager.get_total_cost()
                )
                if remaining_budget < 0:
                    remaining_budget = 0
                system_message = (
                    f"你的剩余API预算为 ${remaining_budget:.3f}"
                    + (
                        " 已超预算! 关闭!\n\n"
                        if remaining_budget == 0
                        else " 预算非常接近限额! 优雅关闭中!\n\n"
                        if remaining_budget < 0.005
                        else " 预算接近限额. 完成中.\n\n"
                        if remaining_budget < 0.01
                        else "\n\n"
                    )
                )
                logger.debug(system_message)
                current_context.append(create_chat_message("system", system_message))
            # Append user input, the length of this is accounted for above
            current_context.extend([create_chat_message("user", user_input)])
            # Give planning-capable plugins a chance to append context, as
            # long as their responses fit in the remaining token budget.
            plugin_count = len(cfg.plugins)
            for i, plugin in enumerate(cfg.plugins):
                if not plugin.can_handle_on_planning():
                    continue
                plugin_response = plugin.on_planning(
                    agent.prompt_generator, current_context
                )
                if not plugin_response or plugin_response == "":
                    continue
                tokens_to_add = count_message_tokens(
                    [create_chat_message("system", plugin_response)], model
                )
                if current_tokens_used + tokens_to_add > send_token_limit:
                    logger.debug("Plugin response too long, skipping:", plugin_response)
                    logger.debug("Plugins remaining at stop:", plugin_count - i)
                    break
                current_context.append(create_chat_message("system", plugin_response))
            # Calculate remaining tokens
            tokens_remaining = token_limit - current_tokens_used
            # assert tokens_remaining >= 0, "Tokens remaining is negative.
            # This should never happen, please submit a bug report at
            #  https://www.github.com/Torantulino/Auto-GPT"
            # Debug print the current context
            logger.debug(f"Token限额: {token_limit}")
            logger.debug(f"发送Token数量: {current_tokens_used}")
            logger.debug(f"回复剩余Token: {tokens_remaining}")
            logger.debug("------------ 内容发送至AI ---------------")
            for message in current_context:
                # Skip printing the prompt
                if message["role"] == "system" and message["content"] == prompt:
                    continue
                logger.debug(f"{message['role'].capitalize()}: {message['content']}")
                logger.debug("")
            logger.debug("----------- 内容结束 ----------------")
            # TODO: use a model defined elsewhere, so that model can contain
            # temperature and other settings we care about
            assistant_reply = create_chat_completion(
                model=model,
                messages=current_context,
                max_tokens=tokens_remaining,
            )
            # Update full message history
            full_message_history.append(create_chat_message("user", user_input))
            full_message_history.append(
                create_chat_message("assistant", assistant_reply)
            )
            return assistant_reply
        except RateLimitError:
            # TODO: When we switch to langchain, this is built in
            logger.warn("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
================================================
FILE: autogpt/llm/llm_utils.py
================================================
from __future__ import annotations
import functools
import time
from itertools import islice
from typing import List, Optional
import numpy as np
import openai
import tiktoken
from colorama import Fore, Style
from openai.error import APIError, RateLimitError, Timeout
from autogpt.config import Config
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message
from autogpt.logs import logger
def retry_openai_api(
    num_retries: int = 10,
    backoff_base: float = 2.0,
    warn_user: bool = True,
):
    """Decorator factory that retries an OpenAI API call.

    Retries on rate limits and on HTTP 502 (bad gateway) responses with
    exponential backoff; any other error, or exhausting the attempts,
    propagates to the caller.

    Args:
        num_retries int: Number of retries. Defaults to 10.
        backoff_base float: Base for exponential backoff. Defaults to 2.
        warn_user bool: Whether to warn the user. Defaults to True.
    """
    retry_limit_msg = f"{Fore.RED}Error: " f"Reached rate limit, passing...{Fore.RESET}"
    api_key_error_msg = (
        f"Please double check that you have setup a "
        f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
        f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
    )
    backoff_msg = (
        f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
    )

    def _wrapper(func):
        @functools.wraps(func)
        def _wrapped(*args, **kwargs):
            user_warned = not warn_user
            max_attempts = num_retries + 1  # the initial call plus the retries
            attempt = 0
            while True:
                attempt += 1
                try:
                    return func(*args, **kwargs)
                except RateLimitError:
                    if attempt == max_attempts:
                        raise
                    logger.debug(retry_limit_msg)
                    if not user_warned:
                        logger.double_check(api_key_error_msg)
                        user_warned = True
                except APIError as err:
                    # Only a 502 is considered transient; everything else
                    # (and the final attempt) is re-raised immediately.
                    if err.http_status != 502 or attempt == max_attempts:
                        raise
                delay = backoff_base ** (attempt + 2)
                logger.debug(backoff_msg.format(backoff=delay))
                time.sleep(delay)

        return _wrapped

    return _wrapper
def call_ai_function(
    function: str, args: list, description: str, model: str | None = None
) -> str:
    """Call an AI function

    This is a magic function that can do anything with no-code. See
    https://github.com/Torantulino/AI-Functions for more info.

    Args:
        function (str): The function to call
        args (list): The arguments to pass to the function
        description (str): The description of the function
        model (str, optional): The model to use. Defaults to None.

    Returns:
        str: The response from the function
    """
    cfg = Config()
    chosen_model = cfg.smart_llm_model if model is None else model
    # Render every argument as text (spelling out None explicitly) and join
    # them into the comma-separated argument list the prompt expects.
    rendered_args = ", ".join("None" if arg is None else str(arg) for arg in args)
    messages: List[Message] = [
        {
            "role": "system",
            "content": f"You are now the following python function: ```# {description}"
            f"\n{function}```\n\nOnly respond with your `return` value.",
        },
        {"role": "user", "content": rendered_args},
    ]
    return create_chat_completion(model=chosen_model, messages=messages, temperature=0)
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
    messages: List[Message],  # type: ignore
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
) -> str:
    """Create a chat completion using the OpenAI API.

    Args:
        messages (List[Message]): The messages to send to the chat completion
        model (str, optional): The model to use. Defaults to None.
        temperature (float, optional): Sampling temperature. Defaults to None,
            in which case Config().temperature is used.
        max_tokens (int, optional): The max tokens to use. Defaults to None.

    Returns:
        str: The assistant message content, after plugin post-processing.

    Raises:
        RuntimeError: In debug mode, when every retry attempt has failed.
        SystemExit: Outside debug mode, when every retry attempt has failed.
    """
    cfg = Config()
    if temperature is None:
        temperature = cfg.temperature

    num_retries = 10
    warned_user = False
    logger.debug(
        f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
    )
    # A plugin may fully take over the completion; the first non-None
    # plugin result is returned without calling the API at all.
    for plugin in cfg.plugins:
        if plugin.can_handle_chat_completion(
            messages=messages,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
        ):
            message = plugin.handle_chat_completion(
                messages=messages,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            if message is not None:
                return message
    api_manager = ApiManager()
    response = None
    for attempt in range(num_retries):
        backoff = 2 ** (attempt + 2)  # exponential backoff: 4, 8, 16, ... seconds
        try:
            if cfg.use_azure:
                response = api_manager.create_chat_completion(
                    deployment_id=cfg.get_azure_deployment_id_for_model(model),
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            else:
                response = api_manager.create_chat_completion(
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            break
        except RateLimitError:
            logger.debug(
                f"{Fore.RED}Error: ", f"到达请求限额, passing...{Fore.RESET}"
            )
            if not warned_user:
                logger.double_check(
                    f"请再次检查你已经配置好了一个 {Fore.CYAN + Style.BRIGHT}付费的{Style.RESET_ALL} OpenAI API账户. "
                    + f"你可以从这里获取更多信息: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
                )
                warned_user = True
        except (APIError, Timeout) as e:
            # NOTE(review): only 502 is retried; Timeout may not carry
            # http_status — confirm before relying on this branch for timeouts.
            if e.http_status != 502:
                raise
            if attempt == num_retries - 1:
                raise
        logger.debug(
            f"{Fore.RED}Error: ",
            f"API Bad gateway. 等待 {backoff} 秒...{Fore.RESET}",
        )
        time.sleep(backoff)
    if response is None:
        logger.typewriter_log(
            "从OPENAI获取信息失败",
            Fore.RED,
            "Auto-GPT从OpenAI服务中获取请求失败. "
            + f"尝试重新启动Auto-GPT, 如果这个问题持续出现请增加后缀 `{Fore.CYAN}--debug{Fore.RESET}`.",
        )
        logger.double_check()
        if cfg.debug_mode:
            raise RuntimeError(f"请求 {num_retries} 次后失败")
        # quit() is an interactive-only helper from the site module and may be
        # missing under `python -S`; SystemExit(1) is the equivalent, reliable exit.
        raise SystemExit(1)
    resp = response.choices[0].message["content"]
    # Let plugins post-process the raw response text.
    for plugin in cfg.plugins:
        if not plugin.can_handle_on_response():
            continue
        resp = plugin.on_response(resp)
    return resp
def batched(iterable, n):
    """Batch data into tuples of length n. The last batch may be shorter.

    Args:
        iterable: Any iterable to split into batches.
        n: Maximum size of each batch; must be at least 1.

    Yields:
        tuple: Consecutive batches of up to n items.

    Raises:
        ValueError: If n is less than 1.
    """
    # batched('ABCDEFG', 3) --> ABC DEF G
    if n < 1:
        # Fixed message: the constraint is n >= 1 ("n must be at least 1"),
        # not "n must be a number" as the old text claimed.
        raise ValueError("n 必须至少为 1")
    it = iter(iterable)
    while batch := tuple(islice(it, n)):
        yield batch
def chunked_tokens(text, tokenizer_name, chunk_length):
    """Tokenize *text* with the named tiktoken encoding and yield the token
    ids in chunks of at most *chunk_length* tokens."""
    encoder = tiktoken.get_encoding(tokenizer_name)
    yield from batched(encoder.encode(text), chunk_length)
def get_ada_embedding(text: str) -> List[float]:
    """Get an embedding from the ada model.

    Args:
        text (str): The text to embed.

    Returns:
        List[float]: The embedding.
    """
    cfg = Config()
    model = cfg.embedding_model
    # Newlines are flattened to spaces before embedding.
    cleaned_text = text.replace("\n", " ")
    if cfg.use_azure:
        # Azure deployments are addressed by engine id rather than model name.
        return create_embedding(
            cleaned_text, engine=cfg.get_azure_deployment_id_for_model(model)
        )
    return create_embedding(cleaned_text, model=model)
@retry_openai_api()
def create_embedding(
    text: str,
    *_,
    **kwargs,
) -> List[float]:
    """Create an embedding using the OpenAI API.

    Long inputs are split into token chunks, each chunk is embedded
    separately, and the chunk embeddings are combined into one unit-length
    vector, weighted by chunk length.

    Args:
        text (str): The text to embed.
        kwargs: Other arguments to pass to the OpenAI API embedding creation call.

    Returns:
        List[float]: The combined, length-normalized embedding vector.
            (The previous ``openai.Embedding`` annotation was wrong: the
            function returns the averaged list, not the raw API object.)
    """
    cfg = Config()
    chunk_embeddings = []
    chunk_lengths = []
    for chunk in chunked_tokens(
        text,
        tokenizer_name=cfg.embedding_tokenizer,
        chunk_length=cfg.embedding_token_limit,
    ):
        embedding = openai.Embedding.create(
            input=[chunk],
            api_key=cfg.openai_api_key,
            **kwargs,
        )
        api_manager = ApiManager()
        api_manager.update_cost(
            prompt_tokens=embedding.usage.prompt_tokens,
            completion_tokens=0,
            model=cfg.embedding_model,
        )
        chunk_embeddings.append(embedding["data"][0]["embedding"])
        chunk_lengths.append(len(chunk))

    # Weighted average so longer chunks contribute proportionally more.
    chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lengths)
    chunk_embeddings = chunk_embeddings / np.linalg.norm(
        chunk_embeddings
    )  # normalize the length to one
    return chunk_embeddings.tolist()
================================================
FILE: autogpt/llm/modelsinfo.py
================================================
# Per-1K-token USD costs by model name ("prompt" = input, "completion" = output).
# The duplicate "gpt-4-0314" entry was removed: Python silently keeps only the
# last occurrence of a repeated dict key, which would hide a divergence if the
# two lines were ever edited independently.
COSTS = {
    "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
    "gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002},
    "gpt-4": {"prompt": 0.03, "completion": 0.06},
    "gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
    "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
    "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
    "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
}
================================================
FILE: autogpt/llm/providers/__init__.py
================================================
================================================
FILE: autogpt/llm/providers/openai.py
================================================
from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo
# Chat-capable models, keyed by their own name field.
OPEN_AI_CHAT_MODELS = {
    info.name: info
    for info in (
        ChatModelInfo(
            name="gpt-3.5-turbo",
            prompt_token_cost=0.002,
            completion_token_cost=0.002,
            max_tokens=4096,
        ),
        ChatModelInfo(
            name="gpt-4",
            prompt_token_cost=0.03,
            completion_token_cost=0.06,
            max_tokens=8192,
        ),
        ChatModelInfo(
            name="gpt-4-32k",
            prompt_token_cost=0.06,
            completion_token_cost=0.12,
            max_tokens=32768,
        ),
    )
}

# Embedding models, keyed by their own name field.
OPEN_AI_EMBEDDING_MODELS = {
    info.name: info
    for info in (
        EmbeddingModelInfo(
            name="text-embedding-ada-002",
            prompt_token_cost=0.0004,
            completion_token_cost=0.0,
            max_tokens=8191,
            embedding_dimensions=1536,
        ),
    )
}

# Combined lookup across both chat and embedding models.
OPEN_AI_MODELS = {
    **OPEN_AI_CHAT_MODELS,
    **OPEN_AI_EMBEDDING_MODELS,
}
================================================
FILE: autogpt/llm/token_counter.py
================================================
"""Functions for counting the number of tokens in a message or string."""
from __future__ import annotations
from typing import List
import tiktoken
from autogpt.llm.base import Message
from autogpt.logs import logger
def count_message_tokens(
    messages: List[Message], model: str = "gpt-3.5-turbo-0301"
) -> int:
    """
    Returns the number of tokens used by a list of messages.

    Args:
        messages (list): A list of messages, each of which is a dictionary
            containing the role and content of the message.
        model (str): The name of the model to use for tokenization.
            Defaults to "gpt-3.5-turbo-0301".

    Returns:
        int: The number of tokens used by the list of messages.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
        logger.warn("警告: model没有找到. 使用cl100k_base编码.")
    # Unversioned model names may drift over time; count them as the
    # pinned snapshot they currently alias.
    aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0301", "gpt-4": "gpt-4-0314"}
    if model in aliases:
        return count_message_tokens(messages, model=aliases[model])
    # (tokens_per_message, tokens_per_name) overheads per supported model.
    # gpt-3.5-turbo-0301: every message follows <|start|>{role/name}\n{content}<|end|>\n,
    # and if a name is present the role token is omitted (hence -1).
    overheads = {
        "gpt-3.5-turbo-0301": (4, -1),
        "gpt-4-0314": (3, 1),
    }
    if model not in overheads:
        raise NotImplementedError(
            f"num_tokens_from_messages() is not implemented for model {model}.\n"
            " See https://github.com/openai/openai-python/blob/main/chatml.md for"
            " information on how messages are converted to tokens."
        )
    tokens_per_message, tokens_per_name = overheads[model]
    total = 0
    for message in messages:
        total += tokens_per_message
        for key, value in message.items():
            total += len(encoding.encode(value))
            if key == "name":
                total += tokens_per_name
    # Every reply is primed with <|start|>assistant<|message|>.
    return total + 3
def count_string_tokens(string: str, model_name: str) -> int:
    """
    Returns the number of tokens in a text string.

    Args:
        string (str): The text string.
        model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")

    Returns:
        int: The number of tokens in the text string.
    """
    return len(tiktoken.encoding_for_model(model_name).encode(string))
================================================
FILE: autogpt/log_cycle/__init__.py
================================================
================================================
FILE: autogpt/log_cycle/json_handler.py
================================================
import json
import logging
class JsonFileHandler(logging.FileHandler):
    """File handler that rewrites its target file on every record, storing the
    formatted record as pretty-printed, UTF-8 JSON.

    NOTE(review): emit() re-opens baseFilename in "w" mode instead of using the
    stream the FileHandler opened — each record replaces the file's whole
    contents; confirm that overwrite-per-record is the intended behavior.
    """

    def __init__(self, filename, mode="a", encoding=None, delay=False):
        super().__init__(filename, mode, encoding, delay)

    def emit(self, record):
        # The formatted record is expected to be a JSON document; parse it and
        # rewrite the whole file with an indented, non-ASCII-preserving dump.
        payload = json.loads(self.format(record))
        with open(self.baseFilename, "w", encoding="utf-8") as out:
            json.dump(payload, out, ensure_ascii=False, indent=4)
import logging
class JsonFormatter(logging.Formatter):
    """Formatter that passes the record's raw msg through untouched.

    Used with JsonFileHandler, whose emit() parses the formatted output as
    JSON — so msg is expected to already be a serialized JSON string.
    """

    def format(self, record):
        # No interpolation, no metadata: the payload is the message itself.
        return record.msg
================================================
FILE: autogpt/log_cycle/log_cycle.py
================================================
import json
import os
from typing import Any, Dict, Union
from autogpt.logs import logger
DEFAULT_PREFIX = "agent"
FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
CURRENT_CONTEXT_FILE_NAME = "current_context.json"
NEXT_ACTION_FILE_NAME = "next_action.json"
PR
gitextract_3dkehp_f/ ├── .coveragerc ├── .devcontainer/ │ ├── Dockerfile │ ├── devcontainer.json │ └── docker-compose.yml ├── .dockerignore ├── .envrc ├── .flake8 ├── .gitattributes ├── .github/ │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── 1.bug.yml │ │ └── 2.feature.yml │ ├── PULL_REQUEST_TEMPLATE.md │ └── workflows/ │ ├── benchmarks.yml │ ├── ci.yml │ ├── docker-cache-clean.yml │ ├── docker-ci.yml │ ├── docker-release.yml │ ├── documentation-release.yml │ ├── pr-label.yml │ ├── scripts/ │ │ ├── docker-ci-summary.sh │ │ └── docker-release-summary.sh │ └── sponsors_readme.yml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── .sourcery.yaml ├── BULLETIN.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── autogpt/ │ ├── __init__.py │ ├── __main__.py │ ├── agent/ │ │ ├── __init__.py │ │ ├── agent.py │ │ └── agent_manager.py │ ├── app.py │ ├── cli.py │ ├── commands/ │ │ ├── __init__.py │ │ ├── analyze_code.py │ │ ├── audio_text.py │ │ ├── command.py │ │ ├── execute_code.py │ │ ├── file_operations.py │ │ ├── git_operations.py │ │ ├── google_search.py │ │ ├── image_gen.py │ │ ├── improve_code.py │ │ ├── task_statuses.py │ │ ├── times.py │ │ ├── twitter.py │ │ ├── web_playwright.py │ │ ├── web_requests.py │ │ ├── web_selenium.py │ │ └── write_tests.py │ ├── config/ │ │ ├── __init__.py │ │ ├── ai_config.py │ │ └── config.py │ ├── configurator.py │ ├── js/ │ │ └── overlay.js │ ├── json_utils/ │ │ ├── __init__.py │ │ ├── json_fix_general.py │ │ ├── json_fix_llm.py │ │ ├── llm_response_format_1.json │ │ └── utilities.py │ ├── llm/ │ │ ├── __init__.py │ │ ├── api_manager.py │ │ ├── base.py │ │ ├── chat.py │ │ ├── llm_utils.py │ │ ├── modelsinfo.py │ │ ├── providers/ │ │ │ ├── __init__.py │ │ │ └── openai.py │ │ └── token_counter.py │ ├── log_cycle/ │ │ ├── __init__.py │ │ ├── json_handler.py │ │ └── log_cycle.py │ ├── logs.py │ ├── main.py │ ├── memory/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── local.py │ │ ├── milvus.py 
│ │ ├── no_memory.py │ │ ├── pinecone.py │ │ ├── redismem.py │ │ └── weaviate.py │ ├── memory_management/ │ │ ├── store_memory.py │ │ └── summary_memory.py │ ├── models/ │ │ └── base_open_ai_plugin.py │ ├── plugins.py │ ├── processing/ │ │ ├── __init__.py │ │ ├── html.py │ │ └── text.py │ ├── prompts/ │ │ ├── __init__.py │ │ ├── generator.py │ │ └── prompt.py │ ├── setup.py │ ├── singleton.py │ ├── speech/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── brian.py │ │ ├── eleven_labs.py │ │ ├── gtts.py │ │ ├── macos_tts.py │ │ └── say.py │ ├── spinner.py │ ├── url_utils/ │ │ ├── __init__.py │ │ └── validators.py │ ├── utils.py │ └── workspace/ │ ├── __init__.py │ └── workspace.py ├── azure.yaml.template ├── benchmark/ │ ├── __init__.py │ └── benchmark_entrepreneur_gpt_with_difficult_user.py ├── codecov.yml ├── data/ │ └── .keep ├── data_ingestion.py ├── docker-compose.yml ├── docs/ │ ├── challenges/ │ │ ├── beat.md │ │ ├── challenge_template.md │ │ ├── introduction.md │ │ ├── list.md │ │ ├── memory/ │ │ │ ├── challenge_a.md │ │ │ ├── challenge_b.md │ │ │ └── introduction.md │ │ └── submit.md │ ├── configuration/ │ │ ├── imagegen.md │ │ ├── memory.md │ │ ├── search.md │ │ └── voice.md │ ├── index.md │ ├── plugins.md │ ├── setup.md │ ├── testing.md │ └── usage.md ├── main.py ├── mkdocs.yml ├── pyproject.toml ├── requirements.txt ├── run.bat ├── run.sh ├── run_continuous.bat ├── run_continuous.sh ├── scripts/ │ ├── __init__.py │ ├── check_requirements.py │ └── install_plugin_deps.py ├── tests/ │ ├── __init__.py │ ├── conftest.py │ ├── context.py │ ├── integration/ │ │ ├── __init__.py │ │ ├── agent_factory.py │ │ ├── agent_utils.py │ │ ├── cassettes/ │ │ │ ├── test_llm_utils/ │ │ │ │ ├── test_get_ada_embedding.yaml │ │ │ │ └── test_get_ada_embedding_large_context.yaml │ │ │ ├── test_local_cache/ │ │ │ │ └── test_get_relevant.yaml │ │ │ ├── test_memory_management/ │ │ │ │ └── test_save_memory_trimmed_from_context_window.yaml │ │ │ └── test_setup/ │ │ │ ├── 
test_generate_aiconfig_automatic_default.yaml │ │ │ ├── test_generate_aiconfig_automatic_fallback.yaml │ │ │ └── test_generate_aiconfig_automatic_typical.yaml │ │ ├── challenges/ │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── information_retrieval/ │ │ │ │ └── test_information_retrieval_challenge_a.py │ │ │ ├── memory/ │ │ │ │ ├── __init__.py │ │ │ │ ├── cassettes/ │ │ │ │ │ ├── test_memory_challenge_a/ │ │ │ │ │ │ └── test_memory_challenge_a.yaml │ │ │ │ │ └── test_memory_challenge_b/ │ │ │ │ │ └── test_memory_challenge_b.yaml │ │ │ │ ├── test_memory_challenge_a.py │ │ │ │ └── test_memory_challenge_b.py │ │ │ └── utils.py │ │ ├── conftest.py │ │ ├── goal_oriented/ │ │ │ ├── __init__.py │ │ │ ├── cassettes/ │ │ │ │ ├── test_browse_website/ │ │ │ │ │ └── test_browse_website.yaml │ │ │ │ └── test_write_file/ │ │ │ │ └── test_write_file.yaml │ │ │ ├── goal_oriented_tasks.md │ │ │ ├── test_browse_website.py │ │ │ └── test_write_file.py │ │ ├── memory_tests.py │ │ ├── milvus_memory_tests.py │ │ ├── test_execute_code.py │ │ ├── test_git_commands.py │ │ ├── test_llm_utils.py │ │ ├── test_local_cache.py │ │ ├── test_memory_management.py │ │ ├── test_setup.py │ │ └── weaviate_memory_tests.py │ ├── milvus_memory_test.py │ ├── mocks/ │ │ ├── __init__.py │ │ └── mock_commands.py │ ├── test_agent.py │ ├── test_agent_manager.py │ ├── test_ai_config.py │ ├── test_api_manager.py │ ├── test_commands.py │ ├── test_config.py │ ├── test_image_gen.py │ ├── test_logs.py │ ├── test_prompt_generator.py │ ├── test_token_counter.py │ ├── test_utils.py │ ├── test_workspace.py │ ├── unit/ │ │ ├── __init__.py │ │ ├── _test_json_parser.py │ │ ├── models/ │ │ │ └── test_base_open_api_plugin.py │ │ ├── test_browse_scrape_links.py │ │ ├── test_browse_scrape_text.py │ │ ├── test_chat.py │ │ ├── test_commands.py │ │ ├── test_file_operations.py │ │ ├── test_get_self_feedback.py │ │ ├── test_json_parser.py │ │ ├── test_json_utils_llm.py │ │ ├── test_llm_utils.py │ │ ├── test_plugins.py │ │ ├── 
test_spinner.py │ │ ├── test_url_validation.py │ │ └── test_web_selenium.py │ ├── utils.py │ └── vcr/ │ ├── __init__.py │ ├── openai_filter.py │ └── vcr_filter.py └── tests.py
SYMBOL INDEX (648 symbols across 119 files)
FILE: autogpt/agent/agent.py
class Agent (line 16) | class Agent:
method __init__ (line 45) | def __init__(
method start_interaction_loop (line 72) | def start_interaction_loop(self):
method _resolve_pathlike_command_args (line 267) | def _resolve_pathlike_command_args(self, command_args):
method get_self_feedback (line 278) | def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
FILE: autogpt/agent/agent_manager.py
class AgentManager (line 11) | class AgentManager(metaclass=Singleton):
method __init__ (line 14) | def __init__(self):
method create_agent (line 22) | def create_agent(self, task: str, prompt: str, model: str) -> tuple[in...
method message_agent (line 73) | def message_agent(self, key: str | int, message: str) -> str:
method list_agents (line 121) | def list_agents(self) -> list[tuple[str | int, str]]:
method delete_agent (line 131) | def delete_agent(self, key: str | int) -> bool:
FILE: autogpt/app.py
function is_valid_int (line 20) | def is_valid_int(value: str) -> bool:
function get_command (line 36) | def get_command(response_json: Dict):
function map_command_synonyms (line 77) | def map_command_synonyms(command_name: str):
function execute_command (line 92) | def execute_command(
function get_text_summary (line 143) | def get_text_summary(url: str, question: str) -> str:
function get_hyperlinks (line 160) | def get_hyperlinks(url: str) -> Union[str, List[str]]:
function start_agent (line 177) | def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_mo...
function message_agent (line 210) | def message_agent(key: str, message: str) -> str:
function list_agents (line 225) | def list_agents() -> str:
function delete_agent (line 237) | def delete_agent(key: str) -> str:
FILE: autogpt/cli.py
function main (line 64) | def main(
FILE: autogpt/commands/analyze_code.py
function analyze_code (line 13) | def analyze_code(code: str) -> list[str]:
FILE: autogpt/commands/audio_text.py
function read_audio_from_file (line 19) | def read_audio_from_file(filename: str) -> str:
function read_audio (line 34) | def read_audio(audio: bytes) -> str:
FILE: autogpt/commands/command.py
class Command (line 10) | class Command:
method __init__ (line 19) | def __init__(
method __call__ (line 35) | def __call__(self, *args, **kwargs) -> Any:
method __str__ (line 40) | def __str__(self) -> str:
class CommandRegistry (line 44) | class CommandRegistry:
method __init__ (line 52) | def __init__(self):
method _import_module (line 55) | def _import_module(self, module_name: str) -> Any:
method _reload_module (line 58) | def _reload_module(self, module: Any) -> Any:
method register (line 61) | def register(self, cmd: Command) -> None:
method unregister (line 64) | def unregister(self, command_name: str):
method reload_commands (line 70) | def reload_commands(self) -> None:
method get_command (line 79) | def get_command(self, name: str) -> Callable[..., Any]:
method call (line 82) | def call(self, command_name: str, **kwargs) -> Any:
method command_prompt (line 88) | def command_prompt(self) -> str:
method import_commands (line 97) | def import_commands(self, module_name: str) -> None:
function command (line 127) | def command(
FILE: autogpt/commands/execute_code.py
function execute_python_file (line 17) | def execute_python_file(filename: str) -> str:
function execute_shell (line 109) | def execute_shell(command_line: str) -> str:
function execute_shell_popen (line 146) | def execute_shell_popen(command_line) -> str:
function we_are_running_in_a_docker_container (line 178) | def we_are_running_in_a_docker_container() -> bool:
FILE: autogpt/commands/file_operations.py
function text_checksum (line 25) | def text_checksum(text: str) -> str:
function operations_from_log (line 30) | def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str...
function file_operations_state (line 55) | def file_operations_state(log_path: str) -> Dict:
function is_duplicate_operation (line 78) | def is_duplicate_operation(
function log_operation (line 99) | def log_operation(operation: str, filename: str, checksum: str | None = ...
function split_file (line 114) | def split_file(
function read_file (line 147) | def read_file(filename: str) -> str:
function ingest_file (line 165) | def ingest_file(
function write_to_file (line 200) | def write_to_file(filename: str, text: str) -> str:
function append_to_file (line 227) | def append_to_file(filename: str, text: str, should_log: bool = True) ->...
function delete_file (line 255) | def delete_file(filename: str) -> str:
function list_files (line 275) | def list_files(directory: str) -> list[str]:
function download_file (line 305) | def download_file(url, filename):
FILE: autogpt/commands/git_operations.py
function clone_repository (line 19) | def clone_repository(url: str, clone_path: str) -> str:
FILE: autogpt/commands/google_search.py
function google_search (line 15) | def google_search(query: str, num_results: int = 8) -> str:
function google_official_search (line 47) | def google_official_search(query: str, num_results: int = 8) -> str | li...
function safe_google_results (line 101) | def safe_google_results(results: str | list) -> str:
FILE: autogpt/commands/image_gen.py
function generate_image (line 18) | def generate_image(prompt: str, size: int = 256) -> str:
function generate_image_with_hf (line 42) | def generate_image_with_hf(prompt: str, filename: str) -> str:
function generate_image_with_dalle (line 80) | def generate_image_with_dalle(prompt: str, filename: str, size: int) -> ...
function generate_image_with_sd_webui (line 118) | def generate_image_with_sd_webui(
FILE: autogpt/commands/improve_code.py
function improve_code (line 14) | def improve_code(suggestions: list[str], code: str) -> str:
FILE: autogpt/commands/task_statuses.py
function task_complete (line 15) | def task_complete(reason: str) -> NoReturn:
FILE: autogpt/commands/times.py
function get_datetime (line 4) | def get_datetime() -> str:
FILE: autogpt/commands/twitter.py
function send_tweet (line 14) | def send_tweet(tweet_text: str) -> str:
FILE: autogpt/commands/web_playwright.py
function scrape_text (line 17) | def scrape_text(url: str) -> str:
function scrape_links (line 52) | def scrape_links(url: str) -> str | list[str]:
FILE: autogpt/commands/web_requests.py
function get_response (line 19) | def get_response(
function scrape_text (line 53) | def scrape_text(url: str) -> str:
function scrape_links (line 81) | def scrape_links(url: str) -> str | list[str]:
function create_message (line 105) | def create_message(chunk, question):
FILE: autogpt/commands/web_selenium.py
function browse_website (line 37) | def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
function scrape_text_with_selenium (line 66) | def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
function scrape_links_with_selenium (line 137) | def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]:
function close_browser (line 157) | def close_browser(driver: WebDriver) -> None:
function add_header (line 169) | def add_header(driver: WebDriver) -> None:
FILE: autogpt/commands/write_tests.py
function write_tests (line 15) | def write_tests(code: str, focus: list[str]) -> str:
FILE: autogpt/config/ai_config.py
class AIConfig (line 21) | class AIConfig:
method __init__ (line 32) | def __init__(
method load (line 60) | def load(config_file: str = SAVE_FILE) -> "AIConfig":
method save (line 92) | def save(self, config_file: str = SAVE_FILE) -> None:
method construct_full_prompt (line 113) | def construct_full_prompt(
FILE: autogpt/config/config.py
class Config (line 13) | class Config(metaclass=Singleton):
method __init__ (line 18) | def __init__(self) -> None:
method get_azure_deployment_id_for_model (line 159) | def get_azure_deployment_id_for_model(self, model: str) -> str:
method load_azure_config (line 186) | def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> N...
method set_continuous_mode (line 206) | def set_continuous_mode(self, value: bool) -> None:
method set_continuous_limit (line 210) | def set_continuous_limit(self, value: int) -> None:
method set_speak_mode (line 214) | def set_speak_mode(self, value: bool) -> None:
method set_fast_llm_model (line 218) | def set_fast_llm_model(self, value: str) -> None:
method set_smart_llm_model (line 222) | def set_smart_llm_model(self, value: str) -> None:
method set_fast_token_limit (line 226) | def set_fast_token_limit(self, value: int) -> None:
method set_smart_token_limit (line 230) | def set_smart_token_limit(self, value: int) -> None:
method set_embedding_model (line 234) | def set_embedding_model(self, value: str) -> None:
method set_embedding_tokenizer (line 238) | def set_embedding_tokenizer(self, value: str) -> None:
method set_embedding_token_limit (line 242) | def set_embedding_token_limit(self, value: int) -> None:
method set_browse_chunk_max_length (line 246) | def set_browse_chunk_max_length(self, value: int) -> None:
method set_openai_api_key (line 250) | def set_openai_api_key(self, value: str) -> None:
method set_elevenlabs_api_key (line 254) | def set_elevenlabs_api_key(self, value: str) -> None:
method set_elevenlabs_voice_1_id (line 258) | def set_elevenlabs_voice_1_id(self, value: str) -> None:
method set_elevenlabs_voice_2_id (line 262) | def set_elevenlabs_voice_2_id(self, value: str) -> None:
method set_google_api_key (line 266) | def set_google_api_key(self, value: str) -> None:
method set_custom_search_engine_id (line 270) | def set_custom_search_engine_id(self, value: str) -> None:
method set_pinecone_api_key (line 274) | def set_pinecone_api_key(self, value: str) -> None:
method set_pinecone_region (line 278) | def set_pinecone_region(self, value: str) -> None:
method set_debug_mode (line 282) | def set_debug_mode(self, value: bool) -> None:
method set_plugins (line 286) | def set_plugins(self, value: list) -> None:
method set_temperature (line 290) | def set_temperature(self, value: int) -> None:
method set_memory_backend (line 294) | def set_memory_backend(self, name: str) -> None:
function check_openai_api_key (line 299) | def check_openai_api_key() -> None:
FILE: autogpt/configurator.py
function create_config (line 13) | def create_config(
FILE: autogpt/json_utils/json_fix_general.py
function fix_invalid_escape (line 17) | def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
function balance_braces (line 42) | def balance_braces(json_string: str) -> Optional[str]:
function add_quotes_to_property_names (line 69) | def add_quotes_to_property_names(json_string: str) -> str:
function correct_json (line 93) | def correct_json(json_to_load: str) -> str:
FILE: autogpt/json_utils/json_fix_llm.py
function auto_fix_json (line 40) | def auto_fix_json(json_string: str, schema: str) -> str:
function fix_json_using_multiple_techniques (line 85) | def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any...
function fix_and_parse_json (line 134) | def fix_and_parse_json(
function try_ai_fix (line 172) | def try_ai_fix(
function attempt_to_fix_json_by_finding_outermost_brackets (line 208) | def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
FILE: autogpt/json_utils/utilities.py
function extract_char_position (line 15) | def extract_char_position(error_message: str) -> int:
function validate_json (line 33) | def validate_json(json_object: object, schema_name: str) -> dict | None:
function validate_json_string (line 60) | def validate_json_string(json_string: str, schema_name: str) -> dict | N...
function is_string_valid_json (line 74) | def is_string_valid_json(json_string: str, schema_name: str) -> bool:
FILE: autogpt/llm/api_manager.py
class ApiManager (line 11) | class ApiManager(metaclass=Singleton):
method __init__ (line 12) | def __init__(self):
method reset (line 18) | def reset(self):
method create_chat_completion (line 24) | def create_chat_completion(
method update_cost (line 68) | def update_cost(self, prompt_tokens, completion_tokens, model):
method set_total_budget (line 85) | def set_total_budget(self, total_budget):
method get_total_prompt_tokens (line 94) | def get_total_prompt_tokens(self):
method get_total_completion_tokens (line 103) | def get_total_completion_tokens(self):
method get_total_cost (line 112) | def get_total_cost(self):
method get_total_budget (line 121) | def get_total_budget(self):
FILE: autogpt/llm/base.py
class Message (line 5) | class Message(TypedDict):
class ModelInfo (line 13) | class ModelInfo:
class ChatModelInfo (line 28) | class ChatModelInfo(ModelInfo):
class EmbeddingModelInfo (line 35) | class EmbeddingModelInfo(ModelInfo):
class LLMResponse (line 42) | class LLMResponse:
class EmbeddingModelResponse (line 51) | class EmbeddingModelResponse(LLMResponse):
method __post_init__ (line 56) | def __post_init__(self):
class ChatModelResponse (line 62) | class ChatModelResponse(LLMResponse):
FILE: autogpt/llm/chat.py
function create_chat_message (line 23) | def create_chat_message(role, content) -> Message:
function generate_context (line 37) | def generate_context(prompt, relevant_memory, full_message_history, model):
function chat_with_ai (line 63) | def chat_with_ai(
FILE: autogpt/llm/llm_utils.py
function retry_openai_api (line 20) | def retry_openai_api(
function call_ai_function (line 73) | def call_ai_function(
function create_chat_completion (line 111) | def create_chat_completion(
function batched (line 213) | def batched(iterable, n):
function chunked_tokens (line 223) | def chunked_tokens(text, tokenizer_name, chunk_length):
function get_ada_embedding (line 230) | def get_ada_embedding(text: str) -> List[float]:
function create_embedding (line 253) | def create_embedding(
FILE: autogpt/llm/token_counter.py
function count_message_tokens (line 12) | def count_message_tokens(
function count_string_tokens (line 64) | def count_string_tokens(string: str, model_name: str) -> int:
FILE: autogpt/log_cycle/json_handler.py
class JsonFileHandler (line 5) | class JsonFileHandler(logging.FileHandler):
method __init__ (line 6) | def __init__(self, filename, mode="a", encoding=None, delay=False):
method emit (line 9) | def emit(self, record):
class JsonFormatter (line 18) | class JsonFormatter(logging.Formatter):
method format (line 19) | def format(self, record):
FILE: autogpt/log_cycle/log_cycle.py
class LogCycleHandler (line 16) | class LogCycleHandler:
method __init__ (line 21) | def __init__(self):
method create_directory_if_not_exists (line 25) | def create_directory_if_not_exists(directory_path: str) -> None:
method create_outer_directory (line 29) | def create_outer_directory(self, ai_name: str, created_at: str) -> str:
method create_inner_directory (line 43) | def create_inner_directory(self, outer_folder_path: str, cycle_count: ...
method create_nested_directory (line 50) | def create_nested_directory(
method log_cycle (line 58) | def log_cycle(
FILE: autogpt/logs.py
class Logger (line 17) | class Logger(metaclass=Singleton):
method __init__ (line 24) | def __init__(self):
method typewriter_log (line 87) | def typewriter_log(
method debug (line 106) | def debug(
method info (line 114) | def info(
method warn (line 122) | def warn(
method error (line 130) | def error(self, title, message=""):
method _log (line 133) | def _log(
method set_level (line 147) | def set_level(self, level):
method double_check (line 151) | def double_check(self, additionalText=None):
method log_json (line 162) | def log_json(self, data: Any, file_name: str) -> None:
method get_log_directory (line 177) | def get_log_directory(self):
class TypingConsoleHandler (line 188) | class TypingConsoleHandler(logging.StreamHandler):
method emit (line 189) | def emit(self, record):
class ConsoleHandler (line 210) | class ConsoleHandler(logging.StreamHandler):
method emit (line 211) | def emit(self, record) -> None:
class AutoGptFormatter (line 219) | class AutoGptFormatter(logging.Formatter):
method format (line 225) | def format(self, record: LogRecord) -> str:
function remove_color_codes (line 246) | def remove_color_codes(s: str) -> str:
function print_assistant_thoughts (line 254) | def print_assistant_thoughts(
FILE: autogpt/main.py
function run_auto_gpt (line 25) | def run_auto_gpt(
FILE: autogpt/memory/__init__.py
function get_memory (line 38) | def get_memory(cfg, init=False):
function get_supported_memory_backends (line 84) | def get_supported_memory_backends():
FILE: autogpt/memory/base.py
class MemoryProviderSingleton (line 7) | class MemoryProviderSingleton(AbstractSingleton):
method add (line 9) | def add(self, data):
method get (line 14) | def get(self, data):
method clear (line 19) | def clear(self):
method get_relevant (line 24) | def get_relevant(self, data, num_relevant=5):
method get_stats (line 29) | def get_stats(self):
FILE: autogpt/memory/local.py
function create_default_embeddings (line 17) | def create_default_embeddings():
class CacheContent (line 22) | class CacheContent:
class LocalCache (line 29) | class LocalCache(MemoryProviderSingleton):
method __init__ (line 32) | def __init__(self, cfg) -> None:
method add (line 52) | def add(self, text: str):
method clear (line 83) | def clear(self) -> str:
method get (line 92) | def get(self, data: str) -> list[Any] | None:
method get_relevant (line 103) | def get_relevant(self, text: str, k: int) -> list[Any]:
method get_stats (line 122) | def get_stats(self) -> tuple[int, tuple[int, ...]]:
FILE: autogpt/memory/milvus.py
class MilvusMemory (line 11) | class MilvusMemory(MemoryProviderSingleton):
method __init__ (line 14) | def __init__(self, cfg: Config) -> None:
method configure (line 36) | def configure(self, cfg: Config) -> None:
method init_collection (line 72) | def init_collection(self) -> None:
method add (line 93) | def add(self, data) -> str:
method get (line 110) | def get(self, data):
method clear (line 117) | def clear(self) -> str:
method get_relevant (line 133) | def get_relevant(self, data: str, num_relevant: int = 5):
method get_stats (line 158) | def get_stats(self) -> str:
FILE: autogpt/memory/no_memory.py
class NoMemory (line 9) | class NoMemory(MemoryProviderSingleton):
method __init__ (line 14) | def __init__(self, cfg):
method add (line 25) | def add(self, data: str) -> str:
method get (line 36) | def get(self, data: str) -> list[Any] | None:
method clear (line 48) | def clear(self) -> str:
method get_relevant (line 56) | def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] ...
method get_stats (line 69) | def get_stats(self):
FILE: autogpt/memory/pinecone.py
class PineconeMemory (line 9) | class PineconeMemory(MemoryProviderSingleton):
method __init__ (line 10) | def __init__(self, cfg):
method add (line 49) | def add(self, data):
method get (line 57) | def get(self, data):
method clear (line 60) | def clear(self):
method get_relevant (line 64) | def get_relevant(self, data, num_relevant=5):
method get_stats (line 77) | def get_stats(self):
FILE: autogpt/memory/redismem.py
class RedisMemory (line 27) | class RedisMemory(MemoryProviderSingleton):
method __init__ (line 28) | def __init__(self, cfg):
method add (line 80) | def add(self, data: str) -> str:
method get (line 104) | def get(self, data: str) -> list[Any] | None:
method clear (line 115) | def clear(self) -> str:
method get_relevant (line 124) | def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] ...
method get_stats (line 152) | def get_stats(self):
FILE: autogpt/memory/weaviate.py
function default_schema (line 11) | def default_schema(weaviate_index):
class WeaviateMemory (line 24) | class WeaviateMemory(MemoryProviderSingleton):
method __init__ (line 25) | def __init__(self, cfg):
method format_classname (line 49) | def format_classname(index):
method _create_schema (line 58) | def _create_schema(self):
method _build_auth_credentials (line 63) | def _build_auth_credentials(self, cfg):
method add (line 73) | def add(self, data):
method get (line 89) | def get(self, data):
method clear (line 92) | def clear(self):
method get_relevant (line 102) | def get_relevant(self, data, num_relevant=5):
method get_stats (line 123) | def get_stats(self):
FILE: autogpt/memory_management/store_memory.py
function format_memory (line 8) | def format_memory(assistant_reply, next_message_content):
function save_memory_trimmed_from_context_window (line 22) | def save_memory_trimmed_from_context_window(
FILE: autogpt/memory_management/summary_memory.py
function get_newly_trimmed_messages (line 11) | def get_newly_trimmed_messages(
function update_running_summary (line 48) | def update_running_summary(
FILE: autogpt/models/base_open_ai_plugin.py
class Message (line 9) | class Message(TypedDict):
class BaseOpenAIPlugin (line 14) | class BaseOpenAIPlugin(AutoGPTPluginTemplate):
method __init__ (line 19) | def __init__(self, manifests_specs_clients: dict):
method can_handle_on_response (line 28) | def can_handle_on_response(self) -> bool:
method on_response (line 35) | def on_response(self, response: str, *args, **kwargs) -> str:
method can_handle_post_prompt (line 39) | def can_handle_post_prompt(self) -> bool:
method post_prompt (line 46) | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
method can_handle_on_planning (line 56) | def can_handle_on_planning(self) -> bool:
method on_planning (line 63) | def on_planning(
method can_handle_post_planning (line 73) | def can_handle_post_planning(self) -> bool:
method post_planning (line 80) | def post_planning(self, response: str) -> str:
method can_handle_pre_instruction (line 89) | def can_handle_pre_instruction(self) -> bool:
method pre_instruction (line 96) | def pre_instruction(self, messages: List[Message]) -> List[Message]:
method can_handle_on_instruction (line 105) | def can_handle_on_instruction(self) -> bool:
method on_instruction (line 112) | def on_instruction(self, messages: List[Message]) -> Optional[str]:
method can_handle_post_instruction (line 121) | def can_handle_post_instruction(self) -> bool:
method post_instruction (line 128) | def post_instruction(self, response: str) -> str:
method can_handle_pre_command (line 137) | def can_handle_pre_command(self) -> bool:
method pre_command (line 144) | def pre_command(
method can_handle_post_command (line 156) | def can_handle_post_command(self) -> bool:
method post_command (line 163) | def post_command(self, command_name: str, response: str) -> str:
method can_handle_chat_completion (line 173) | def can_handle_chat_completion(
method handle_chat_completion (line 187) | def handle_chat_completion(
FILE: autogpt/plugins.py
function inspect_zip_for_modules (line 22) | def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[...
function write_dict_to_json_file (line 44) | def write_dict_to_json_file(data: dict, file_path: str) -> None:
function fetch_openai_plugins_manifest_and_spec (line 55) | def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
function create_directory_if_not_exists (line 109) | def create_directory_if_not_exists(directory_path: str) -> bool:
function initialize_openai_plugins (line 130) | def initialize_openai_plugins(
function instantiate_openai_plugin_clients (line 180) | def instantiate_openai_plugin_clients(
function scan_plugins (line 199) | def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPlugin...
function denylist_allowlist_check (line 254) | def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
FILE: autogpt/processing/html.py
function extract_hyperlinks (line 8) | def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple...
function format_hyperlinks (line 24) | def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
FILE: autogpt/processing/text.py
function split_text (line 15) | def split_text(
function summarize_text (line 71) | def summarize_text(
function scroll_to_percentage (line 140) | def scroll_to_percentage(driver: WebDriver, ratio: float) -> None:
function create_message (line 155) | def create_message(chunk: str, question: str) -> Dict[str, str]:
FILE: autogpt/prompts/generator.py
class PromptGenerator (line 6) | class PromptGenerator:
method __init__ (line 12) | def __init__(self) -> None:
method add_constraint (line 36) | def add_constraint(self, constraint: str) -> None:
method add_command (line 45) | def add_command(
method _generate_command_string (line 77) | def _generate_command_string(self, command: Dict[str, Any]) -> str:
method add_resource (line 92) | def add_resource(self, resource: str) -> None:
method add_performance_evaluation (line 101) | def add_performance_evaluation(self, evaluation: str) -> None:
method _generate_numbered_list (line 110) | def _generate_numbered_list(self, items: List[Any], item_type="list") ...
method generate_prompt_string (line 136) | def generate_prompt_string(self) -> str:
FILE: autogpt/prompts/prompt.py
function build_default_prompt_generator (line 18) | def build_default_prompt_generator() -> PromptGenerator:
function construct_main_ai_config (line 69) | def construct_main_ai_config() -> AIConfig:
FILE: autogpt/setup.py
function prompt_user (line 15) | def prompt_user() -> AIConfig:
function generate_aiconfig_manual (line 74) | def generate_aiconfig_manual() -> AIConfig:
function generate_aiconfig_automatic (line 160) | def generate_aiconfig_automatic(user_prompt) -> AIConfig:
FILE: autogpt/singleton.py
class Singleton (line 5) | class Singleton(abc.ABCMeta, type):
method __call__ (line 12) | def __call__(cls, *args, **kwargs):
class AbstractSingleton (line 19) | class AbstractSingleton(abc.ABC, metaclass=Singleton):
FILE: autogpt/speech/base.py
class VoiceBase (line 8) | class VoiceBase(AbstractSingleton):
method __init__ (line 13) | def __init__(self):
method say (line 24) | def say(self, text: str, voice_index: int = 0) -> bool:
method _setup (line 36) | def _setup(self) -> None:
method _speech (line 43) | def _speech(self, text: str, voice_index: int = 0) -> bool:
FILE: autogpt/speech/brian.py
class BrianSpeech (line 10) | class BrianSpeech(VoiceBase):
method _setup (line 13) | def _setup(self) -> None:
method _speech (line 17) | def _speech(self, text: str, _: int = 0) -> bool:
FILE: autogpt/speech/eleven_labs.py
class ElevenLabsSpeech (line 13) | class ElevenLabsSpeech(VoiceBase):
method _setup (line 16) | def _setup(self) -> None:
method _use_custom_voice (line 48) | def _use_custom_voice(self, voice, voice_index) -> None:
method _speech (line 62) | def _speech(self, text: str, voice_index: int = 0) -> bool:
FILE: autogpt/speech/gtts.py
class GTTSVoice (line 10) | class GTTSVoice(VoiceBase):
method _setup (line 13) | def _setup(self) -> None:
method _speech (line 16) | def _speech(self, text: str, _: int = 0) -> bool:
FILE: autogpt/speech/macos_tts.py
class MacOSTTS (line 7) | class MacOSTTS(VoiceBase):
method _setup (line 10) | def _setup(self) -> None:
method _speech (line 13) | def _speech(self, text: str, voice_index: int = 0) -> bool:
FILE: autogpt/speech/say.py
function say_text (line 17) | def say_text(text: str, voice_index: int = 0) -> None:
function _get_voice_engine (line 34) | def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
FILE: autogpt/spinner.py
class Spinner (line 8) | class Spinner:
method __init__ (line 11) | def __init__(self, message: str = "Loading...", delay: float = 0.1) ->...
method spin (line 24) | def spin(self) -> None:
method __enter__ (line 32) | def __enter__(self):
method __exit__ (line 40) | def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
method update_message (line 54) | def update_message(self, new_message, delay=0.1):
FILE: autogpt/url_utils/validators.py
function validate_url (line 8) | def validate_url(func: Callable[..., Any]) -> Any:
function is_valid_url (line 39) | def is_valid_url(url: str) -> bool:
function sanitize_url (line 55) | def sanitize_url(url: str) -> str:
function check_local_file_access (line 69) | def check_local_file_access(url: str) -> bool:
FILE: autogpt/utils.py
function clean_input (line 20) | def clean_input(prompt: str = "", talk=False):
function validate_yaml_file (line 61) | def validate_yaml_file(file: str):
function readable_file_size (line 76) | def readable_file_size(size, decimal_places=2):
function get_bulletin_from_web (line 89) | def get_bulletin_from_web():
function get_current_git_branch (line 102) | def get_current_git_branch() -> str:
function get_latest_bulletin (line 111) | def get_latest_bulletin() -> tuple[str, bool]:
function markdown_to_ansi_style (line 136) | def markdown_to_ansi_style(markdown: str):
FILE: autogpt/workspace/workspace.py
class Workspace (line 17) | class Workspace:
method __init__ (line 22) | def __init__(self, workspace_root: str | Path, restrict_to_workspace: ...
method root (line 27) | def root(self) -> Path:
method restrict_to_workspace (line 32) | def restrict_to_workspace(self):
method make_workspace (line 37) | def make_workspace(cls, workspace_directory: str | Path, *args, **kwar...
method get_path (line 56) | def get_path(self, relative_path: str | Path) -> Path:
method _sanitize_path (line 77) | def _sanitize_path(
FILE: benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py
function benchmark_entrepreneur_gpt_with_difficult_user (line 6) | def benchmark_entrepreneur_gpt_with_difficult_user():
FILE: data_ingestion.py
function configure_logging (line 11) | def configure_logging():
function ingest_directory (line 24) | def ingest_directory(directory, memory, args):
function main (line 40) | def main() -> None:
FILE: scripts/check_requirements.py
function main (line 7) | def main():
FILE: scripts/install_plugin_deps.py
function install_plugin_dependencies (line 8) | def install_plugin_dependencies():
FILE: tests/conftest.py
function workspace_root (line 14) | def workspace_root(tmp_path: Path) -> Path:
function workspace (line 19) | def workspace(workspace_root: Path) -> Workspace:
function config (line 25) | def config(mocker: MockerFixture, workspace: Workspace) -> Config:
function api_manager (line 38) | def api_manager() -> ApiManager:
FILE: tests/integration/agent_factory.py
function agent_test_config (line 12) | def agent_test_config(config: Config):
function memory_local_cache (line 23) | def memory_local_cache(agent_test_config: Config):
function memory_none (line 33) | def memory_none(agent_test_config: Config):
function browser_agent (line 43) | def browser_agent(agent_test_config, memory_none: NoMemory, workspace: W...
function writer_agent (line 80) | def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Wo...
function memory_management_agent (line 119) | def memory_management_agent(
function get_company_revenue_agent (line 155) | def get_company_revenue_agent(
FILE: tests/integration/agent_utils.py
function run_interaction_loop (line 6) | def run_interaction_loop(agent: Agent, timeout: float | None):
FILE: tests/integration/challenges/conftest.py
function pytest_addoption (line 4) | def pytest_addoption(parser):
function pytest_configure (line 10) | def pytest_configure(config):
function user_selected_level (line 15) | def user_selected_level(request) -> int:
FILE: tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py
function input_generator (line 13) | def input_generator(input_sequence: list) -> Generator[str, None, None]:
function test_information_retrieval_challenge_a (line 28) | def test_information_retrieval_challenge_a(
FILE: tests/integration/challenges/memory/test_memory_challenge_a.py
function test_memory_challenge_a (line 15) | def test_memory_challenge_a(
function create_instructions_files (line 41) | def create_instructions_files(
function generate_content (line 62) | def generate_content(
FILE: tests/integration/challenges/memory/test_memory_challenge_b.py
function test_memory_challenge_b (line 16) | def test_memory_challenge_b(
function create_instructions_files (line 42) | def create_instructions_files(
function generate_content (line 64) | def generate_content(index: int, task_ids: list, base_filename: str, lev...
FILE: tests/integration/challenges/utils.py
function get_level_to_run (line 8) | def get_level_to_run(
function generate_noise (line 39) | def generate_noise(noise_size) -> str:
function run_multiple_times (line 48) | def run_multiple_times(times):
FILE: tests/integration/conftest.py
function vcr_config (line 9) | def vcr_config():
FILE: tests/integration/goal_oriented/test_browse_website.py
function test_browse_website (line 11) | def test_browse_website(browser_agent: Agent) -> None:
FILE: tests/integration/goal_oriented/test_write_file.py
function test_write_file (line 11) | def test_write_file(writer_agent: Agent) -> None:
FILE: tests/integration/memory_tests.py
class TestLocalCache (line 11) | class TestLocalCache(unittest.TestCase):
method generate_random_string (line 12) | def generate_random_string(self, length):
method setUp (line 15) | def setUp(self):
method test_get_relevant (line 35) | def test_get_relevant(self):
FILE: tests/integration/milvus_memory_tests.py
class TestMilvusMemory (line 12) | class TestMilvusMemory(unittest.TestCase):
method generate_random_string (line 15) | def generate_random_string(self, length: int) -> str:
method setUp (line 18) | def setUp(self) -> None:
method test_get_relevant (line 39) | def test_get_relevant(self) -> None:
FILE: tests/integration/test_execute_code.py
function config_allow_execute (line 13) | def config_allow_execute(config: Config, mocker: MockerFixture):
function python_test_file (line 18) | def python_test_file(config: Config, random_string):
function random_string (line 28) | def random_string():
function test_execute_python_file (line 32) | def test_execute_python_file(python_test_file: str, random_string: str):
function test_execute_python_file_invalid (line 37) | def test_execute_python_file_invalid():
function test_execute_shell (line 48) | def test_execute_shell(config_allow_execute, random_string):
FILE: tests/integration/test_git_commands.py
function mock_clone_from (line 9) | def mock_clone_from(mocker):
function test_clone_auto_gpt_repository (line 13) | def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):
function test_clone_repository_error (line 32) | def test_clone_repository_error(workspace, mock_clone_from):
FILE: tests/integration/test_llm_utils.py
function random_large_string (line 16) | def random_large_string():
function api_manager (line 25) | def api_manager(mocker: MockerFixture):
function spy_create_embedding (line 37) | def spy_create_embedding(mocker: MockerFixture):
function test_get_ada_embedding (line 43) | def test_get_ada_embedding(
function test_get_ada_embedding_large_context (line 58) | def test_get_ada_embedding_large_context(random_large_string):
FILE: tests/integration/test_local_cache.py
function LocalCache (line 14) | def LocalCache():
function mock_embed_with_ada (line 22) | def mock_embed_with_ada(mocker):
function test_init_without_backing_file (line 29) | def test_init_without_backing_file(LocalCache, config, workspace):
function test_init_with_backing_empty_file (line 38) | def test_init_with_backing_empty_file(LocalCache, config, workspace):
function test_init_with_backing_file (line 48) | def test_init_with_backing_file(LocalCache, config, workspace):
function test_add (line 63) | def test_add(LocalCache, config, mock_embed_with_ada):
function test_clear (line 70) | def test_clear(LocalCache, config, mock_embed_with_ada):
function test_get (line 84) | def test_get(LocalCache, config, mock_embed_with_ada):
function test_get_relevant (line 94) | def test_get_relevant(LocalCache, config) -> None:
function test_get_stats (line 105) | def test_get_stats(LocalCache, config, mock_embed_with_ada) -> None:
FILE: tests/integration/test_memory_management.py
function message_history_fixture (line 14) | def message_history_fixture():
function expected_permanent_memory (line 32) | def expected_permanent_memory() -> str:
function test_save_memory_trimmed_from_context_window (line 54) | def test_save_memory_trimmed_from_context_window(
FILE: tests/integration/test_setup.py
function test_generate_aiconfig_automatic_default (line 16) | def test_generate_aiconfig_automatic_default():
function test_generate_aiconfig_automatic_typical (line 29) | def test_generate_aiconfig_automatic_typical():
function test_generate_aiconfig_automatic_fallback (line 41) | def test_generate_aiconfig_automatic_fallback():
function test_prompt_user_manual_mode (line 62) | def test_prompt_user_manual_mode():
FILE: tests/integration/weaviate_memory_tests.py
class TestWeaviateMemory (line 12) | class TestWeaviateMemory(unittest.TestCase):
method setUpClass (line 18) | def setUpClass(cls):
method setUp (line 50) | def setUp(self):
method test_add (line 59) | def test_add(self):
method test_get (line 69) | def test_get(self):
method test_get_stats (line 88) | def test_get_stats(self):
method test_clear (line 103) | def test_clear(self):
FILE: tests/milvus_memory_test.py
function mock_config (line 10) | def mock_config() -> dict:
class TestMilvusMemory (line 25) | class TestMilvusMemory(unittest.TestCase):
method setUp (line 28) | def setUp(self) -> None:
method test_add (line 33) | def test_add(self) -> None:
method test_clear (line 41) | def test_clear(self) -> None:
method test_get (line 46) | def test_get(self) -> None:
method test_get_relevant (line 54) | def test_get_relevant(self) -> None:
method test_get_stats (line 64) | def test_get_stats(self) -> None:
FILE: tests/mocks/mock_commands.py
function function_based (line 5) | def function_based(arg1: int, arg2: str) -> str:
FILE: tests/test_agent.py
function agent (line 10) | def agent():
function test_agent_initialization (line 35) | def test_agent_initialization(agent):
FILE: tests/test_agent_manager.py
function agent_manager (line 8) | def agent_manager():
function task (line 16) | def task():
function prompt (line 21) | def prompt():
function model (line 26) | def model():
function mock_create_chat_completion (line 31) | def mock_create_chat_completion(mocker):
function test_create_agent (line 40) | def test_create_agent(agent_manager, task, prompt, model):
function test_message_agent (line 47) | def test_message_agent(agent_manager, task, prompt, model):
function test_list_agents (line 54) | def test_list_agents(agent_manager, task, prompt, model):
function test_delete_agent (line 61) | def test_delete_agent(agent_manager, task, prompt, model):
FILE: tests/test_ai_config.py
function test_goals_are_always_lists_of_strings (line 9) | def test_goals_are_always_lists_of_strings(tmp_path):
FILE: tests/test_api_manager.py
function reset_api_manager (line 11) | def reset_api_manager():
function mock_costs (line 17) | def mock_costs():
class TestApiManager (line 29) | class TestApiManager:
method test_create_chat_completion_debug_mode (line 31) | def test_create_chat_completion_debug_mode(caplog):
method test_create_chat_completion_empty_messages (line 51) | def test_create_chat_completion_empty_messages():
method test_create_chat_completion_valid_inputs (line 69) | def test_create_chat_completion_valid_inputs():
method test_getter_methods (line 89) | def test_getter_methods(self):
method test_set_total_budget (line 99) | def test_set_total_budget():
method test_update_cost (line 107) | def test_update_cost():
FILE: tests/test_commands.py
class TestCommand (line 11) | class TestCommand:
method example_command_method (line 15) | def example_command_method(arg1: int, arg2: str) -> str:
method test_command_creation (line 20) | def test_command_creation(self):
method test_command_call (line 33) | def test_command_call(self):
method test_command_call_with_invalid_arguments (line 44) | def test_command_call_with_invalid_arguments(self):
method test_command_default_signature (line 54) | def test_command_default_signature(self):
method test_command_custom_signature (line 63) | def test_command_custom_signature(self):
class TestCommandRegistry (line 75) | class TestCommandRegistry:
method example_command_method (line 77) | def example_command_method(arg1: int, arg2: str) -> str:
method test_register_command (line 80) | def test_register_command(self):
method test_unregister_command (line 94) | def test_unregister_command(self):
method test_get_command (line 108) | def test_get_command(self):
method test_get_nonexistent_command (line 122) | def test_get_nonexistent_command(self):
method test_call_command (line 129) | def test_call_command(self):
method test_call_nonexistent_command (line 143) | def test_call_nonexistent_command(self):
method test_get_command_prompt (line 150) | def test_get_command_prompt(self):
method test_import_mock_commands_module (line 164) | def test_import_mock_commands_module(self):
method test_import_temp_command_file_module (line 178) | def test_import_temp_command_file_module(self, tmp_path):
FILE: tests/test_config.py
function test_initial_values (line 11) | def test_initial_values(config):
function test_set_continuous_mode (line 24) | def test_set_continuous_mode(config):
function test_set_speak_mode (line 38) | def test_set_speak_mode(config):
function test_set_fast_llm_model (line 52) | def test_set_fast_llm_model(config):
function test_set_smart_llm_model (line 66) | def test_set_smart_llm_model(config):
function test_set_fast_token_limit (line 80) | def test_set_fast_token_limit(config):
function test_set_smart_token_limit (line 94) | def test_set_smart_token_limit(config):
function test_set_debug_mode (line 108) | def test_set_debug_mode(config):
FILE: tests/test_image_gen.py
function image_size (line 13) | def image_size(request):
function test_dalle (line 22) | def test_dalle(config, workspace, image_size):
function test_huggingface (line 40) | def test_huggingface(config, workspace, image_size, image_model):
function test_sd_webui (line 52) | def test_sd_webui(config, workspace, image_size):
function test_sd_webui_negative_prompt (line 63) | def test_sd_webui_negative_prompt(config, workspace, image_size):
function lst (line 84) | def lst(txt):
function generate_and_validate (line 89) | def generate_and_validate(
FILE: tests/test_logs.py
function test_remove_color_codes (line 27) | def test_remove_color_codes(raw_text, clean_text):
FILE: tests/test_prompt_generator.py
class TestPromptGenerator (line 6) | class TestPromptGenerator(TestCase):
method setUpClass (line 13) | def setUpClass(cls):
method test_add_constraint (line 20) | def test_add_constraint(self):
method test_add_command (line 29) | def test_add_command(self):
method test_add_resource (line 45) | def test_add_resource(self):
method test_add_performance_evaluation (line 53) | def test_add_performance_evaluation(self):
method test_generate_prompt_string (line 62) | def test_generate_prompt_string(self):
FILE: tests/test_token_counter.py
function test_count_message_tokens (line 6) | def test_count_message_tokens():
function test_count_message_tokens_with_name (line 14) | def test_count_message_tokens_with_name():
function test_count_message_tokens_empty_input (line 22) | def test_count_message_tokens_empty_input():
function test_count_message_tokens_invalid_model (line 27) | def test_count_message_tokens_invalid_model():
function test_count_message_tokens_gpt_4 (line 37) | def test_count_message_tokens_gpt_4():
function test_count_string_tokens (line 45) | def test_count_string_tokens():
function test_count_string_tokens_empty_input (line 52) | def test_count_string_tokens_empty_input():
function test_count_message_tokens_invalid_model (line 58) | def test_count_message_tokens_invalid_model():
function test_count_string_tokens_gpt_4 (line 68) | def test_count_string_tokens_gpt_4():
FILE: tests/test_utils.py
function test_validate_yaml_file_valid (line 20) | def test_validate_yaml_file_valid():
function test_validate_yaml_file_not_found (line 30) | def test_validate_yaml_file_not_found():
function test_validate_yaml_file_invalid (line 37) | def test_validate_yaml_file_invalid():
function test_readable_file_size (line 50) | def test_readable_file_size():
function test_get_bulletin_from_web_success (line 58) | def test_get_bulletin_from_web_success(mock_get):
function test_get_bulletin_from_web_failure (line 72) | def test_get_bulletin_from_web_failure(mock_get):
function test_get_bulletin_from_web_exception (line 80) | def test_get_bulletin_from_web_exception(mock_get):
function test_get_latest_bulletin_no_file (line 87) | def test_get_latest_bulletin_no_file():
function test_get_latest_bulletin_with_file (line 95) | def test_get_latest_bulletin_with_file():
function test_get_latest_bulletin_with_new_bulletin (line 108) | def test_get_latest_bulletin_with_new_bulletin():
function test_get_latest_bulletin_new_bulletin_same_as_old_bulletin (line 122) | def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin():
function test_get_current_git_branch (line 136) | def test_get_current_git_branch():
function test_get_current_git_branch_success (line 144) | def test_get_current_git_branch_success(mock_repo):
function test_get_current_git_branch_failure (line 152) | def test_get_current_git_branch_failure(mock_repo):
FILE: tests/test_workspace.py
function workspace_root (line 56) | def workspace_root(tmp_path):
function accessible_path (line 61) | def accessible_path(request):
function inaccessible_path (line 66) | def inaccessible_path(request):
function test_sanitize_path_accessible (line 70) | def test_sanitize_path_accessible(accessible_path, workspace_root):
function test_sanitize_path_inaccessible (line 80) | def test_sanitize_path_inaccessible(inaccessible_path, workspace_root):
function test_get_path_accessible (line 89) | def test_get_path_accessible(accessible_path, workspace_root):
function test_get_path_inaccessible (line 96) | def test_get_path_inaccessible(inaccessible_path, workspace_root):
FILE: tests/unit/_test_json_parser.py
class TestParseJson (line 6) | class TestParseJson(unittest.TestCase):
method test_valid_json (line 7) | def test_valid_json(self):
method test_invalid_json_minor (line 13) | def test_invalid_json_minor(self):
method test_invalid_json_major_with_gpt (line 21) | def test_invalid_json_major_with_gpt(self):
method test_invalid_json_major_without_gpt (line 29) | def test_invalid_json_major_without_gpt(self):
method test_invalid_json_leading_sentence_with_gpt (line 36) | def test_invalid_json_leading_sentence_with_gpt(self):
method test_invalid_json_leading_sentence_with_gpt (line 75) | def test_invalid_json_leading_sentence_with_gpt(self):
FILE: tests/unit/models/test_base_open_api_plugin.py
class DummyPlugin (line 12) | class DummyPlugin(BaseOpenAIPlugin):
function dummy_plugin (line 19) | def dummy_plugin():
function test_dummy_plugin_inheritance (line 33) | def test_dummy_plugin_inheritance(dummy_plugin):
function test_dummy_plugin_name (line 38) | def test_dummy_plugin_name(dummy_plugin):
function test_dummy_plugin_version (line 43) | def test_dummy_plugin_version(dummy_plugin):
function test_dummy_plugin_description (line 48) | def test_dummy_plugin_description(dummy_plugin):
function test_dummy_plugin_default_methods (line 53) | def test_dummy_plugin_default_methods(dummy_plugin):
FILE: tests/unit/test_browse_scrape_links.py
class TestScrapeLinks (line 40) | class TestScrapeLinks:
method test_valid_url_with_hyperlinks (line 46) | def test_valid_url_with_hyperlinks(self):
method test_valid_url (line 53) | def test_valid_url(self, mocker):
method test_invalid_url (line 69) | def test_invalid_url(self, mocker):
method test_no_hyperlinks (line 82) | def test_no_hyperlinks(self, mocker):
method test_scrape_links_with_few_hyperlinks (line 96) | def test_scrape_links_with_few_hyperlinks(self, mocker):
FILE: tests/unit/test_browse_scrape_text.py
class TestScrapeText (line 44) | class TestScrapeText:
method test_scrape_text_with_valid_url (line 45) | def test_scrape_text_with_valid_url(self, mocker):
method test_invalid_url (line 62) | def test_invalid_url(self):
method test_unreachable_url (line 67) | def test_unreachable_url(self, mocker):
method test_no_text (line 80) | def test_no_text(self, mocker):
method test_http_error (line 92) | def test_http_error(self, mocker):
method test_scrape_text_with_html_tags (line 103) | def test_scrape_text_with_html_tags(self, mocker):
FILE: tests/unit/test_chat.py
function test_happy_path_role_content (line 8) | def test_happy_path_role_content():
function test_empty_role_content (line 14) | def test_empty_role_content():
function test_generate_context_empty_inputs (line 20) | def test_generate_context_empty_inputs(mocker):
function test_generate_context_valid_inputs (line 49) | def test_generate_context_valid_inputs():
FILE: tests/unit/test_commands.py
function test_make_agent (line 13) | def test_make_agent() -> None:
FILE: tests/unit/test_file_operations.py
function file_content (line 21) | def file_content():
function test_file_path (line 26) | def test_file_path(config, workspace: Workspace):
function test_file (line 31) | def test_file(test_file_path: Path):
function test_file_with_content_path (line 39) | def test_file_with_content_path(test_file: TextIOWrapper, file_content):
function test_directory (line 49) | def test_directory(config, workspace: Workspace):
function test_nested_file (line 54) | def test_nested_file(config, workspace: Workspace):
function test_file_operations_log (line 58) | def test_file_operations_log(test_file: TextIOWrapper):
function test_file_operations_state (line 80) | def test_file_operations_state(test_file: TextIOWrapper):
function test_is_duplicate_operation (line 101) | def test_is_duplicate_operation(config, mocker: MockerFixture):
function test_log_operation (line 133) | def test_log_operation(config: Config):
function test_text_checksum (line 140) | def test_text_checksum(file_content: str):
function test_log_operation_with_checksum (line 147) | def test_log_operation_with_checksum(config: Config):
function test_split_file (line 155) | def test_split_file():
function test_read_file (line 162) | def test_read_file(test_file_with_content_path: Path, file_content):
function test_write_to_file (line 167) | def test_write_to_file(test_file_path: Path):
function test_write_file_logs_checksum (line 175) | def test_write_file_logs_checksum(config: Config, test_file_path: Path):
function test_write_file_fails_if_content_exists (line 184) | def test_write_file_fails_if_content_exists(test_file_path: Path):
function test_write_file_succeeds_if_content_different (line 195) | def test_write_file_succeeds_if_content_different(test_file_with_content...
function test_append_to_file (line 201) | def test_append_to_file(test_nested_file: Path):
function test_append_to_file_uses_checksum_from_appended_file (line 213) | def test_append_to_file_uses_checksum_from_appended_file(
function test_delete_file (line 233) | def test_delete_file(test_file_with_content_path: Path):
function test_delete_missing_file (line 239) | def test_delete_missing_file(config):
function test_list_files (line 251) | def test_list_files(workspace: Workspace, test_directory: Path):
function test_download_file (line 286) | def test_download_file(config, workspace: Workspace):
FILE: tests/unit/test_get_self_feedback.py
function test_get_self_feedback (line 6) | def test_get_self_feedback(mocker):
FILE: tests/unit/test_json_parser.py
class TestParseJson (line 7) | class TestParseJson(TestCase):
method test_valid_json (line 8) | def test_valid_json(self):
method test_invalid_json_minor (line 14) | def test_invalid_json_minor(self):
method test_invalid_json_major_with_gpt (line 20) | def test_invalid_json_major_with_gpt(self):
method test_invalid_json_major_without_gpt (line 26) | def test_invalid_json_major_without_gpt(self):
method test_invalid_json_leading_sentence_with_gpt (line 33) | def test_invalid_json_leading_sentence_with_gpt(self):
FILE: tests/unit/test_json_utils_llm.py
class TestFixJsonUsingMultipleTechniques (line 36) | class TestFixJsonUsingMultipleTechniques:
method test_fix_and_parse_json_happy_path (line 38) | def test_fix_and_parse_json_happy_path(self):
method test_fix_and_parse_json_whitespace (line 46) | def test_fix_and_parse_json_whitespace(self, mocker):
method test_fix_and_parse_json_array (line 57) | def test_fix_and_parse_json_array(self):
method test_fix_and_parse_json_can_not (line 65) | def test_fix_and_parse_json_can_not(self, mocker):
method test_fix_and_parse_json_empty_string (line 81) | def test_fix_and_parse_json_empty_string(self, mocker):
method test_fix_and_parse_json_escape_characters (line 95) | def test_fix_and_parse_json_escape_characters(self):
method test_fix_and_parse_json_nested_objects (line 106) | def test_fix_and_parse_json_nested_objects(self):
FILE: tests/unit/test_llm_utils.py
function error (line 8) | def error(request):
function error_factory (line 15) | def error_factory(error_instance, error_count, retry_count, warn_user=Tr...
function test_retry_open_api_no_error (line 32) | def test_retry_open_api_no_error(capsys):
function test_retry_open_api_passing (line 50) | def test_retry_open_api_passing(capsys, error, error_count, retry_count,...
function test_retry_open_api_rate_limit_no_warn (line 75) | def test_retry_open_api_rate_limit_no_warn(capsys):
function test_retry_openapi_other_api_error (line 91) | def test_retry_openapi_other_api_error(capsys):
function test_chunked_tokens (line 106) | def test_chunked_tokens():
FILE: tests/unit/test_plugins.py
function test_inspect_zip_for_modules (line 16) | def test_inspect_zip_for_modules():
function mock_config_denylist_allowlist_check (line 22) | def mock_config_denylist_allowlist_check():
function test_denylist_allowlist_check_denylist (line 34) | def test_denylist_allowlist_check_denylist(
function test_denylist_allowlist_check_allowlist (line 44) | def test_denylist_allowlist_check_allowlist(
function test_denylist_allowlist_check_user_input_yes (line 52) | def test_denylist_allowlist_check_user_input_yes(
function test_denylist_allowlist_check_user_input_no (line 62) | def test_denylist_allowlist_check_user_input_no(
function test_denylist_allowlist_check_user_input_invalid (line 72) | def test_denylist_allowlist_check_user_input_invalid(
function config_with_plugins (line 83) | def config_with_plugins():
function mock_config_openai_plugin (line 93) | def mock_config_openai_plugin():
function test_scan_plugins_openai (line 107) | def test_scan_plugins_openai(mock_config_openai_plugin):
function mock_config_generic_plugin (line 114) | def mock_config_generic_plugin():
function test_scan_plugins_generic (line 127) | def test_scan_plugins_generic(mock_config_generic_plugin):
FILE: tests/unit/test_spinner.py
function test_spinner_initializes_with_default_values (line 31) | def test_spinner_initializes_with_default_values():
function test_spinner_initializes_with_custom_values (line 38) | def test_spinner_initializes_with_custom_values():
function test_spinner_stops_spinning (line 46) | def test_spinner_stops_spinning():
function test_spinner_updates_message_and_still_spins (line 55) | def test_spinner_updates_message_and_still_spins():
function test_spinner_can_be_used_as_context_manager (line 66) | def test_spinner_can_be_used_as_context_manager():
FILE: tests/unit/test_url_validation.py
function dummy_method (line 8) | def dummy_method(url):
function test_url_validation_succeeds (line 21) | def test_url_validation_succeeds(url):
function test_url_validation_fails_bad_protocol (line 33) | def test_url_validation_fails_bad_protocol(url):
function test_url_validation_fails_bad_protocol (line 42) | def test_url_validation_fails_bad_protocol(url):
function test_url_validation_fails_local_path (line 57) | def test_url_validation_fails_local_path(url):
FILE: tests/unit/test_web_selenium.py
function test_browse_website (line 4) | def test_browse_website():
FILE: tests/utils.py
function dummy_openai_api_key (line 11) | def dummy_openai_api_key():
function requires_api_key (line 23) | def requires_api_key(env_var):
function skip_in_ci (line 38) | def skip_in_ci(test_function):
function get_workspace_file_path (line 45) | def get_workspace_file_path(workspace, file_name):
FILE: tests/vcr/openai_filter.py
function replace_timestamp_in_request (line 5) | def replace_timestamp_in_request(request):
function before_record_response (line 29) | def before_record_response(response):
function before_record_request (line 35) | def before_record_request(request):
function filter_hostnames (line 43) | def filter_hostnames(request):
FILE: tests/vcr/vcr_filter.py
function replace_message_content (line 17) | def replace_message_content(content: str, replacements: List[Dict[str, s...
function replace_timestamp_in_request (line 25) | def replace_timestamp_in_request(request: Any) -> Any:
function before_record_response (line 49) | def before_record_response(response: Dict[str, Any]) -> Dict[str, Any]:
function before_record_request (line 55) | def before_record_request(request: Any) -> Any:
function filter_hostnames (line 63) | def filter_hostnames(request: Any) -> Any:
Condensed preview — 222 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,010K chars).
[
{
"path": ".coveragerc",
"chars": 28,
"preview": "[run]\r\nrelative_files = true"
},
{
"path": ".devcontainer/Dockerfile",
"chars": 308,
"preview": "# Use an official Python base image from the Docker Hub\nFROM python:3.10\n\n# Install browsers\nRUN apt-get update && apt-g"
},
{
"path": ".devcontainer/devcontainer.json",
"chars": 1364,
"preview": "{\n \"dockerComposeFile\": \"./docker-compose.yml\",\n \"service\": \"auto-gpt\",\n \"workspaceFolder\": \"/workspace/Auto-GPT\",\n "
},
{
"path": ".devcontainer/docker-compose.yml",
"chars": 424,
"preview": "# To boot the app run the following:\n# docker-compose run auto-gpt\nversion: '3.9'\n\nservices:\n auto-gpt:\n depends_on:"
},
{
"path": ".dockerignore",
"chars": 52,
"preview": ".*\n*.template\n*.yaml\n*.yml\n\n*.md\n*.png\n!BULLETIN.md\n"
},
{
"path": ".envrc",
"chars": 308,
"preview": "# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.\n# El"
},
{
"path": ".flake8",
"chars": 192,
"preview": "[flake8]\nmax-line-length = 88\nselect = \"E303, W293, W291, W292, E305, E231, E302\"\nexclude =\n .tox,\n __pycache__,\n "
},
{
"path": ".gitattributes",
"chars": 145,
"preview": "# Exclude VCR cassettes from stats\ntests/**/cassettes/**.y*ml linguist-generated\n\n# Mark documentation as such\ndocs/**.m"
},
{
"path": ".github/FUNDING.yml",
"chars": 67,
"preview": "# These are supported funding model platforms\n\ngithub: Torantulino\n"
},
{
"path": ".github/ISSUE_TEMPLATE/1.bug.yml",
"chars": 5859,
"preview": "name: Bug report 🐛\ndescription: Create a bug report for Auto-GPT.\nlabels: ['status: needs triage']\nbody:\n - type: markd"
},
{
"path": ".github/ISSUE_TEMPLATE/2.feature.yml",
"chars": 1187,
"preview": "name: Feature request 🚀\ndescription: Suggest a new idea for Auto-GPT!\nlabels: ['status: needs triage']\nbody:\n - type: m"
},
{
"path": ".github/PULL_REQUEST_TEMPLATE.md",
"chars": 2848,
"preview": "<!-- ⚠️ At the moment any non-essential commands are not being merged.\nIf you want to add non-essential commands to Auto"
},
{
"path": ".github/workflows/benchmarks.yml",
"chars": 665,
"preview": "name: Run Benchmarks\n\non:\n workflow_dispatch:\n\njobs:\n build:\n runs-on: ubuntu-latest\n\n env:\n python-version"
},
{
"path": ".github/workflows/ci.yml",
"chars": 2196,
"preview": "name: Python CI\n\non:\n push:\n branches: [ master ]\n pull_request:\n branches: [ master, stable ]\n\nconcurrency:\n g"
},
{
"path": ".github/workflows/docker-cache-clean.yml",
"chars": 1666,
"preview": "name: Purge Docker CI cache\n\non:\n schedule:\n - cron: 20 4 * * 1,4\n\nenv:\n BASE_BRANCH: master\n IMAGE_NAME: auto-gpt"
},
{
"path": ".github/workflows/docker-ci.yml",
"chars": 3554,
"preview": "name: Docker CI\n\non:\n push:\n branches: [ master ]\n pull_request:\n branches: [ master, stable ]\n\nconcurrency:\n g"
},
{
"path": ".github/workflows/docker-release.yml",
"chars": 2654,
"preview": "name: Docker Release\n\non:\n release:\n types: [ published, edited ]\n\n workflow_dispatch:\n inputs:\n no_cache:\n"
},
{
"path": ".github/workflows/documentation-release.yml",
"chars": 720,
"preview": "name: Docs\n\non:\n push:\n branches: [ stable ]\n paths:\n - 'docs/**'\n - 'mkdocs.yml'\n - '.github/work"
},
{
"path": ".github/workflows/pr-label.yml",
"chars": 1921,
"preview": "name: \"Pull Request auto-label\"\n\non:\n # So that PRs touching the same files as the push are updated\n push:\n branche"
},
{
"path": ".github/workflows/scripts/docker-ci-summary.sh",
"chars": 2731,
"preview": "#!/bin/bash\nmeta=$(docker image inspect \"$IMAGE_NAME\" | jq '.[0]')\nhead_compare_url=$(sed \"s/{base}/$base_branch/; s/{he"
},
{
"path": ".github/workflows/scripts/docker-release-summary.sh",
"chars": 2032,
"preview": "#!/bin/bash\nmeta=$(docker image inspect \"$IMAGE_NAME\" | jq '.[0]')\n\nEOF=$(dd if=/dev/urandom bs=15 count=1 status=none |"
},
{
"path": ".github/workflows/sponsors_readme.yml",
"chars": 651,
"preview": "name: Generate Sponsors README\n\non:\n workflow_dispatch:\n schedule:\n - cron: '0 */12 * * *'\n\njobs:\n deploy:\n run"
},
{
"path": ".gitignore",
"chars": 2221,
"preview": "## Original ignores\nautogpt/keys.py\nautogpt/*json\nautogpt/node_modules/\nautogpt/__pycache__/keys.cpython-310.pyc\nautogpt"
},
{
"path": ".isort.cfg",
"chars": 320,
"preview": "[settings]\nprofile = black\nmulti_line_output = 3\ninclude_trailing_comma = true\nforce_grid_wrap = 0\nuse_parentheses = tru"
},
{
"path": ".pre-commit-config.yaml",
"chars": 805,
"preview": "repos:\n - repo: https://github.com/pre-commit/pre-commit-hooks\n rev: v4.4.0\n hooks:\n - id: check-added-large"
},
{
"path": ".sourcery.yaml",
"chars": 1679,
"preview": "# 🪄 This is your project's Sourcery configuration file.\n\n# You can use it to get Sourcery working in the way you want, s"
},
{
"path": "BULLETIN.md",
"chars": 413,
"preview": "# 持续更新中\nAuto-GPT 0.3.0最大更新为自动定义AI名称,角色,任务等,也可以进入手动模式\n对插件(Plugin)的支持也在逐渐强大,可玩性更强\n我正在逐步修改汉化0.3.0稳定版本,如果你喜欢我的工作,请给我一个star,谢"
},
{
"path": "CODE_OF_CONDUCT.md",
"chars": 1805,
"preview": "# Code of Conduct for Auto-GPT\n\n## 1. Purpose\n\nThe purpose of this Code of Conduct is to provide guidelines for contribu"
},
{
"path": "CONTRIBUTING.md",
"chars": 94,
"preview": "This document now lives at https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing\n"
},
{
"path": "Dockerfile",
"chars": 1106,
"preview": "# 'dev' or 'release' container build\nARG BUILD_TYPE=dev\n\n# Use an official Python base image from the Docker Hub\nFROM py"
},
{
"path": "LICENSE",
"chars": 1077,
"preview": "MIT License\n\nCopyright (c) 2023 Toran Bruce Richards\n\nPermission is hereby granted, free of charge, to any person obtain"
},
{
"path": "README.md",
"chars": 2036,
"preview": "# Auto-GPT中文版\n### Youtube频道:https://www.youtube.com/@Hossie\n##\n<hr/>\n\n\n<h2 align=\"center\"> 💖 大家好 💖</h2>\n\n<p align=\"cente"
},
{
"path": "autogpt/__init__.py",
"chars": 311,
"preview": "import os\nimport random\nimport sys\n\nfrom dotenv import load_dotenv\n\nif \"pytest\" in sys.argv or \"pytest\" in sys.modules o"
},
{
"path": "autogpt/__main__.py",
"chars": 113,
"preview": "\"\"\"Auto-GPT: A GPT powered AI Assistant\"\"\"\nimport autogpt.cli\n\nif __name__ == \"__main__\":\n autogpt.cli.main()\n"
},
{
"path": "autogpt/agent/__init__.py",
"chars": 128,
"preview": "from autogpt.agent.agent import Agent\nfrom autogpt.agent.agent_manager import AgentManager\n\n__all__ = [\"Agent\", \"AgentMa"
},
{
"path": "autogpt/agent/agent.py",
"chars": 13026,
"preview": "from colorama import Fore, Style\n\nfrom autogpt.app import execute_command, get_command\nfrom autogpt.config import Config"
},
{
"path": "autogpt/agent/agent_manager.py",
"chars": 4784,
"preview": "\"\"\"Agent manager for managing GPT agents\"\"\"\nfrom __future__ import annotations\n\nfrom typing import List\n\nfrom autogpt.co"
},
{
"path": "autogpt/app.py",
"chars": 7395,
"preview": "\"\"\" Command and Control \"\"\"\nimport json\nfrom typing import Dict, List, NoReturn, Union\n\nfrom autogpt.agent.agent_manager"
},
{
"path": "autogpt/cli.py",
"chars": 2583,
"preview": "\"\"\"Main script for the autogpt package.\"\"\"\nimport click\n\n\n@click.group(invoke_without_command=True)\n@click.option(\"-c\", "
},
{
"path": "autogpt/commands/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "autogpt/commands/analyze_code.py",
"chars": 796,
"preview": "\"\"\"Code evaluation module.\"\"\"\nfrom __future__ import annotations\n\nfrom autogpt.commands.command import command\nfrom auto"
},
{
"path": "autogpt/commands/audio_text.py",
"chars": 1365,
"preview": "\"\"\"Commands for converting audio to text.\"\"\"\nimport json\n\nimport requests\n\nfrom autogpt.commands.command import command\n"
},
{
"path": "autogpt/commands/command.py",
"chars": 5119,
"preview": "import functools\nimport importlib\nimport inspect\nfrom typing import Any, Callable, Optional\n\n# Unique identifier for aut"
},
{
"path": "autogpt/commands/execute_code.py",
"chars": 5672,
"preview": "\"\"\"Execute code in a Docker container\"\"\"\nimport os\nimport subprocess\nfrom pathlib import Path\n\nimport docker\nfrom docker"
},
{
"path": "autogpt/commands/file_operations.py",
"chars": 11183,
"preview": "\"\"\"File operations for AutoGPT\"\"\"\nfrom __future__ import annotations\n\nimport hashlib\nimport os\nimport os.path\nfrom typin"
},
{
"path": "autogpt/commands/git_operations.py",
"chars": 1035,
"preview": "\"\"\"Git operations for autogpt\"\"\"\nfrom git.repo import Repo\n\nfrom autogpt.commands.command import command\nfrom autogpt.co"
},
{
"path": "autogpt/commands/google_search.py",
"chars": 3427,
"preview": "\"\"\"Google search command for Autogpt.\"\"\"\nfrom __future__ import annotations\n\nimport json\n\nfrom duckduckgo_search import "
},
{
"path": "autogpt/commands/image_gen.py",
"chars": 4614,
"preview": "\"\"\" Image Generation Module for AutoGPT.\"\"\"\nimport io\nimport uuid\nfrom base64 import b64decode\n\nimport openai\nimport req"
},
{
"path": "autogpt/commands/improve_code.py",
"chars": 980,
"preview": "from __future__ import annotations\n\nimport json\n\nfrom autogpt.commands.command import command\nfrom autogpt.llm import ca"
},
{
"path": "autogpt/commands/task_statuses.py",
"chars": 636,
"preview": "\"\"\"Task Statuses module.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import NoReturn\n\nfrom autogpt.commands.comma"
},
{
"path": "autogpt/commands/times.py",
"chars": 230,
"preview": "from datetime import datetime\n\n\ndef get_datetime() -> str:\n \"\"\"Return the current date and time\n\n Returns:\n "
},
{
"path": "autogpt/commands/twitter.py",
"chars": 1098,
"preview": "\"\"\"A module that contains a command to send a tweet.\"\"\"\nimport os\n\nimport tweepy\n\nfrom autogpt.commands.command import c"
},
{
"path": "autogpt/commands/web_playwright.py",
"chars": 2136,
"preview": "\"\"\"Web scraping commands using Playwright\"\"\"\nfrom __future__ import annotations\n\nfrom autogpt.logs import logger\n\ntry:\n "
},
{
"path": "autogpt/commands/web_requests.py",
"chars": 3161,
"preview": "\"\"\"Browse a webpage and summarize it using the LLM model\"\"\"\nfrom __future__ import annotations\n\nimport requests\nfrom bs4"
},
{
"path": "autogpt/commands/web_selenium.py",
"chars": 5896,
"preview": "\"\"\"Selenium web scraping module.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom pathlib import Path\nfrom sys"
},
{
"path": "autogpt/commands/write_tests.py",
"chars": 1097,
"preview": "\"\"\"A module that contains a function to generate test cases for the submitted code.\"\"\"\nfrom __future__ import annotation"
},
{
"path": "autogpt/config/__init__.py",
"chars": 250,
"preview": "\"\"\"\nThis module contains the configuration classes for AutoGPT.\n\"\"\"\nfrom autogpt.config.ai_config import AIConfig\nfrom a"
},
{
"path": "autogpt/config/ai_config.py",
"chars": 5542,
"preview": "# sourcery skip: do-not-use-staticmethod\n\"\"\"\nA module that contains the AIConfig class object that contains the configur"
},
{
"path": "autogpt/config/config.py",
"chars": 12281,
"preview": "\"\"\"Configuration class to store the state of bools for different scripts access.\"\"\"\nimport os\nfrom typing import List\n\ni"
},
{
"path": "autogpt/configurator.py",
"chars": 4307,
"preview": "\"\"\"Configurator module.\"\"\"\nimport click\nfrom colorama import Back, Fore, Style\n\nfrom autogpt import utils\nfrom autogpt.c"
},
{
"path": "autogpt/js/overlay.js",
"chars": 804,
"preview": "const overlay = document.createElement('div');\nObject.assign(overlay.style, {\n position: 'fixed',\n zIndex: 999999,"
},
{
"path": "autogpt/json_utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "autogpt/json_utils/json_fix_general.py",
"chars": 3607,
"preview": "\"\"\"This module contains functions to fix JSON strings using general programmatic approaches, suitable for addressing\ncom"
},
{
"path": "autogpt/json_utils/json_fix_llm.py",
"chars": 8381,
"preview": "\"\"\"This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance\nof "
},
{
"path": "autogpt/json_utils/llm_response_format_1.json",
"chars": 954,
"preview": "{\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"thoughts\""
},
{
"path": "autogpt/json_utils/utilities.py",
"chars": 2266,
"preview": "\"\"\"Utilities for the json_fixes package.\"\"\"\nimport json\nimport os.path\nimport re\n\nfrom jsonschema import Draft7Validator"
},
{
"path": "autogpt/llm/__init__.py",
"chars": 978,
"preview": "from autogpt.llm.api_manager import ApiManager\nfrom autogpt.llm.base import (\n ChatModelInfo,\n ChatModelResponse,\n"
},
{
"path": "autogpt/llm/api_manager.py",
"chars": 3967,
"preview": "from __future__ import annotations\n\nimport openai\n\nfrom autogpt.config import Config\nfrom autogpt.llm.modelsinfo import "
},
{
"path": "autogpt/llm/base.py",
"chars": 1412,
"preview": "from dataclasses import dataclass, field\nfrom typing import List, TypedDict\n\n\nclass Message(TypedDict):\n \"\"\"OpenAI Me"
},
{
"path": "autogpt/llm/chat.py",
"chars": 10490,
"preview": "import time\nfrom random import shuffle\n\nfrom openai.error import RateLimitError\n\nfrom autogpt.config import Config\nfrom "
},
{
"path": "autogpt/llm/llm_utils.py",
"chars": 9528,
"preview": "from __future__ import annotations\n\nimport functools\nimport time\nfrom itertools import islice\nfrom typing import List, O"
},
{
"path": "autogpt/llm/modelsinfo.py",
"chars": 486,
"preview": "COSTS = {\n \"gpt-3.5-turbo\": {\"prompt\": 0.002, \"completion\": 0.002},\n \"gpt-3.5-turbo-0301\": {\"prompt\": 0.002, \"comp"
},
{
"path": "autogpt/llm/providers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "autogpt/llm/providers/openai.py",
"chars": 905,
"preview": "from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo\n\nOPEN_AI_CHAT_MODELS = {\n \"gpt-3.5-turbo\": ChatModelIn"
},
{
"path": "autogpt/llm/token_counter.py",
"chars": 2738,
"preview": "\"\"\"Functions for counting the number of tokens in a message or string.\"\"\"\nfrom __future__ import annotations\n\nfrom typin"
},
{
"path": "autogpt/log_cycle/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "autogpt/log_cycle/json_handler.py",
"chars": 531,
"preview": "import json\nimport logging\n\n\nclass JsonFileHandler(logging.FileHandler):\n def __init__(self, filename, mode=\"a\", enco"
},
{
"path": "autogpt/log_cycle/log_cycle.py",
"chars": 2705,
"preview": "import json\nimport os\nfrom typing import Any, Dict, Union\n\nfrom autogpt.logs import logger\n\nDEFAULT_PREFIX = \"agent\"\nFUL"
},
{
"path": "autogpt/logs.py",
"chars": 9760,
"preview": "\"\"\"Logging module for Auto-GPT.\"\"\"\nimport logging\nimport os\nimport random\nimport re\nimport time\nfrom logging import LogR"
},
{
"path": "autogpt/main.py",
"chars": 6171,
"preview": "\"\"\"The application entry point. Can be invoked by a CLI or any other front end application.\"\"\"\nimport logging\nimport sy"
},
{
"path": "autogpt/memory/__init__.py",
"chars": 2557,
"preview": "from autogpt.logs import logger\nfrom autogpt.memory.local import LocalCache\nfrom autogpt.memory.no_memory import NoMemor"
},
{
"path": "autogpt/memory/base.py",
"chars": 649,
"preview": "\"\"\"Base class for memory providers.\"\"\"\nimport abc\n\nfrom autogpt.singleton import AbstractSingleton\n\n\nclass MemoryProvide"
},
{
"path": "autogpt/memory/local.py",
"chars": 3235,
"preview": "from __future__ import annotations\n\nimport dataclasses\nfrom pathlib import Path\nfrom typing import Any, List\n\nimport num"
},
{
"path": "autogpt/memory/milvus.py",
"chars": 5228,
"preview": "\"\"\" Milvus memory storage provider.\"\"\"\nimport re\n\nfrom pymilvus import Collection, CollectionSchema, DataType, FieldSche"
},
{
"path": "autogpt/memory/no_memory.py",
"chars": 1743,
"preview": "\"\"\"A class that does not store any data. This is the default memory provider.\"\"\"\nfrom __future__ import annotations\n\nfro"
},
{
"path": "autogpt/memory/pinecone.py",
"chars": 2931,
"preview": "import pinecone\nfrom colorama import Fore, Style\n\nfrom autogpt.llm import get_ada_embedding\nfrom autogpt.logs import log"
},
{
"path": "autogpt/memory/redismem.py",
"chars": 5023,
"preview": "\"\"\"Redis memory provider.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any\n\nimport numpy as np\nimport redis"
},
{
"path": "autogpt/memory/weaviate.py",
"chars": 4182,
"preview": "import weaviate\nfrom weaviate import Client\nfrom weaviate.embedded import EmbeddedOptions\nfrom weaviate.util import gene"
},
{
"path": "autogpt/memory_management/store_memory.py",
"chars": 1310,
"preview": "from autogpt.json_utils.utilities import (\n LLM_DEFAULT_RESPONSE_FORMAT,\n is_string_valid_json,\n)\nfrom autogpt.log"
},
{
"path": "autogpt/memory_management/summary_memory.py",
"chars": 4659,
"preview": "import copy\nimport json\nfrom typing import Dict, List, Tuple\nfrom autogpt.config import Config\nfrom autogpt.llm.llm_util"
},
{
"path": "autogpt/models/base_open_ai_plugin.py",
"chars": 7251,
"preview": "\"\"\"Handles loading of plugins.\"\"\"\nfrom typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar\n\nfrom auto_gpt"
},
{
"path": "autogpt/plugins.py",
"chars": 10346,
"preview": "\"\"\"Handles loading of plugins.\"\"\"\n\nimport importlib\nimport json\nimport os\nimport zipfile\nfrom pathlib import Path\nfrom t"
},
{
"path": "autogpt/processing/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "autogpt/processing/html.py",
"chars": 929,
"preview": "\"\"\"HTML processing functions\"\"\"\nfrom __future__ import annotations\n\nfrom bs4 import BeautifulSoup\nfrom requests.compat i"
},
{
"path": "autogpt/processing/text.py",
"chars": 5214,
"preview": "\"\"\"Text processing functions\"\"\"\nfrom typing import Dict, Generator, Optional\n\nimport spacy\nfrom selenium.webdriver.remot"
},
{
"path": "autogpt/prompts/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "autogpt/prompts/generator.py",
"chars": 5483,
"preview": "\"\"\" A module for generating custom prompt strings.\"\"\"\nimport json\nfrom typing import Any, Callable, Dict, List, Optional"
},
{
"path": "autogpt/prompts/prompt.py",
"chars": 3828,
"preview": "from colorama import Fore\n\nfrom autogpt.config.ai_config import AIConfig\nfrom autogpt.config.config import Config\nfrom a"
},
{
"path": "autogpt/setup.py",
"chars": 5839,
"preview": "\"\"\"Set up the AI and its goals\"\"\"\nimport re\n\nfrom colorama import Fore, Style\n\nfrom autogpt import utils\nfrom autogpt.co"
},
{
"path": "autogpt/singleton.py",
"chars": 632,
"preview": "\"\"\"The singleton metaclass for ensuring only one instance of a class.\"\"\"\nimport abc\n\n\nclass Singleton(abc.ABCMeta, type)"
},
{
"path": "autogpt/speech/__init__.py",
"chars": 146,
"preview": "\"\"\"This module contains the speech recognition and speech synthesis functions.\"\"\"\nfrom autogpt.speech.say import say_tex"
},
{
"path": "autogpt/speech/base.py",
"chars": 1116,
"preview": "\"\"\"Base class for all voice classes.\"\"\"\nimport abc\nfrom threading import Lock\n\nfrom autogpt.singleton import AbstractSin"
},
{
"path": "autogpt/speech/brian.py",
"chars": 1168,
"preview": "import logging\nimport os\n\nimport requests\nfrom playsound import playsound\n\nfrom autogpt.speech.base import VoiceBase\n\n\nc"
},
{
"path": "autogpt/speech/eleven_labs.py",
"chars": 2951,
"preview": "\"\"\"ElevenLabs speech module\"\"\"\nimport os\n\nimport requests\nfrom playsound import playsound\n\nfrom autogpt.config import Co"
},
{
"path": "autogpt/speech/gtts.py",
"chars": 455,
"preview": "\"\"\" GTTS Voice. \"\"\"\nimport os\n\nimport gtts\nfrom playsound import playsound\n\nfrom autogpt.speech.base import VoiceBase\n\n\n"
},
{
"path": "autogpt/speech/macos_tts.py",
"chars": 520,
"preview": "\"\"\" MacOS TTS Voice. \"\"\"\nimport os\n\nfrom autogpt.speech.base import VoiceBase\n\n\nclass MacOSTTS(VoiceBase):\n \"\"\"MacOS "
},
{
"path": "autogpt/speech/say.py",
"chars": 1429,
"preview": "\"\"\" Text to speech module \"\"\"\nimport threading\nfrom threading import Semaphore\n\nfrom autogpt.config import Config\nfrom a"
},
{
"path": "autogpt/spinner.py",
"chars": 2046,
"preview": "\"\"\"A simple spinner module\"\"\"\nimport itertools\nimport sys\nimport threading\nimport time\n\n\nclass Spinner:\n \"\"\"A simple "
},
{
"path": "autogpt/url_utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "autogpt/url_utils/validators.py",
"chars": 2803,
"preview": "import functools\nfrom typing import Any, Callable\nfrom urllib.parse import urljoin, urlparse\n\nfrom requests.compat impor"
},
{
"path": "autogpt/utils.py",
"chars": 4480,
"preview": "import os\nimport re\n\nimport requests\nimport yaml\nfrom colorama import Fore, Style\nfrom git.repo import Repo\n\nfrom autogp"
},
{
"path": "autogpt/workspace/__init__.py",
"chars": 82,
"preview": "from autogpt.workspace.workspace import Workspace\n\n__all__ = [\n \"Workspace\",\n]\n"
},
{
"path": "autogpt/workspace/workspace.py",
"chars": 3954,
"preview": "\"\"\"\n=========\nWorkspace\n=========\n\nThe workspace is a directory containing configuration and working files for an AutoGP"
},
{
"path": "azure.yaml.template",
"chars": 318,
"preview": "azure_api_type: azure\nazure_api_base: your-base-url-for-azure\nazure_api_version: api-version-for-azure\nazure_model_map:\n"
},
{
"path": "benchmark/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "benchmark/benchmark_entrepreneur_gpt_with_difficult_user.py",
"chars": 3592,
"preview": "import os\nimport subprocess\nimport sys\n\n\ndef benchmark_entrepreneur_gpt_with_difficult_user():\n # Test case to check "
},
{
"path": "codecov.yml",
"chars": 376,
"preview": "coverage:\n status:\n project:\n default:\n target: auto\n threshold: 1%\n informational: true\n "
},
{
"path": "data/.keep",
"chars": 0,
"preview": ""
},
{
"path": "data_ingestion.py",
"chars": 3119,
"preview": "import argparse\nimport logging\n\nfrom autogpt.commands.file_operations import ingest_file, list_files\nfrom autogpt.config"
},
{
"path": "docker-compose.yml",
"chars": 397,
"preview": "# To boot the app run the following:\n# docker-compose run auto-gpt\nversion: \"3.9\"\n\nservices:\n auto-gpt:\n depends_on:"
},
{
"path": "docs/challenges/beat.md",
"chars": 560,
"preview": "# Beat a Challenge\n\nIf you have a solution or idea to tackle an existing challenge, you can contribute by working on it "
},
{
"path": "docs/challenges/challenge_template.md",
"chars": 816,
"preview": "# Challenge Title\n\n## Description\n\nProvide a clear and concise description of the challenge. Include any relevant exampl"
},
{
"path": "docs/challenges/introduction.md",
"chars": 1705,
"preview": "indroduction.md\n# Introduction to Challenges\n\nWelcome to the Auto-GPT Challenges page! This is a space where we encourag"
},
{
"path": "docs/challenges/list.md",
"chars": 411,
"preview": "# List of Challenges\n\nThis page contains a curated list of challenges that Auto-GPT currently faces. If you think you ha"
},
{
"path": "docs/challenges/memory/challenge_a.md",
"chars": 947,
"preview": "# Memory Challenge A\n\n**Status**: Challenge Completed\n\n\n## Description\n\nThe agent, Follow-Instructions-GPT, has the foll"
},
{
"path": "docs/challenges/memory/challenge_b.md",
"chars": 1344,
"preview": "# Memory Challenge B\n\n**Status**: Current level to beat: level 3\n\n**Command to try**: \n```\npytest test/test_memory/test_"
},
{
"path": "docs/challenges/memory/introduction.md",
"chars": 538,
"preview": "# Memory Challenges\n\nMemory challenges are designed to test the ability of an AI agent, like Auto-GPT, to remember and u"
},
{
"path": "docs/challenges/submit.md",
"chars": 975,
"preview": "# Submit a Challenge\n\nIf you have identified a task or problem that Auto-GPT struggles with, you can submit it as a chal"
},
{
"path": "docs/configuration/imagegen.md",
"chars": 2080,
"preview": "# 🖼 Image Generation configuration\n\n| Config variable | Values | |\n| ----"
},
{
"path": "docs/configuration/memory.md",
"chars": 9176,
"preview": "## Setting Your Cache Type\n\nBy default, Auto-GPT set up with Docker Compose will use Redis as its memory backend.\nOtherw"
},
{
"path": "docs/configuration/search.md",
"chars": 1947,
"preview": "## 🔍 Google API Keys Configuration\n\n!!! note\n This section is optional. Use the official Google API if search attempt"
},
{
"path": "docs/configuration/voice.md",
"chars": 1054,
"preview": "# Text to Speech\n\nEnter this command to use TTS _(Text-to-Speech)_ for Auto-GPT\n\n``` shell\npython -m autogpt --speak\n```"
},
{
"path": "docs/index.md",
"chars": 258,
"preview": "# Auto-GPT\n\nWelcome to Auto-GPT. Please follow the [Installation](/setup/) guide to get started.\n\nIt is recommended to "
},
{
"path": "docs/plugins.md",
"chars": 547,
"preview": "## Plugins\n\n⚠️💀 **WARNING** 💀⚠️: Review the code of any plugin you use thoroughly, as plugins can execute any Python cod"
},
{
"path": "docs/setup.md",
"chars": 8281,
"preview": "# Setting up Auto-GPT\n\n## 📋 Requirements\n\nChoose an environment to run Auto-GPT in (pick one):\n\n - [Docker](https://doc"
},
{
"path": "docs/testing.md",
"chars": 836,
"preview": "# Running tests\n\nTo run all tests, use the following command:\n\n``` shell\npytest\n```\n\nIf `pytest` is not found:\n``` shell"
},
{
"path": "docs/usage.md",
"chars": 2924,
"preview": "# Usage\n\n## Command Line Arguments\nRunning with `--help` lists all the possible command line arguments you can pass:\n\n``"
},
{
"path": "main.py",
"chars": 25,
"preview": "from autogpt import main\n"
},
{
"path": "mkdocs.yml",
"chars": 1061,
"preview": "site_name: Auto-GPT\nsite_url: https://docs.agpt.co/\nrepo_url: https://github.com/Significant-Gravitas/Auto-GPT\nnav:\n - "
},
{
"path": "pyproject.toml",
"chars": 1129,
"preview": "[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[project]\nname = \"agpt\"\nversion = \"0.3.0\"\naut"
},
{
"path": "requirements.txt",
"chars": 973,
"preview": "beautifulsoup4>=4.12.2\ncolorama==0.4.6\ndistro==1.8.0\nopenai==0.27.2\nplaysound==1.2.2\npython-dotenv==1.0.0\npyyaml==6.0\nre"
},
{
"path": "run.bat",
"chars": 472,
"preview": "@echo off\nsetlocal enabledelayedexpansion\n\n:FindPythonCommand\nfor %%A in (python python3) do (\n where /Q %%A\n if !"
},
{
"path": "run.sh",
"chars": 513,
"preview": "#!/bin/bash\n\nfunction find_python_command() {\n if command -v python &> /dev/null\n then\n echo \"python\"\n e"
},
{
"path": "run_continuous.bat",
"chars": 60,
"preview": "@echo off\nset argument=--continuous\ncall run.bat %argument%\n"
},
{
"path": "run_continuous.sh",
"chars": 38,
"preview": "#!/bin/bash\n\n./run.sh --continuous $@\n"
},
{
"path": "scripts/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "scripts/check_requirements.py",
"chars": 827,
"preview": "import re\nimport sys\n\nimport pkg_resources\n\n\ndef main():\n requirements_file = sys.argv[1]\n with open(requirements_"
},
{
"path": "scripts/install_plugin_deps.py",
"chars": 959,
"preview": "import os\nimport subprocess\nimport sys\nimport zipfile\nfrom pathlib import Path\n\n\ndef install_plugin_dependencies():\n "
},
{
"path": "tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "tests/conftest.py",
"chars": 1085,
"preview": "from pathlib import Path\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom autogpt.config import Config\nfrom au"
},
{
"path": "tests/context.py",
"chars": 200,
"preview": "import os\nimport sys\n\n# Add the scripts directory to the path so that we can import the browse module.\nsys.path.insert(\n"
},
{
"path": "tests/integration/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "tests/integration/agent_factory.py",
"chars": 6694,
"preview": "import pytest\n\nfrom autogpt.agent import Agent\nfrom autogpt.commands.command import CommandRegistry\nfrom autogpt.config "
},
{
"path": "tests/integration/agent_utils.py",
"chars": 457,
"preview": "import concurrent.futures\n\nfrom autogpt.agent.agent import Agent\n\n\ndef run_interaction_loop(agent: Agent, timeout: float"
},
{
"path": "tests/integration/cassettes/test_llm_utils/test_get_ada_embedding.yaml",
"chars": 10561,
"preview": "interactions:\n- request:\n body: '{\"input\": [[1985]], \"model\": \"text-embedding-ada-002\", \"encoding_format\":\n \"bas"
},
{
"path": "tests/integration/cassettes/test_llm_utils/test_get_ada_embedding_large_context.yaml",
"chars": 118203,
"preview": "interactions:\n- request:\n body: '{\"input\": [[5289, 564, 71, 773, 89, 2332, 19747, 87, 664, 71, 8311, 85,\n 392, 2"
},
{
"path": "tests/integration/cassettes/test_local_cache/test_get_relevant.yaml",
"chars": 63192,
"preview": "interactions:\n- request:\n body: '{\"input\": [\"Sample text 1\"], \"model\": \"text-embedding-ada-002\", \"encoding_format\":\n "
},
{
"path": "tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml",
"chars": 53905,
"preview": "interactions:\n- request:\n body: '{\"input\": [\"Assistant Reply: { \\\"thoughts\\\": { \\\"text\\\": Result:\n "
},
{
"path": "tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml",
"chars": 4087,
"preview": "interactions:\n- request:\n body: '{\"model\": \"gpt-3.5-turbo\", \"messages\": [{\"role\": \"system\", \"content\": \"\\nYour\n "
},
{
"path": "tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml",
"chars": 3565,
"preview": "interactions:\n- request:\n body: '{\"model\": \"gpt-3.5-turbo\", \"messages\": [{\"role\": \"system\", \"content\": \"\\nYour\n "
},
{
"path": "tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml",
"chars": 4330,
"preview": "interactions:\n- request:\n body: '{\"model\": \"gpt-3.5-turbo\", \"messages\": [{\"role\": \"system\", \"content\": \"\\nYour\n "
},
{
"path": "tests/integration/challenges/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "tests/integration/challenges/conftest.py",
"chars": 397,
"preview": "import pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--level\", action=\"store\", default=None, typ"
},
{
"path": "tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py",
"chars": 1623,
"preview": "import contextlib\nfrom functools import wraps\nfrom typing import Generator\n\nimport pytest\n\nfrom autogpt.commands.file_op"
},
{
"path": "tests/integration/challenges/memory/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml",
"chars": 91257,
"preview": "interactions:\n- request:\n body: '{\"model\": \"gpt-3.5-turbo\", \"messages\": [{\"role\": \"system\", \"content\": \"You\n are"
},
{
"path": "tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml",
"chars": 45192,
"preview": "interactions:\n- request:\n body: '{\"model\": \"gpt-3.5-turbo\", \"messages\": [{\"role\": \"system\", \"content\": \"You\n are"
},
{
"path": "tests/integration/challenges/memory/test_memory_challenge_a.py",
"chars": 2611,
"preview": "import pytest\r\n\r\nfrom autogpt.agent import Agent\r\nfrom autogpt.commands.file_operations import read_file, write_to_file\r"
},
{
"path": "tests/integration/challenges/memory/test_memory_challenge_b.py",
"chars": 3003,
"preview": "import pytest\r\n\r\nfrom autogpt.agent import Agent\r\nfrom autogpt.commands.file_operations import read_file, write_to_file\r"
},
{
"path": "tests/integration/challenges/utils.py",
"chars": 1931,
"preview": "import random\nfrom functools import wraps\nfrom typing import Optional\n\nimport pytest\n\n\ndef get_level_to_run(\n user_se"
},
{
"path": "tests/integration/conftest.py",
"chars": 581,
"preview": "import os\n\nimport pytest\n\nfrom tests.vcr.vcr_filter import before_record_request, before_record_response\n\n\n@pytest.fixtu"
},
{
"path": "tests/integration/goal_oriented/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml",
"chars": 36261,
"preview": "interactions:\n- request:\n body: '{\"model\": \"gpt-3.5-turbo\", \"messages\": [{\"role\": \"system\", \"content\": \"You\n are"
},
{
"path": "tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml",
"chars": 28729,
"preview": "interactions:\n- request:\n body: '{\"model\": \"gpt-3.5-turbo\", \"messages\": [{\"role\": \"system\", \"content\": \"You\n are"
},
{
"path": "tests/integration/goal_oriented/goal_oriented_tasks.md",
"chars": 383,
"preview": "If the goal oriented task pipeline fails, it means: \n- you somehow changed the way the system prompt is generated \n- or "
},
{
"path": "tests/integration/goal_oriented/test_browse_website.py",
"chars": 683,
"preview": "import pytest\r\n\r\nfrom autogpt.agent import Agent\r\nfrom autogpt.commands.file_operations import read_file\r\nfrom tests.int"
},
{
"path": "tests/integration/goal_oriented/test_write_file.py",
"chars": 671,
"preview": "import pytest\n\nfrom autogpt.agent import Agent\nfrom autogpt.commands.file_operations import read_file\nfrom tests.integra"
},
{
"path": "tests/integration/memory_tests.py",
"chars": 1614,
"preview": "import random\nimport string\nimport sys\nimport unittest\nfrom pathlib import Path\n\nfrom autogpt.config import Config\nfrom "
},
{
"path": "tests/integration/milvus_memory_tests.py",
"chars": 1933,
"preview": "# sourcery skip: snake-case-functions\n\"\"\"Tests for the MilvusMemory class.\"\"\"\nimport random\nimport string\nimport unittes"
},
{
"path": "tests/integration/test_execute_code.py",
"chars": 1414,
"preview": "import random\nimport string\nimport tempfile\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nimport autogpt.command"
},
{
"path": "tests/integration/test_git_commands.py",
"chars": 1239,
"preview": "import pytest\nfrom git.exc import GitCommandError\nfrom git.repo.base import Repo\n\nfrom autogpt.commands.git_operations i"
},
{
"path": "tests/integration/test_llm_utils.py",
"chars": 1941,
"preview": "import string\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom numpy.random import RandomState\nfrom pytest_mock i"
},
{
"path": "tests/integration/test_local_cache.py",
"chars": 2972,
"preview": "# sourcery skip: snake-case-functions\n\"\"\"Tests for LocalCache class\"\"\"\nimport unittest\n\nimport orjson\nimport pytest\n\nfro"
},
{
"path": "tests/integration/test_memory_management.py",
"chars": 1710,
"preview": "import json\n\nimport pytest\n\nfrom autogpt.config import Config\nfrom autogpt.memory import get_memory\nfrom autogpt.memory_"
},
{
"path": "tests/integration/test_setup.py",
"chars": 2280,
"preview": "from unittest.mock import patch\n\nimport pytest\n\nfrom autogpt.config.ai_config import AIConfig\nfrom autogpt.setup import "
},
{
"path": "tests/integration/weaviate_memory_tests.py",
"chars": 3861,
"preview": "import unittest\nfrom uuid import uuid4\n\nfrom weaviate import Client\nfrom weaviate.util import get_valid_uuid\n\nfrom autog"
},
{
"path": "tests/milvus_memory_test.py",
"chars": 2390,
"preview": "# sourcery skip: snake-case-functions\n\"\"\"Tests for the MilvusMemory class.\"\"\"\nimport os\nimport sys\nimport unittest\n\ntry:"
},
{
"path": "tests/mocks/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "tests/mocks/mock_commands.py",
"chars": 290,
"preview": "from autogpt.commands.command import command\n\n\n@command(\"function_based\", \"Function-based test command\")\ndef function_ba"
},
{
"path": "tests/test_agent.py",
"chars": 1247,
"preview": "from unittest.mock import MagicMock\n\nimport pytest\n\nfrom autogpt.agent import Agent\nfrom autogpt.config import Config\n\n\n"
},
{
"path": "tests/test_agent_manager.py",
"chars": 1914,
"preview": "import pytest\n\nfrom autogpt.agent.agent_manager import AgentManager\nfrom autogpt.llm import create_chat_completion\n\n\n@py"
},
{
"path": "tests/test_ai_config.py",
"chars": 1193,
"preview": "from autogpt.config.ai_config import AIConfig\n\n\"\"\"\nTest cases for the AIConfig class, which handles loads the AI configu"
},
{
"path": "tests/test_api_manager.py",
"chars": 4140,
"preview": "from unittest.mock import MagicMock, patch\n\nimport pytest\n\nfrom autogpt.llm import COSTS, ApiManager\n\napi_manager = ApiM"
},
{
"path": "tests/test_commands.py",
"chars": 6867,
"preview": "import os\nimport shutil\nimport sys\nfrom pathlib import Path\n\nimport pytest\n\nfrom autogpt.commands.command import Command"
},
{
"path": "tests/test_config.py",
"chars": 3316,
"preview": "\"\"\"\nTest cases for the Config class, which handles the configuration settings\nfor the AI and ensures it behaves as a sin"
},
{
"path": "tests/test_image_gen.py",
"chars": 3057,
"preview": "import functools\nimport hashlib\nfrom pathlib import Path\n\nimport pytest\nfrom PIL import Image\n\nfrom autogpt.commands.ima"
},
{
"path": "tests/test_logs.py",
"chars": 1260,
"preview": "import pytest\n\nfrom autogpt.logs import remove_color_codes\n\n\n@pytest.mark.parametrize(\n \"raw_text, clean_text\",\n ["
},
{
"path": "tests/test_prompt_generator.py",
"chars": 4452,
"preview": "from unittest import TestCase\n\nfrom autogpt.prompts.generator import PromptGenerator\n\n\nclass TestPromptGenerator(TestCas"
},
{
"path": "tests/test_token_counter.py",
"chars": 2126,
"preview": "import pytest\n\nfrom autogpt.llm import count_message_tokens, count_string_tokens\n\n\ndef test_count_message_tokens():\n "
},
{
"path": "tests/test_utils.py",
"chars": 4593,
"preview": "import os\nfrom unittest.mock import Mock, patch\n\nimport pytest\nimport requests\nfrom colorama import Fore\nfrom git import"
}
]
// ... and 22 more files (download for full content)
About this extraction
This page contains the full source code of the RealHossie/Auto-GPT-Chinese GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 222 files (925.1 KB), approximately 335.1k tokens, and a symbol index with 648 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.