Showing preview only (1,713K chars total). Download the full file or copy to clipboard to get everything.
Repository: foreveryh/mentis
Branch: main
Commit: 7859b536b98b
Files: 240
Total size: 1.4 MB
Directory structure:
gitextract_zde6lsy3/
├── .gitignore
├── README.md
├── __init__.py
├── api/
│ ├── __init__.py
│ ├── agent/
│ │ ├── __init__.py
│ │ └── loader.py
│ ├── server.py
│ └── utils.py
├── core/
│ ├── __init__.py
│ ├── a2a/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agent_task_manager.py
│ │ ├── client/
│ │ │ ├── __init__.py
│ │ │ ├── card_resolver.py
│ │ │ └── client.py
│ │ ├── config.json
│ │ ├── server/
│ │ │ ├── __init__.py
│ │ │ ├── server.py
│ │ │ ├── task_manager.py
│ │ │ └── utils.py
│ │ ├── types.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── in_memory_cache.py
│ │ └── push_notification_auth.py
│ ├── agents/
│ │ ├── __init__.py
│ │ ├── base/
│ │ │ ├── base_agent.py
│ │ │ ├── create_react_agent_wrapper.py
│ │ │ └── react_agent.py
│ │ ├── react_based_supervisor/
│ │ │ ├── __init__.py
│ │ │ ├── agent_name.py
│ │ │ ├── handoff.py
│ │ │ ├── planning_handler.py
│ │ │ ├── simple_planning_tool.py
│ │ │ ├── state_schema.py
│ │ │ └── supervisor.py
│ │ ├── react_supervisor_agent.py
│ │ ├── sb_supervisor_agent.py
│ │ ├── state_based_supervisor/
│ │ │ ├── __init__.py
│ │ │ ├── agent_name.py
│ │ │ ├── evaluate_result_node.py
│ │ │ ├── handoff.py
│ │ │ ├── planner_node.py
│ │ │ ├── planning_handler.py
│ │ │ ├── prompt.py
│ │ │ ├── state_schema.py
│ │ │ ├── supervisor_graph.py
│ │ │ └── supervisor_node.py
│ │ └── sub_agents/
│ │ ├── __init__.py
│ │ ├── coder_agent.py
│ │ ├── data_analyst_agent.py
│ │ ├── designer_agent.py
│ │ ├── reporter_agent.py
│ │ └── research_agent.py
│ ├── llm/
│ │ ├── llm_manager.py
│ │ └── model_config.py
│ ├── mcp/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── config_loader.py
│ │ ├── mcp_server_config.json
│ │ ├── run_server.py
│ │ ├── server.py
│ │ └── test/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── minimal_fastmcp_test.py
│ │ └── test_minimal_client.py
│ ├── tools/
│ │ ├── __init__.py
│ │ ├── e2b_tool.py
│ │ ├── firecrawl_tool.py
│ │ ├── registry.py
│ │ └── replicate_flux_tool.py
│ └── utils/
│ ├── agent_utils.py
│ └── timezone.py
├── examples/
│ ├── 01_supervisor_test.py
│ ├── 02_supervisor_agent_test.py
│ ├── 03_tavily_tools_test.py
│ ├── 04_react_agent_test.py
│ ├── 05_react_agent_user_input.py
│ ├── 06_web_extraction_tools_test.py
│ ├── 07_web_extraction_with_filesystem.py
│ ├── 08_react_agent_tool_registry_test.py
│ ├── 09_e2b_code_interpreter_test.py
│ ├── 10_financial_data_analysis.py
│ ├── 11_e2b_sandbox_test.py
│ ├── 12_planning_supervisor_test.py
│ ├── 13_multi_agent_roles_test.py
│ ├── 14_mcp_client_fetch_test.py
│ ├── 15_mcp_agent_test.py
│ ├── 16_google_a2a/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agent_task_manager_test.py
│ │ ├── client_example.py
│ │ ├── currency_agent_test.py
│ │ ├── currency_agent_test_README.md
│ │ └── langgraph_integration.py
│ ├── TODO_computer_tool_demo.py
│ ├── __init__.py
│ ├── state_based_supervisor_examples/
│ │ ├── 01_simple.py
│ │ ├── 02_tavily.py
│ │ └── 03_multi_agents.py
│ └── web_agents/
│ ├── README.md
│ ├── README_SPEC.md
│ ├── __init__.py
│ ├── research_assistant/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ └── graph.py
│ └── weather_agent/
│ ├── README.md
│ └── __init__.py
├── instructions/
│ ├── 00.Langgraph 和 React Agent.md
│ ├── 01.supervisor_pattern.md
│ ├── 02.supervisor_pattern_agent.md
│ ├── 03.tavily_search_integration.md
│ ├── 04.react_agent.md
│ ├── 05.react_agent_user_input.md
│ ├── 06.web_extraction_tools.md
│ ├── 07.web_extraction_with_filesystem.md
│ ├── 08.react_agent_tool_registry.md
│ └── 09.e2b_sandbox_integration.md
├── log_analyzer.py
├── pyproject.toml
├── requirements.txt
├── setup.py
├── super_agents/
│ ├── __init__.py
│ ├── browser_use/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agent/
│ │ │ ├── __init__.py
│ │ │ ├── graph.py
│ │ │ ├── nodes.py
│ │ │ ├── prompts.py
│ │ │ ├── schemas.py
│ │ │ ├── state.py
│ │ │ └── tools.py
│ │ ├── agent.py
│ │ ├── browser/
│ │ │ ├── browser.py
│ │ │ ├── detector.py
│ │ │ ├── findVisibleInteractiveElements.js
│ │ │ ├── models.py
│ │ │ └── utils.py
│ │ ├── llm.py
│ │ └── main.py
│ ├── customized_deep_research/
│ │ ├── PRD_README.md
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── main.py
│ │ └── reason_graph/
│ │ ├── __init__.py
│ │ ├── graph.py
│ │ ├── nodes.py
│ │ ├── prompt.py
│ │ ├── schemas.py
│ │ ├── state.py
│ │ └── tools.py
│ └── deep_research/
│ ├── README.md
│ ├── __init__.py
│ ├── a2a_adapter/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── client_example.py
│ │ ├── deep_research_task_manager.py
│ │ ├── dr_terminal_output.md
│ │ ├── run_server.py
│ │ └── setup.py
│ ├── main.py
│ ├── output/
│ │ ├── research_report_analyze_smartvalue_co_ltds_9417t_core_business_key_productsservices_eg_government_cloud_solutions_mo_20250418_125137.md
│ │ ├── research_report_id_like_a_thorough_analysis_of_li_auto_stock_including_summary_company_overview_key_metrics_performa_20250327_121800.md
│ │ └── research_report_id_like_a_thorough_analysis_of_xpev_stock_including_summary_company_overview_key_metrics_performance_20250327_105350.md
│ ├── reason_graph/
│ │ ├── __init__.py
│ │ ├── graph.py
│ │ ├── nodes.py
│ │ ├── prompt.py
│ │ ├── schemas.py
│ │ ├── state.py
│ │ └── tools.py
│ └── tests/
│ ├── __init__.py
│ └── test_graph.py
├── web/
│ ├── .gitignore
│ ├── README.md
│ ├── app/
│ │ ├── api/
│ │ │ └── agent/
│ │ │ └── route.ts
│ │ ├── chat/
│ │ │ ├── [id]/
│ │ │ │ ├── agent-types.ts
│ │ │ │ ├── components/
│ │ │ │ │ ├── chatbot-node.tsx
│ │ │ │ │ ├── checkpoint-card.tsx
│ │ │ │ │ ├── node-card.tsx
│ │ │ │ │ ├── reminder.tsx
│ │ │ │ │ ├── research/
│ │ │ │ │ │ ├── report-preview.tsx
│ │ │ │ │ │ ├── research-node.tsx
│ │ │ │ │ │ ├── research-status.tsx
│ │ │ │ │ │ └── search-results.tsx
│ │ │ │ │ └── weather/
│ │ │ │ │ ├── cloudy.tsx
│ │ │ │ │ ├── rainy.tsx
│ │ │ │ │ ├── snowy.tsx
│ │ │ │ │ ├── sunny.tsx
│ │ │ │ │ └── weather-node.tsx
│ │ │ │ └── page.tsx
│ │ │ └── page.tsx
│ │ ├── deep-research/
│ │ │ ├── [id]/
│ │ │ │ └── page.tsx
│ │ │ └── page.tsx
│ │ ├── globals.css
│ │ ├── layout.tsx
│ │ └── page.tsx
│ ├── components/
│ │ ├── app-sidebar.tsx
│ │ ├── theme-provider.tsx
│ │ ├── theme-switcher.tsx
│ │ └── ui/
│ │ ├── badge.tsx
│ │ ├── button.tsx
│ │ ├── card.tsx
│ │ ├── checkbox.tsx
│ │ ├── dialog.tsx
│ │ ├── input.tsx
│ │ ├── popover.tsx
│ │ ├── progress.tsx
│ │ ├── separator.tsx
│ │ ├── sheet.tsx
│ │ ├── sidebar.tsx
│ │ ├── skeleton.tsx
│ │ ├── textarea.tsx
│ │ └── tooltip.tsx
│ ├── components.json
│ ├── eslint.config.mjs
│ ├── hooks/
│ │ ├── use-mobile.tsx
│ │ └── useLangGraphAgent/
│ │ ├── actions.ts
│ │ ├── api.ts
│ │ ├── ascii-tree.ts
│ │ ├── types.ts
│ │ └── useLangGraphAgent.tsx
│ ├── next.config.ts
│ ├── package.json
│ ├── postcss.config.mjs
│ ├── stores/
│ │ └── chat-store.tsx
│ ├── tailwind.config.ts
│ └── tsconfig.json
└── web_for_a2a/
├── .gitignore
├── Instruction.md
├── README.md
├── app/
│ ├── api/
│ │ └── a2a/
│ │ └── route.ts
│ ├── deepresearch/
│ │ └── page.tsx
│ ├── globals.css
│ ├── layout.tsx
│ └── page.tsx
├── package.json
├── postcss.config.js
├── tailwind.config.js
└── tsconfig.json
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Virtual Environment
venv/
env/
ENV/
# IDE
.idea/
.vscode/
*.swp
*.swo
# OS specific
.DS_Store
Thumbs.db
# LangSmith
.langchain.db
.langsmith/
# Logs
*.log
# Env
.env
# output
examples/logs/
examples/output/
examples/output/sandbox_test
================================================
FILE: README.md
================================================
# Mentis - Agent Development Kit
[![Python 3.11+](https://img.shields.io/badge/python-3.11%2B-blue)](https://www.python.org/downloads/)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
## 概述 (Overview)
Mentis 是一个基于 LangGraph 构建的、可扩展的多 Agent ADK(Agent Development Kit)。它的核心是一个**状态驱动的规划型 Supervisor Agent**,负责理解用户复杂请求、制定执行计划,并智能地协调一组具有不同专业能力的子 Agent (Specialist Agents) 来共同完成任务。
此框架旨在实现复杂任务的自动化处理,通过 Agent 间的协作提供比单一 Agent 更强大、更灵活的问题解决能力。
## 核心特性 (Core Features)
* **Multi-Agent 架构**: 采用中心化的 Supervisor 协调多个专门的子 Agent (如 Research, Coder, Reporter, Designer, Data Analyst)。
* **State-Based Planning**: 引入独立的 `Planner` 节点负责初始规划,`Supervisor` 专注于基于计划状态的执行和调度,`Evaluator` 节点负责评估子 Agent 结果并更新状态。计划状态通过 LangGraph 持久化(需配置 Checkpointer)。
* **模块化 Agent 设计**: 基于 `BaseAgent` 和 `ReactAgent` 构建,易于添加或修改具有不同能力的子 Agent。
* **工具注册与管理**: 通过 `core/tools/registry.py` 实现工具的集中注册、分类和动态加载。
* **可配置 LLM**: 支持通过 `LLMManager` (或环境变量) 配置和切换不同的 LLM Provider (OpenAI, DeepSeek, XAI Grok via compatible endpoint) 和模型。
* **持久化支持**: 基于 LangGraph 的 Checkpointer 机制,可以实现对话状态和计划的持久化。
* **清晰的执行流程**: Planner -> Supervisor -> (Handoff -> Agent -> Evaluator -> Supervisor 循环) -> 最终输出/Reporter。
* **A2A 协议支持**: 实现了 Google 的 Agent-to-Agent (A2A) 协议,使 Mentis Agents 能够与其他支持 A2A 协议的系统进行互操作。
## 架构概览 (Architecture Overview)
1. **用户请求 (Input)**: 用户通过入口点 (`main.py` 或 API) 提交任务请求。
2. **规划节点 (Planner Node)**: 分析请求,生成一个包含任务步骤、建议 Agent 的初始计划 (`Plan`),并更新到图状态 (`PlanningAgentState`)。
3. **主管节点 (Supervisor Node)**: 接收带有计划的状态,根据计划状态和消息历史决定下一步行动:
* 启动新任务 (标记 'in_progress')。
* 委派 'in_progress' 的任务给合适的子 Agent (通过 Handoff 工具)。
* 等待子 Agent 完成。
* 判断计划是否最终完成。
* 决定最终输出方式(自己总结或调用 Reporter)。
4. **切换执行器 (Handoff Executor)**: 处理 Supervisor 发出的 `transfer_to_` 工具调用,并将控制权和状态传递给目标子 Agent。
5. **子 Agent 节点 (Specialist Agent Nodes)**: 继承自 `ReactAgent` 或 `BaseAgent`,执行具体的任务(研究、编码、生成报告/图像、数据分析),可能调用其自身的工具。
6. **评估节点 (Evaluate Result Node)**: 接收子 Agent 的执行结果,进行确定性评估(成功/失败),更新对应任务的状态和 Plan 的整体状态。
7. **循环与结束**: 流程在 Evaluator -> Supervisor 之间循环,直到 Supervisor 判断 Plan 完成,然后路由到 `END` 或 `ReporterAgent`。
## 快速开始 (Getting Started)
### 1. 环境设置 (Prerequisites)
* Python 3.11+
* 使用 `pip` 或 `uv` 等工具管理依赖。
### 2. 安装依赖 (Installation)
在项目根目录运行:
建议使用 uv 管理
```bash
uv venv
source .venv/bin/activate
uv sync
```
```bash
# pip install -r requirements.txt
# 或者 uv pip install -r requirements.txt
```
(requirements.txt 我没维护,请确保 `requirements.txt` 文件包含了所有必要的库,如 `langchain`, `langgraph`, `langchain-openai`, `e2b` (如果使用 E2B), `replicate` (如果使用 Replicate), `tavily-python`, `exa-py`, `python-dotenv`, `anyio`, `tiktoken` 等)。
### 3. 配置环境 (Configuration)
* 复制 `.env.example` 文件为 `.env`。
* 在 `.env` 文件中填入您所需的 API Keys/Tokens:
* `OPENAI_API_KEY` (如果使用 OpenAI 模型)
* `DEEPSEEK_API_KEY` (如果使用 DeepSeek 模型)
* `XAI_API_KEY` (如果使用 XAI Grok,并确认 Base URL)
* `REPLICATE_API_TOKEN` (如果使用 Replicate 工具)
* `E2B_API_KEY` (如果使用 E2B Code Interpreter,推荐!)
* `TAVILY_API_KEY` (如果使用 Tavily 搜索,推荐!)
* `EXA_API_KEY` (如果使用 Exa 搜索)
* `LANGCHAIN_TRACING_V2="true"` (强烈推荐,用于 LangSmith 调试)
* `LANGCHAIN_API_KEY="ls_..."` (您的 LangSmith Key)
* `LANGCHAIN_PROJECT="Your_Project_Name"` (您在 LangSmith 上的项目名)
* **LLM 配置**:
* 如果您使用了 `LLMManager`(如示例所示),请检查并配置其读取的模型配置文件(例如 `config/models.yaml`,路径可能不同)。
* 如果您在 `tools.py` 中直接根据环境变量初始化 LLM,请确保设置了对应的环境变量,如 `LLM_PROVIDER`, `LLM_MODEL_NAME`, `LLM_BASE_URL` (用于兼容 API)。
* **工具配置**: 确保 `core/tools/__init__.py` 或 `registry.py` 中的工具预注册逻辑能够正确找到并初始化您需要的工具。
### 4. 运行示例 (Running Examples)
项目包含示例脚本以演示框架的使用:
```bash
# 从项目根目录 (mentis/) 运行
python examples/state_based_supervisor_examples/03_multi_agents.py
```
脚本会提示您输入初始请求。您可以进行简单尝试:
* `"What is the capital of France?"` (简单测试)
* `"Write a short, four-line poem about spring."` (测试 Reporter)
* `"Generate an image of a cat wearing a top hat, oil painting style."` (测试 Designer)
* `"Write a Python function to calculate factorial and run it for 5."` (测试 Coder)
## 项目结构 (Project Structure)
```
mentis/
├── api/ # (可选) API 服务相关代码
├── core/ # 核心框架代码
│ ├── a2a/ # A2A 协议的客户端和服务器实现
│ ├── agents/ # Agent 定义 (base, react, supervisor, sub-agents)
│ │ ├── base/
│ │ ├── state_based_supervisor/ # Supervisor 相关 (graph, node, planner, evaluator)
│ │ ├── sub_agents/ # 具体子 Agent 实现 (research, coder, etc.)
│ │ └── sb_supervisor_agent.py # SupervisorAgent 类定义
│ ├── llm/ # (可选) LLM 管理或配置
│ ├── tools/ # 工具定义和注册表 (registry, e2b, replicate, etc.)
│ └── utils/ # 通用辅助函数
├── examples/ # 示例和测试脚本
│ └── state_based_supervisor_examples/
│ └── 03_multi_agents.py # 我们使用的测试脚本
├── super_agents/ # 独立功能型 Agent 实现
│ └── deep_research/ # DeepResearch Agent 实现
│ └── a2a_adapter/ # DeepResearch 的 A2A 协议适配器
├── web/ # (可选) Web 客户端代码
├── web_for_a2a/ # 基于 A2A 协议的 Web 界面
├── .env.example # 环境变量示例
├── requirements.txt # Python 依赖
└── README.md # 本文件
```
## Super Agents (独立功能型 Agent)
除了由 Supervisor 协调的、专注于单一技能的 Specialist Agents (如 Coder, Researcher) 之外,本框架也支持构建和集成更复杂的 **"Super Agents"**。
Super Agent 可以理解为一个**独立的、具有端到端能力、能够完成一个相对完整且复杂任务的 Agent 图**。它可以包含自己的规划、执行、甚至内部协调逻辑。
这些 Super Agents 既可以**独立运行**以完成特定的大型任务,也可以被更高层的协调者(例如我们的 Supervisor Agent)**视为一种强大的“能力”或“工具”**来调用,以处理其复杂计划中的某个步骤。
### DeepResearch Agent (第一个实例)
https://github.com/user-attachments/assets/2a685709-5be0-43a3-9e2d-934ef5fa3315
`DeepResearch Agent` 是我们在此框架理念下实现的第一个 Super Agent 实例(其早期版本是我们开发此 Multi-Agent 框架的基础)。
* **核心功能**: 旨在针对用户给定的**任意主题**,自动化地执行一个**深度研究**流程。
* **内部工作流**: 它包含自己的一套完整的内部步骤,大致如下:
1. **研究规划 (Plan Research)**: 分析主题,生成初步的搜索查询和分析点。
2. **多源搜索 (Multi-Source Search)**: 调用网页搜索 (Tavily)、学术搜索 (Exa) 等工具获取信息。
3. **(可选) 分析执行 (Perform Analysis)**: 对搜索结果进行初步分析(如情感、SWOT 等)。
4. **差距分析 (Gap Analysis)**: 评估已有信息,识别知识空白和局限性。
5. **(可选) 补充搜索 (Gap Filling)**: 针对知识空白进行额外的、更具针对性的搜索。
6. **最终综合 (Final Synthesis)**: 整合所有信息,提炼关键发现和不确定性。
7. **报告生成 (Report Generation)**: 将综合结果和上下文信息,撰写成一份详细的、带引用的 Markdown 研究报告。
* **当前状态**: 该 Agent 的核心逻辑和节点已基本实现,并且现在支持 A2A 协议和专用 Web 界面。
#### A2A 协议支持
我们为 DeepResearch Agent 实现了完整的 A2A 协议适配器,使其能够:
* 作为标准的 A2A 服务被发现和调用
* 通过 `tasks/send` 和 `tasks/sendSubscribe` 端点接收研究任务
* 提供实时的流式研究进度更新
* 返回结构化的研究结果
* 支持推送通知机制
这使得 DeepResearch Agent 可以轻松地与其他支持 A2A 协议的系统(如 Google Assistant)集成,或者被自定义的前端应用调用。
#### 专用 Web 界面
https://github.com/user-attachments/assets/640365c7-839b-4765-b9ac-ee0ac961ceb8
我们还开发了一个基于 Next.js 的现代 Web 界面,专门用于与 DeepResearch A2A 服务交互:
* 提供直观的用户界面,用于输入研究主题和启动研究任务
* 实时显示研究进度和中间更新(通过 Server-Sent Events)
* 美观地展示最终生成的研究报告
* 演示了如何在前端应用中使用浏览器原生 API 处理 A2A 流式响应
**如何体验 DeepResearch Agent:**
1. **独立运行模式**:
* 确保环境配置: 确认您的 `.env` 文件中包含了所需的所有 API Keys(例如 `OPENAI_API_KEY`/`DEEPSEEK_API_KEY`, `TAVILY_API_KEY`, `EXA_API_KEY`)。
* 运行脚本: 在项目根目录执行:
```bash
python super_agents/deep_research/main.py
```
* 输入主题并查看结果: 生成的报告通常会保存在 `output/` 文件夹中。
2. **A2A 服务模式**:
* 启动 A2A 服务器:
```bash
cd super_agents/deep_research/a2a_adapter
python run_server.py
```
* 服务器将在默认端口(通常是 8000)启动,并提供符合 A2A 规范的 API 端点。
3. **Web 界面模式**:
* 确保 A2A 服务器正在运行
* 启动 Web 界面:
```bash
cd web_for_a2a
npm install
npm run dev
```
* 在浏览器中访问 http://localhost:3000/deepresearch 使用图形界面与 DeepResearch Agent 交互。
## 未来工作 (Future Work / Contributing)
* 完善子 Agent 的工具集和 Prompt。
* 增强 Evaluator Node 的评估逻辑。
* 添加更复杂的任务依赖处理。
* 优化长对话历史的管理。
* 集成持久化 Checkpointer (如 SQLite, Redis)。
* 欢迎提出 Issue 或 Pull Request!
* 有问题也可以添加我的微信 brown🩷cony999
## 许可证 (License)
This project is licensed under the MIT License - see the LICENSE file for details.
================================================
FILE: __init__.py
================================================
# Project package initialization
================================================
FILE: api/__init__.py
================================================
================================================
FILE: api/agent/__init__.py
================================================
================================================
FILE: api/agent/loader.py
================================================
# Agent Loader Module
# This module is responsible for loading agents from the web_agents directory
import importlib
import os
import sys
from typing import Dict, Optional, Any, List
from langgraph.graph import StateGraph
from langgraph.graph.graph import CompiledGraph # Add this import
# Try to import deep_research_app
try:
    # Adjust this import path based on your project structure
    from super_agents.deep_research.reason_graph.graph import web_app as deep_research_app
except ImportError:
    # The deep_research super-agent is optional: when its import fails the
    # loader degrades gracefully and simply omits it from the agent list.
    print("Warning: Failed to import deep_research_app. DeepResearchAgent will be unavailable.")
    deep_research_app = None
# Add examples directory to Python path to allow importing web_agents
# (loader.py lives in api/agent/, so three dirname() calls reach the repo root)
examples_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'examples')
if examples_path not in sys.path:
    sys.path.append(examples_path)
def list_available_agents() -> Dict[str, str]:
    """Enumerate the agents available under the web_agents directory.

    Returns:
        Dict[str, str]: Mapping from agent name to a short description —
        the README's first top-level heading when present, otherwise the
        directory name itself.
    """
    discovered: Dict[str, str] = {}
    web_agents_dir = os.path.join(examples_path, 'web_agents')

    # os.path.isdir is False for missing paths too, so one check suffices.
    if os.path.isdir(web_agents_dir):
        for entry in os.listdir(web_agents_dir):
            agent_dir = os.path.join(web_agents_dir, entry)
            # Only plain package directories qualify; skip dunder/hidden names.
            if not os.path.isdir(agent_dir) or entry.startswith('__') or entry.startswith('.'):
                continue
            # An agent directory must at least contain an __init__.py.
            if not os.path.exists(os.path.join(agent_dir, '__init__.py')):
                continue
            # Default description is the directory name; prefer the README's
            # leading "# ..." heading when one can be read.
            description = entry
            readme_file = os.path.join(agent_dir, 'README.md')
            if os.path.exists(readme_file):
                try:
                    with open(readme_file, 'r', encoding='utf-8') as f:
                        heading = f.readline().strip()
                    if heading.startswith('# '):
                        description = heading[2:]
                except Exception:
                    pass
            discovered[entry] = description

    # deep_research lives outside web_agents; expose it only when its module
    # imported successfully at load time.
    if deep_research_app is not None:
        discovered["deep_research"] = "Deep Research Agent for in-depth topic exploration"
    return discovered
def load_agent(agent_name: str) -> Optional[CompiledGraph]:
    """Resolve an agent name to its compiled LangGraph graph.

    Args:
        agent_name (str): The name of the agent to load.

    Returns:
        Optional[CompiledGraph]: The agent's compiled graph, or None when the
        agent cannot be found or fails to load (errors are printed).
    """
    # deep_research is special-cased: it is imported once at module load time
    # rather than discovered under web_agents.
    if agent_name == "deep_research":
        if not deep_research_app:
            print("ERROR: DeepResearchAgent requested but not available.")
            return None
        return deep_research_app

    # Standard agents are python packages under web_agents exposing get_graph().
    try:
        module = importlib.import_module(f'web_agents.{agent_name}')
        if hasattr(module, 'get_graph'):
            # get_graph() builds and returns the compiled graph.
            return module.get_graph()
        print(f"Error: Agent '{agent_name}' does not have a get_graph function")
        return None
    except ImportError as e:
        print(f"Error importing agent '{agent_name}': {e}")
        return None
    except Exception as e:
        # get_graph() itself may raise; report it as a load failure.
        print(f"Error loading agent '{agent_name}': {e}")
        return None
# Default agent to use if none is specified by the request.
DEFAULT_AGENT = 'research_assistant'
# DEFAULT_AGENT = 'weather_agent'

def get_default_agent() -> Optional[CompiledGraph]:
    """Load and return the default agent's compiled graph.

    Returns:
        Optional[CompiledGraph]: The compiled graph for DEFAULT_AGENT, or
        None if it could not be loaded.
    """
    return load_agent(DEFAULT_AGENT)
================================================
FILE: api/server.py
================================================
import uvicorn
from langgraph.types import Command, Interrupt
from fastapi import FastAPI, Request, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette.sse import EventSourceResponse
from typing import AsyncGenerator, Dict, Optional, Union, Any
from api.utils import message_chunk_event, interrupt_event, custom_event, checkpoint_event, format_state_snapshot, stream_update_event
import asyncio
import traceback
import json
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableConfig
# Import the agent loader
from api.agent.loader import load_agent, list_available_agents, get_default_agent
# Load the default agent once at startup; used when a request names no agent.
graph = get_default_agent()
# Track active connections: thread_id -> stop event, so a running stream
# can be cancelled via the /agent/stop endpoint.
active_connections: Dict[str, asyncio.Event] = {}
app = FastAPI(
    title="LangGraph API",
    description="API for LangGraph interactions",
    version="0.1.0"
)
# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/agents")
async def list_agents():
    """Endpoint returning a list of available agents.

    Returns the name -> description mapping produced by the agent loader.
    """
    return list_available_agents()
@app.get("/state")
async def state(thread_id: str | None = None, agent: Optional[str] = Query(None)):
    """Return the current graph state for a thread.

    Raises:
        HTTPException: 400 when thread_id is missing; 404 when the named
        agent cannot be loaded.
    """
    if not thread_id:
        raise HTTPException(status_code=400, detail="thread_id is required")

    # Fall back to the default graph unless a specific agent was requested.
    target_graph = graph if not agent else load_agent(agent)
    if not target_graph:
        raise HTTPException(status_code=404, detail=f"Agent '{agent}' not found")

    run_config: RunnableConfig = {"configurable": {"thread_id": thread_id}}
    snapshot = await target_graph.aget_state(run_config)
    return format_state_snapshot(snapshot)
@app.get("/history")
async def history(thread_id: str | None = None, agent: Optional[str] = Query(None)):
    """Return the complete state history for a thread (used to restore the graph).

    Raises:
        HTTPException: 400 when thread_id is missing; 404 when the named
        agent cannot be loaded.
    """
    if not thread_id:
        raise HTTPException(status_code=400, detail="thread_id is required")

    # Fall back to the default graph unless a specific agent was requested.
    target_graph = load_agent(agent) if agent else graph
    if not target_graph:
        raise HTTPException(status_code=404, detail=f"Agent '{agent}' not found")

    run_config: RunnableConfig = {"configurable": {"thread_id": thread_id}}
    # Collect every snapshot the checkpointer holds for this thread.
    return [format_state_snapshot(s) async for s in target_graph.aget_state_history(run_config)]
@app.post("/agent/stop")
async def stop_agent(request: Request):
    """Signal a running agent stream to stop.

    Raises:
        HTTPException: 400 when thread_id is missing; 404 when no stream is
        currently registered for the thread.
    """
    body = await request.json()
    thread_id = body.get("thread_id")
    if not thread_id:
        raise HTTPException(status_code=400, detail="thread_id is required")

    stop_event = active_connections.get(thread_id)
    if stop_event is None:
        raise HTTPException(status_code=404, detail="Thread is not running")

    # The streaming generator checks this event between chunks and breaks out.
    stop_event.set()
    return {"status": "stopped", "thread_id": thread_id}
@app.post("/agent")
async def agent(request: Request):
    """Endpoint for running the agent.

    Expects a JSON body with:
        type: one of "run", "resume", "fork", "replay" (required)
        thread_id: conversation/thread identifier (required)
        agent: optional agent name; defaults to the module-level default graph
        state: optional initial state dict (defaults to {"messages": []})
        resume: checkpoint value, required when type == "resume"
        config: full RunnableConfig, required for "fork"/"replay"

    Returns an SSE stream (EventSourceResponse) of checkpoint, message_chunk,
    interrupt, custom/stream_update events, terminated by an "end" event, or
    an "error" event on failure.
    """
    body = await request.json()
    request_type = body.get("type")
    if not request_type:
        raise HTTPException(status_code=400, detail="type is required")
    thread_id = body.get("thread_id")
    if not thread_id:
        raise HTTPException(status_code=400, detail="thread_id is required")
    # Get the agent name if provided
    agent_name = body.get("agent")
    # Load the specified agent if provided
    current_graph = load_agent(agent_name) if agent_name else graph
    if not current_graph:
        raise HTTPException(status_code=404, detail=f"Agent '{agent_name or 'default'}' not found")
    # Register a stop event so /agent/stop can cancel this stream.
    stop_event = asyncio.Event()
    active_connections[thread_id] = stop_event
    config: RunnableConfig = {"configurable": {"thread_id": thread_id}}
    initial_graph_state: Dict[str, Any] = {}
    input_for_astream: Optional[Union[Dict, Command]] = None  # input for astream
    # Get initial state or messages from frontend
    initial_state_input = body.get("state", {"messages": []})
    if not isinstance(initial_state_input, dict):
        raise HTTPException(status_code=400, detail="state must be a dictionary")
    if agent_name == "deep_research":
        # --- Prepare state for DeepResearch Agent ---
        print("Preparing state for DeepResearchAgent...")
        # Extract topic from the first message in state['messages']
        first_message_content = ""
        try:
            # Ensure initial_state_input['messages'] is a list and not empty
            if isinstance(initial_state_input.get('messages'), list) and initial_state_input['messages']:
                # Assume the first message's content is the topic
                first_message_content = initial_state_input['messages'][0]['content']
            else:
                # Try to get topic from other fields in state (alternative)
                first_message_content = initial_state_input.get('topic', '')
        except Exception as e:
            print(f"Warning: Could not extract topic from initial state input: {e}")
        if not first_message_content or not isinstance(first_message_content, str):
            raise HTTPException(status_code=400, detail="A valid 'topic' string is required for deep_research agent, expected in state.messages[0].content or state.topic")
        # Build the ResearchState needed by DeepResearch Agent (at least topic and depth)
        initial_graph_state = {
            "topic": first_message_content,
            "depth": initial_state_input.get("depth", "advanced"),  # Optional: allow frontend to specify depth
            "messages": [],  # DeepResearch manages its own message history
            "stream_updates": [],  # Initialize stream_updates
            # Initialize other ResearchState fields to None or default values
            "plan": None, "research_plan": None, "search_results": [],
            "gap_analysis": None, "final_synthesis": None,
            "final_report_markdown": None,
        }
        print(f"Initial ResearchState: {{'topic': '{initial_graph_state['topic']}', 'depth': '{initial_graph_state['depth']}', ...}}")
        # DeepResearch Agent's astream input is the complete initial state
        if request_type == "run":
            input_for_astream = initial_graph_state
        elif request_type == "resume":
            # DeepResearch Agent might not support or need different resume approach
            print("Warning: 'resume' might not be fully supported for DeepResearchAgent yet.")
            # Assume resume Command can be understood by the graph
            input_for_astream = Command(resume=body.get("resume"))
            config["configurable"]["checkpoint_id"] = body.get("resume")  # Resume usually needs checkpoint ID
        else:  # Fork, Replay typically only need config
            config_from_request = body.get("config")
            if not config_from_request:
                raise HTTPException(status_code=400, detail="config is required for fork/replay")
            config = config_from_request  # Use complete config provided in the request
            input_for_astream = None
    else:  # For Supervisor or other Agents (assume using PlanningAgentState)
        print("Preparing state for Supervisor/Other Agent...")
        # --- Prepare PlanningAgentState ---
        # Ensure messages list contains correct BaseMessage objects (or let BaseAgent preprocess)
        initial_messages = initial_state_input.get("messages", [])
        initial_graph_state = {
            "messages": initial_messages,
            "plan": None,  # Planner node will create it
            "error": None
            # Add other fields needed by PlanningAgentState and set to None or default values
        }
        # --- Set astream input (logic similar to before) ---
        if request_type == "run":
            # For PlanningAgentState, initial input typically only contains messages
            input_for_astream = {"messages": initial_messages}
        elif request_type == "resume":
            resume_val = body.get("resume")
            if not resume_val:
                raise HTTPException(status_code=400, detail="resume value is required")
            input_for_astream = Command(resume=resume_val)
            # Ensure config includes checkpoint_id for resuming
            if "configurable" not in config:
                config["configurable"] = {}
            config["configurable"]["checkpoint_id"] = resume_val
        elif request_type == "fork":
            config_from_request = body.get("config")
            if not config_from_request:
                raise HTTPException(status_code=400, detail="config is required for fork")
            config = config_from_request  # Fork uses complete config provided
            # Fork typically starts from specified checkpoint, no extra state dict input needed
            input_for_astream = None
        elif request_type == "replay":
            config_from_request = body.get("config")
            if not config_from_request:
                raise HTTPException(status_code=400, detail="config is required for replay")
            config = config_from_request
            input_for_astream = None
        else:
            raise HTTPException(status_code=400, detail="invalid request type")
    # Ensure config always has thread_id (important for all agents)
    if "configurable" not in config:
        config["configurable"] = {}
    config["configurable"]["thread_id"] = thread_id
    # --- State and Input preparation complete ---
    async def generate_events() -> AsyncGenerator[dict, None]:
        """Translate graph stream chunks into SSE event dicts for the client."""
        try:
            # Set recursion_limit to 100 to work around the recursion limit
            # being hit during deep research runs.
            if agent_name == "deep_research" and "recursion_limit" not in config:
                config["recursion_limit"] = 100
            async for chunk in current_graph.astream(
                input_for_astream,  # Use prepared input
                config,  # Use prepared config
                stream_mode=["debug", "messages", "updates", "custom"],
            ):
                # Client requested cancellation via /agent/stop.
                if stop_event.is_set():
                    break
                chunk_type, chunk_data = chunk
                if chunk_type == "debug":
                    # type can be checkpoint, task, task_result
                    if isinstance(chunk_data, dict) and "type" in chunk_data:
                        debug_type = chunk_data["type"]
                        if debug_type == "checkpoint":
                            yield checkpoint_event(chunk_data)
                        elif debug_type == "task_result":
                            interrupts = chunk_data["payload"].get(
                                "interrupts", [])
                            if interrupts and len(interrupts) > 0:
                                yield interrupt_event(interrupts)
                elif chunk_type == "messages":
                    # Make sure chunk_data is a list/tuple with at least two elements
                    # and that the second element is a dict containing langgraph_node.
                    if isinstance(chunk_data, (list, tuple)) and len(chunk_data) > 1 and isinstance(chunk_data[1], dict) and "langgraph_node" in chunk_data[1]:
                        yield message_chunk_event(chunk_data[1]["langgraph_node"], chunk_data[0])
                    else:
                        print(f"Warning: Unexpected messages chunk_data format: {chunk_data}")
                        # Try to fall back to safe default values
                        node_name = chunk_data[1].get("langgraph_node", "unknown") if isinstance(chunk_data, (list, tuple)) and len(chunk_data) > 1 and isinstance(chunk_data[1], dict) else "unknown"
                        message = chunk_data[0] if isinstance(chunk_data, (list, tuple)) and len(chunk_data) > 0 else None
                        if message is not None:
                            yield message_chunk_event(node_name, message)
                elif chunk_type == "custom":
                    # Check if this is a StreamUpdate
                    if isinstance(chunk_data, dict) and all(k in chunk_data for k in ['id', 'type', 'status', 'title']):
                        yield stream_update_event(chunk_data)
                    else:
                        yield custom_event(chunk_data)
                elif chunk_type == "updates":
                    # Handle state update events (e.g., real-time Plan updates)
                    pass  # Currently ignore updates events, rely on checkpoint or custom
            # --- Loop ended ---
            yield {"event": "end", "data": "{}"}  # Send an end event to frontend
        except Exception as e:
            print(f"Error during agent execution stream: {e}")
            traceback.print_exc()
            # Send error event to frontend
            yield {"event": "error", "data": json.dumps({"message": f"Agent execution error: {e}"})}
        finally:
            # Always unregister the stream so /agent/stop does not see a stale entry.
            if thread_id in active_connections:
                del active_connections[thread_id]
    return EventSourceResponse(generate_events())
def main():
    """Start the API server with uvicorn (port 8000, auto-reload enabled)."""
    uvicorn.run("api.server:app", host="0.0.0.0", port=8000, reload=True)

if __name__ == "__main__":
    import sys
    import os
    # Add the project root directory to the Python path so that "api.*"
    # imports resolve when this file is run directly as a script.
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
    main()
================================================
FILE: api/utils.py
================================================
import json
from typing import Dict, Any, List, Optional
from langchain_core.messages import BaseMessage, AIMessage, HumanMessage, ToolMessage
from langgraph.types import StateSnapshot
def checkpoint_event(value):
    """Build a 'checkpoint' SSE event from a LangGraph debug checkpoint payload."""

    def _serialize_message(msg):
        # Messages may arrive either as plain dicts or as message objects.
        if isinstance(msg, dict):
            return {
                "type": msg.get("type"),
                "content": msg.get("content"),
                "id": msg.get("id"),
                "tool_calls": msg.get("tool_calls"),
            }
        return {
            "type": msg.type,
            "content": msg.content,
            "id": msg.id,
            "tool_calls": msg.tool_calls if hasattr(msg, 'tool_calls') else None,
        }

    def _format_values(values: dict):
        # Shallow-copy and replace messages with serializable dicts.
        formatted = values.copy()
        if "messages" in formatted:
            formatted["messages"] = [_serialize_message(m) for m in formatted["messages"]]
        return formatted

    def _format_writes(writes):
        if writes is None:
            return None
        formatted = {}
        for key, entry in writes.items():
            if isinstance(entry, dict):
                formatted[key] = _format_values(entry)
            elif isinstance(entry, list):
                formatted[key] = [_format_values(item) if isinstance(item, dict) else item
                                  for item in entry]
            else:
                formatted[key] = entry
        return formatted

    payload = value["payload"]
    configurable = payload["config"]["configurable"]
    data = {
        "next": payload["next"],
        "values": _format_values(payload["values"]),
        "config": {
            "configurable": {
                "checkpoint_id": configurable["checkpoint_id"],
                "checkpoint_ns": configurable["checkpoint_ns"],
                "thread_id": configurable["thread_id"]
            }
        },
        "metadata": {
            "source": payload["metadata"]["source"],
            "step": payload["metadata"]["step"],
            "writes": _format_writes(payload["metadata"]["writes"]),
            "parents": payload["metadata"]["parents"]
        }
    }
    return {
        "event": "checkpoint",
        "data": json.dumps(data)
    }
def message_chunk_event(node_name, message_chunk):
    """Build an SSE 'message_chunk' event for one streamed model message.

    The chunk object is flattened to plain JSON-safe fields because the
    message class itself is not serializable.
    """
    serialized = {
        "content": message_chunk.content,
        "id": message_chunk.id,
        "tool_calls": getattr(message_chunk, "tool_calls", None),
        "tool_call_chunks": getattr(message_chunk, "tool_call_chunks", None),
    }
    return {
        "event": "message_chunk",
        "data": json.dumps({
            "node_name": node_name,
            "message_chunk": serialized
        })
    }
def interrupt_event(interrupts):
    """Build an SSE 'interrupt' event from a list of interrupt dicts.

    Only each interrupt's ``value`` entry is forwarded to the client.
    """
    payload = [{"value": item["value"]} for item in interrupts]
    return {
        "event": "interrupt",
        "data": json.dumps(payload)
    }
def custom_event(value):
    """Wrap an arbitrary JSON-serializable value as an SSE 'custom' event."""
    return {
        "event": "custom",
        "data": json.dumps(value)
    }
def format_state_snapshot(snapshot: StateSnapshot):
    """Convert a LangGraph StateSnapshot into a JSON-friendly dict.

    Pending interrupts from all tasks are collected into a flat list,
    keeping only each interrupt's ``value``.
    """
    pending = [
        {"value": intr.value}
        for task in snapshot.tasks
        for intr in task.interrupts
    ]
    return {
        "values": snapshot.values,
        "next": snapshot.next,
        "config": snapshot.config,
        "interrupts": pending,
        "parent_config": snapshot.parent_config,
        "metadata": snapshot.metadata,
    }
def stream_update_event(data: dict):
    """Build an SSE 'stream_update' event from DeepResearch StreamUpdateData.

    Args:
        data: Dict matching the StreamUpdateData structure produced by
            ``add_stream_update``.

    Returns:
        Dict in the shape expected by SSE EventSourceResponse.
    """
    if not isinstance(data, dict):
        # Non-dict input indicates an upstream bug; surface it as an error event.
        return {
            "event": "error",
            "data": json.dumps({"message": "Internal server error: Invalid stream update data type."})
        }
    return {
        "event": "stream_update",
        "data": json.dumps(data, default=str)
    }
================================================
FILE: core/__init__.py
================================================
# Core module initialization
================================================
FILE: core/a2a/README.md
================================================
# Mentis A2A (Agent2Agent) 协议集成
本目录 (`core/a2a/`) 包含用于实现 Agent2Agent (A2A) 协议的客户端和服务器实现,使 Mentis Agents 能够与其他支持 A2A 协议的代理系统进行通信和协作。
## 背景
A2A 是由 Google 发起的开放标准,旨在使不同框架(如 LangGraph、CrewAI、Google ADK、Genkit)或不同供应商构建的 AI 代理能够发现彼此的能力,协商交互模式(文本、文件、数据等),并在任务上进行协作。
## 核心组件
### 1. A2A 客户端 (`A2AClient`)
`A2AClient` 类(位于 `client/client.py`)提供了与支持 A2A 协议的服务器进行交互的功能:
* **代理发现:** 支持通过 `.well-known/agent.json` 端点自动发现代理能力(Agent Card)。
* **任务管理:** 提供发送、获取和取消任务的方法。
* **推送通知:** 支持设置和获取任务的推送通知配置。
* **流式响应:** 支持通过流式API接收任务执行的实时更新。
* **异步架构:** 基于 `asyncio` 和 `httpx` 构建,适合异步应用。
### 2. A2A 服务器 (`A2AServer`)
`A2AServer` 类(位于 `server/server.py`)允许将现有的 Mentis Agent 暴露为支持 A2A 协议的服务:
* **基于 Starlette:** 使用 Starlette 框架提供 HTTP 和 SSE 端点。
* **任务处理:** 支持任务的创建、执行和状态跟踪。
* **流式更新:** 通过 Server-Sent Events (SSE) 提供任务执行的实时更新。
* **Agent Card:** 通过 `.well-known/agent.json` 端点公开代理能力。
### 3. 辅助工具
#### 推送通知认证 (`PushNotificationAuth`)
`PushNotificationAuth` 类(位于 `utils/push_notification_auth.py`)提供了安全的推送通知机制:
* **发送方认证 (`PushNotificationSenderAuth`):**
- 生成和管理 JWT 密钥对
- 验证推送通知 URL
- 签名并发送推送通知
- 提供 JWKS 端点供接收方获取公钥
* **接收方认证 (`PushNotificationReceiverAuth`):**
- 从 JWKS URL 加载公钥
- 验证接收到的推送通知的完整性和时效性
- 防止重放攻击
#### 内存缓存 (`InMemoryCache`)
`InMemoryCache` 类(位于 `utils/in_memory_cache.py`)提供了线程安全的内存缓存实现:
* **单例模式:** 确保应用中只有一个缓存实例
* **TTL 支持:** 支持设置缓存项的过期时间
* **线程安全:** 使用锁机制确保并发安全
## 数据类型
A2A 协议定义了几个关键数据类型(位于 `types.py`):
* **AgentCard:** 描述代理的元数据,包括名称、描述、URL、能力和技能。
* **Task:** 表示代理执行的任务,包含状态、内容和产物。
* **Part:** 内容的一部分,可以是文本、文件或数据。
* **Artifact:** 代理产生的产物,如结果、生成的文件等。
* **TaskState:** 任务状态枚举(已提交、进行中、需要输入、已完成、已取消、失败)。
* **PushNotificationConfig:** 推送通知配置,包含回调URL和认证信息。
## 如何使用
### 1. 创建和使用 A2A 客户端
```python
import asyncio
from core.a2a.types import AgentCard
from core.a2a.client.client import A2AClient
async def main():
# 方式1:直接指定URL创建客户端
async with A2AClient(url="http://localhost:8000/a2a") as client:
# 发送任务
response = await client.send_task({"text": "请帮我研究人工智能"})
task_id = response["result"]["taskId"]
# 获取任务结果
task_response = await client.get_task({"id": task_id})
# 设置推送通知
await client.set_task_callback({
"taskId": task_id,
"callbackUrl": "https://your-callback-url.com/webhook"
})
# 方式2:通过Agent Card创建客户端
agent_card = AgentCard(name="Example Agent", url="http://localhost:8000/a2a")
async with A2AClient(agent_card=agent_card) as client:
# 使用流式API接收实时更新
async for update in client.send_task_streaming({"text": "分析最新的AI趋势"}):
print(update)
# 运行
asyncio.run(main())
```
### 2. 创建 A2A 服务器
```python
from core.a2a.server.server import A2AServer
from core.a2a.server.task_manager import InMemoryTaskManager
from core.a2a.types import AgentCard
# 创建Agent卡片
agent_card = AgentCard(
name="My Agent",
description="一个示例代理",
url="http://localhost:5000"
)
# 创建任务管理器
task_manager = InMemoryTaskManager()
# 创建服务器
server = A2AServer(
host="0.0.0.0",
port=5000,
endpoint="/",
agent_card=agent_card,
task_manager=task_manager
)
# 启动服务器
server.start()
```
### 3. 配置推送通知
#### 发送方配置
```python
from core.a2a.utils.push_notification_auth import PushNotificationSenderAuth
# 创建发送方认证
sender_auth = PushNotificationSenderAuth()
# 生成密钥对
sender_auth.generate_jwk()
# 添加JWKS端点到你的服务器
app.add_route("/.well-known/jwks.json", sender_auth.handle_jwks_endpoint)
# 验证接收方URL
is_valid = await sender_auth.verify_push_notification_url("https://receiver-url.com/webhook")
# 发送推送通知
if is_valid:
await sender_auth.send_push_notification(
"https://receiver-url.com/webhook",
{"event": "task_completed", "taskId": "123"}
)
```
#### 接收方配置
```python
from core.a2a.utils.push_notification_auth import PushNotificationReceiverAuth
from starlette.requests import Request
# 创建接收方认证
receiver_auth = PushNotificationReceiverAuth()
# 加载发送方的公钥
await receiver_auth.load_jwks("https://sender-url.com/.well-known/jwks.json")
# 在webhook处理函数中验证推送通知
async def webhook_handler(request: Request):
is_valid = await receiver_auth.verify_push_notification(request)
if is_valid:
# 处理推送通知...
data = await request.json()
print(f"收到有效的推送通知: {data}")
```
### 4. 使用内存缓存
```python
from core.a2a.utils.in_memory_cache import InMemoryCache
# 获取缓存实例
cache = InMemoryCache()
# 设置缓存项(带TTL)
cache.set("api_result", {"data": "some_value"}, ttl=300) # 5分钟过期
# 获取缓存项
result = cache.get("api_result")
if result:
print(f"从缓存获取结果: {result}")
else:
print("缓存已过期或不存在")
# 删除缓存项
cache.delete("api_result")
# 清空所有缓存
cache.clear()
```
## 完整示例
查看 `examples/16_a2a_integration_test.py` 获取完整的集成示例,包括:
1. 创建 A2A 服务器,将现有 Agent 暴露为 A2A 服务
2. 使用 A2A 客户端连接到 A2A 服务器
3. 创建一个 Agent,使用 A2A 客户端作为工具
运行示例:
```bash
# 启动 A2A 服务器
python -m examples.16_a2a_integration_test server
# 运行 A2A 客户端
python -m examples.16_a2a_integration_test client
# 运行带有 A2A 工具的 Agent
python -m examples.16_a2a_integration_test agent
```
## 与 MCP 的关系
Mentis 同时支持 MCP(Model Context Protocol)和 A2A(Agent2Agent)协议:
* **MCP:** 专注于 AI 模型与外部工具/服务的交互,主要用于扩展单个 Agent 的能力。
* **A2A:** 专注于不同 Agent 之间的通信和协作,使多个 Agent 能够协同工作。
这两个协议是互补的,可以同时使用以构建功能强大的 Agent 系统。
================================================
FILE: core/a2a/__init__.py
================================================
================================================
FILE: core/a2a/agent_task_manager.py
================================================
import asyncio
import logging
import traceback
from typing import Dict, Any, Union, AsyncIterable, Optional
from core.a2a.types import (
TaskState, TaskStatus, Task, Artifact, Message, TextPart,
SendTaskRequest, SendTaskResponse, GetTaskRequest, GetTaskResponse,
CancelTaskRequest, CancelTaskResponse, SendTaskStreamingRequest, SendTaskStreamingResponse,
SetTaskPushNotificationRequest, SetTaskPushNotificationResponse,
GetTaskPushNotificationRequest, GetTaskPushNotificationResponse,
TaskResubscriptionRequest, TaskSendParams, JSONRPCResponse, InvalidParamsError,
TaskNotFoundError, TaskNotCancelableError, PushNotificationNotSupportedError,
TaskArtifactUpdateEvent, TaskStatusUpdateEvent, InternalError, TaskIdParams,
PushNotificationConfig
)
from core.a2a.server.task_manager import TaskManager, InMemoryTaskManager
from core.a2a.server import utils
logger = logging.getLogger(__name__)
class AgentTaskManager(InMemoryTaskManager):
    """Bridge between a LangGraph agent and the A2A protocol.

    Owns the task lifecycle for one agent: runs the agent (synchronously
    or streaming), persists task state and artifacts, relays streaming
    updates over SSE, and sends signed push notifications when a sender
    auth is configured.
    """

    def __init__(self, agent, notification_sender_auth=None):
        """Initialize the task manager.

        Args:
            agent: LangGraph agent instance exposing ``invoke``/``stream``
                and ``SUPPORTED_CONTENT_TYPES``.
            notification_sender_auth: Optional push-notification sender
                auth used to verify callback URLs and sign notifications.
                When None, push notifications are skipped.
        """
        super().__init__()
        self.agent = agent
        self.notification_sender_auth = notification_sender_auth

    async def _run_streaming_agent(self, request: SendTaskStreamingRequest):
        """Run the agent in streaming mode and fan results out over SSE.

        Args:
            request: The streaming task request.
        """
        task_send_params: TaskSendParams = request.params
        query = self._get_user_query(task_send_params)
        try:
            async for item in self.agent.stream(query, task_send_params.sessionId):
                is_task_complete = item["is_task_complete"]
                require_user_input = item["require_user_input"]
                artifact = None
                message = None
                parts = [{"type": "text", "text": item["content"]}]
                end_stream = False
                if not is_task_complete and not require_user_input:
                    # Intermediate progress: keep the task in WORKING state.
                    task_state = TaskState.WORKING
                    message = Message(role="agent", parts=parts)
                elif require_user_input:
                    # Agent is blocked on the user; close this stream.
                    task_state = TaskState.INPUT_REQUIRED
                    message = Message(role="agent", parts=parts)
                    end_stream = True
                else:
                    # Final answer: emit it as an artifact and finish.
                    task_state = TaskState.COMPLETED
                    artifact = Artifact(parts=parts, index=0, append=False)
                    end_stream = True
                task_status = TaskStatus(state=task_state, message=message)
                latest_task = await self.update_store(
                    task_send_params.id,
                    task_status,
                    None if artifact is None else [artifact],
                )
                await self.send_task_notification(latest_task)
                if artifact:
                    task_artifact_update_event = TaskArtifactUpdateEvent(
                        id=task_send_params.id, artifact=artifact
                    )
                    await self.enqueue_events_for_sse(
                        task_send_params.id, task_artifact_update_event
                    )
                task_update_event = TaskStatusUpdateEvent(
                    id=task_send_params.id, status=task_status, final=end_stream
                )
                await self.enqueue_events_for_sse(
                    task_send_params.id, task_update_event
                )
        except Exception as e:
            logger.error(f"An error occurred while streaming the response: {e}")
            await self.enqueue_events_for_sse(
                task_send_params.id,
                InternalError(message=f"An error occurred while streaming the response: {e}")
            )

    def _get_user_query(self, task_send_params: TaskSendParams) -> str:
        """Extract the user query text (strict, Google-demo style).

        Args:
            task_send_params: Task send parameters.

        Returns:
            str: Text of the first message part; "" when the message has
            no parts.

        Raises:
            ValueError: If the first part is not a TextPart.
        """
        if not task_send_params.message or not task_send_params.message.parts:
            logger.warning(f"[_get_user_query] Message or parts are empty for task {task_send_params.id}")
            return ""
        # Only the first part is consulted, by design.
        part = task_send_params.message.parts[0]
        logger.debug(f"[_get_user_query] First part: type={type(part)}, value={part!r}")
        if not isinstance(part, TextPart):
            logger.error(f"[_get_user_query] First part is not a TextPart instance! Type: {type(part)}")
            # Fail loudly so the caller gets a precise diagnosis.
            raise ValueError(f"Expected first message part to be TextPart, but got {type(part)}")
        logger.debug(f"[_get_user_query] Extracted query from TextPart: '{part.text}'")
        return part.text

    def _validate_request(
        self, request: Union[SendTaskRequest, SendTaskStreamingRequest]
    ) -> JSONRPCResponse | None:
        """Validate output modality and push-notification parameters.

        Args:
            request: The task request to validate.

        Returns:
            A JSONRPCResponse carrying the error, or None when valid.
        """
        task_send_params: TaskSendParams = request.params
        if not utils.are_modalities_compatible(
            task_send_params.acceptedOutputModes, self.agent.SUPPORTED_CONTENT_TYPES
        ):
            logger.warning(
                "Unsupported output mode. Received %s, Support %s",
                task_send_params.acceptedOutputModes,
                self.agent.SUPPORTED_CONTENT_TYPES,
            )
            return utils.new_incompatible_types_error(request.id)
        if task_send_params.pushNotification and not task_send_params.pushNotification.url:
            logger.warning("Push notification URL is missing")
            return JSONRPCResponse(id=request.id, error=InvalidParamsError(message="Push notification URL is missing"))
        return None

    async def on_send_task(self, request: SendTaskRequest) -> SendTaskResponse:
        """Handle a synchronous send-task request.

        Args:
            request: The task request.

        Returns:
            SendTaskResponse with the completed task or an error.
        """
        validation_error = self._validate_request(request)
        if validation_error:
            return SendTaskResponse(id=request.id, error=validation_error.error)
        if request.params.pushNotification:
            if not await self.set_push_notification_info(request.params.id, request.params.pushNotification):
                return SendTaskResponse(id=request.id, error=InvalidParamsError(message="Push notification URL is invalid"))
        await self.upsert_task(request.params)
        task = await self.update_store(
            request.params.id, TaskStatus(state=TaskState.WORKING), None
        )
        await self.send_task_notification(task)
        task_send_params: TaskSendParams = request.params
        query = self._get_user_query(task_send_params)
        try:
            agent_response = self.agent.invoke(query, task_send_params.sessionId)
            # Record the agent's answer as a single text artifact and complete the task.
            parts = [{"type": "text", "text": agent_response}]
            artifact = Artifact(parts=parts, index=0, append=False)
            task = await self.update_store(
                task_send_params.id,
                TaskStatus(state=TaskState.COMPLETED),
                [artifact]
            )
            await self.send_task_notification(task)
            return SendTaskResponse(id=request.id, result=task)
        except Exception as e:
            logger.error(f"Error during agent invocation or task processing: {e}", exc_info=True)
            try:
                # Best effort: record the failure so polling clients can observe it.
                # NOTE(review): TaskStatus is given an ``error`` field here — confirm
                # the model in core/a2a/types.py actually accepts it.
                task_failed: Task = await self.update_store(
                    task_send_params.id,
                    TaskStatus(state=TaskState.FAILED, error={"message": str(e)}),
                    None
                )
                await self.send_task_notification(task_failed)
            except Exception as update_err:
                # Even the FAILED-state update can fail; log it and fall through.
                logger.error(f"Failed to update task status to FAILED after initial error: {update_err}", exc_info=True)
            # InternalError fits best: the failure happened inside server-side processing.
            return SendTaskResponse(id=request.id, error=InternalError(message=f"Error processing task: {str(e) or type(e).__name__}"))

    async def on_send_task_subscribe(
        self, request: SendTaskStreamingRequest
    ) -> AsyncIterable[SendTaskStreamingResponse] | JSONRPCResponse:
        """Handle a streaming task request.

        Validates the request, registers an SSE consumer queue, starts the
        agent in the background, and returns the event stream.

        Args:
            request: The streaming task request.

        Returns:
            An async iterable of streaming responses, or a JSONRPCResponse
            describing the error.
        """
        try:
            error = self._validate_request(request)
            if error:
                return error
            await self.upsert_task(request.params)
            if request.params.pushNotification:
                if not await self.set_push_notification_info(request.params.id, request.params.pushNotification):
                    return JSONRPCResponse(id=request.id, error=InvalidParamsError(message="Push notification URL is invalid"))
            task_send_params: TaskSendParams = request.params
            sse_event_queue = await self.setup_sse_consumer(task_send_params.id, False)
            # Run the agent concurrently; events flow to the queue registered above.
            asyncio.create_task(self._run_streaming_agent(request))
            return self.dequeue_events_for_sse(
                request.id, task_send_params.id, sse_event_queue
            )
        except Exception as e:
            logger.error(f"Error in SSE stream: {e}")
            print(traceback.format_exc())
            return JSONRPCResponse(
                id=request.id,
                error=InternalError(
                    message="An error occurred while streaming the response"
                ),
            )

    async def _process_agent_response(
        self, request: SendTaskRequest, agent_response: dict
    ) -> SendTaskResponse:
        """Processes the agent's response and updates the task store."""
        task_send_params: TaskSendParams = request.params
        task_id = task_send_params.id
        history_length = task_send_params.historyLength
        parts = [{"type": "text", "text": agent_response["content"]}]
        artifact = None
        if agent_response["require_user_input"]:
            # Agent needs more input: keep the conversation open.
            task_status = TaskStatus(
                state=TaskState.INPUT_REQUIRED,
                message=Message(role="agent", parts=parts),
            )
        else:
            task_status = TaskStatus(state=TaskState.COMPLETED)
            artifact = Artifact(parts=parts)
        task = await self.update_store(
            task_id, task_status, None if artifact is None else [artifact]
        )
        task_result = self.append_task_history(task, history_length)
        await self.send_task_notification(task)
        return SendTaskResponse(id=request.id, result=task_result)

    async def on_resubscribe_to_task(
        self, request: TaskResubscriptionRequest
    ) -> AsyncIterable[SendTaskStreamingResponse] | JSONRPCResponse:
        """Reattach a client to an existing task's SSE stream."""
        task_id_params: TaskIdParams = request.params
        try:
            sse_event_queue = await self.setup_sse_consumer(task_id_params.id, True)
            return self.dequeue_events_for_sse(request.id, task_id_params.id, sse_event_queue)
        except Exception as e:
            logger.error(f"Error while reconnecting to SSE stream: {e}")
            return JSONRPCResponse(
                id=request.id,
                error=InternalError(
                    message=f"An error occurred while reconnecting to stream: {e}"
                ),
            )

    async def send_task_notification(self, task: Task):
        """Push the task's current state to its registered callback URL, if any."""
        if not await self.has_push_notification_info(task.id):
            logger.info(f"No push notification info found for task {task.id}")
            return
        # BUG FIX: the constructor allows notification_sender_auth=None, but this
        # method previously dereferenced it unconditionally — AttributeError when
        # push info was stored without a configured sender auth.
        if self.notification_sender_auth is None:
            logger.warning(f"Push notification info exists for task {task.id} but no sender auth is configured; skipping.")
            return
        push_info = await self.get_push_notification_info(task.id)
        logger.info(f"Notifying for task {task.id} => {task.status.state}")
        await self.notification_sender_auth.send_push_notification(
            push_info.url,
            data=task.model_dump(exclude_none=True)
        )

    async def set_push_notification_info(self, task_id: str, push_notification_config: PushNotificationConfig):
        """Verify ownership of the callback URL (challenge request) and store it.

        Returns:
            bool: True when stored; False when URL verification failed.
        """
        if self.notification_sender_auth:
            is_verified = await self.notification_sender_auth.verify_push_notification_url(push_notification_config.url)
            if not is_verified:
                return False
        await super().set_push_notification_info(task_id, push_notification_config)
        return True
================================================
FILE: core/a2a/client/__init__.py
================================================
================================================
FILE: core/a2a/client/card_resolver.py
================================================
import httpx
from core.a2a.types import (
AgentCard,
A2AClientJSONError,
)
import json
class A2ACardResolver:
    """Resolve an A2A AgentCard from a server's well-known endpoint."""

    def __init__(self, base_url, agent_card_path="/.well-known/agent.json"):
        # Normalize so joining with a single "/" is always correct.
        self.base_url = base_url.rstrip("/")
        self.agent_card_path = agent_card_path.lstrip("/")

    def get_agent_card(self) -> AgentCard:
        """Fetch and parse the agent card; raises A2AClientJSONError on bad JSON."""
        endpoint = f"{self.base_url}/{self.agent_card_path}"
        with httpx.Client() as http:
            resp = http.get(endpoint)
            resp.raise_for_status()
            try:
                payload = resp.json()
            except json.JSONDecodeError as err:
                raise A2AClientJSONError(str(err)) from err
        return AgentCard(**payload)
================================================
FILE: core/a2a/client/client.py
================================================
import httpx
from httpx_sse import connect_sse
from typing import Any, AsyncIterable
from core.a2a.types import (
AgentCard,
GetTaskRequest,
SendTaskRequest,
SendTaskResponse,
JSONRPCRequest,
GetTaskResponse,
CancelTaskResponse,
CancelTaskRequest,
SetTaskPushNotificationRequest,
SetTaskPushNotificationResponse,
GetTaskPushNotificationRequest,
GetTaskPushNotificationResponse,
A2AClientHTTPError,
A2AClientJSONError,
SendTaskStreamingRequest,
SendTaskStreamingResponse,
)
import json
class A2AClient:
    """Thin JSON-RPC client for an A2A server.

    Supports plain request/response calls plus an SSE-based streaming
    variant, and can be used as an async context manager
    (``async with A2AClient(...) as client:``) to match the documented
    usage.
    """

    def __init__(self, agent_card: AgentCard = None, url: str = None):
        """Create a client from an AgentCard or an explicit URL.

        Args:
            agent_card: Agent card whose ``url`` is used when given.
            url: Endpoint URL, used when no agent card is supplied.

        Raises:
            ValueError: If neither agent_card nor url is given.
        """
        if agent_card:
            self.url = agent_card.url
        elif url:
            self.url = url
        else:
            raise ValueError("Must provide either agent_card or url")

    async def __aenter__(self):
        # Added so the documented ``async with A2AClient(...)`` usage works;
        # each request manages its own httpx client, so there is nothing to open.
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Nothing to release; never suppress exceptions.
        return False

    async def send_task(self, payload: dict[str, Any]) -> SendTaskResponse:
        """Submit a task for synchronous execution."""
        request = SendTaskRequest(params=payload)
        return SendTaskResponse(**await self._send_request(request))

    async def send_task_streaming(
        self, payload: dict[str, Any]
    ) -> AsyncIterable[SendTaskStreamingResponse]:
        """Submit a task and yield streaming (SSE) updates.

        NOTE(review): this uses a *synchronous* httpx client inside an async
        generator, which blocks the event loop while waiting for events —
        consider httpx_sse.aconnect_sse with httpx.AsyncClient.
        """
        request = SendTaskStreamingRequest(params=payload)
        with httpx.Client(timeout=None) as client:
            with connect_sse(
                client, "POST", self.url, json=request.model_dump()
            ) as event_source:
                try:
                    for sse in event_source.iter_sse():
                        yield SendTaskStreamingResponse(**json.loads(sse.data))
                except json.JSONDecodeError as e:
                    raise A2AClientJSONError(str(e)) from e
                except httpx.RequestError as e:
                    raise A2AClientHTTPError(400, str(e)) from e

    async def _send_request(self, request: JSONRPCRequest) -> dict[str, Any]:
        """POST one JSON-RPC request and return the decoded response body.

        Raises:
            A2AClientHTTPError: On a non-2xx HTTP status.
            A2AClientJSONError: When the response body is not valid JSON.
        """
        async with httpx.AsyncClient() as client:
            try:
                # Agent runs (e.g. image generation) can be slow; allow 30s.
                response = await client.post(
                    self.url, json=request.model_dump(), timeout=30
                )
                response.raise_for_status()
                return response.json()
            except httpx.HTTPStatusError as e:
                raise A2AClientHTTPError(e.response.status_code, str(e)) from e
            except json.JSONDecodeError as e:
                raise A2AClientJSONError(str(e)) from e

    async def get_task(self, payload: dict[str, Any]) -> GetTaskResponse:
        """Fetch a task's current state and history."""
        request = GetTaskRequest(params=payload)
        return GetTaskResponse(**await self._send_request(request))

    async def cancel_task(self, payload: dict[str, Any]) -> CancelTaskResponse:
        """Request cancellation of a task."""
        request = CancelTaskRequest(params=payload)
        return CancelTaskResponse(**await self._send_request(request))

    async def set_task_callback(
        self, payload: dict[str, Any]
    ) -> SetTaskPushNotificationResponse:
        """Register a push-notification callback for a task."""
        request = SetTaskPushNotificationRequest(params=payload)
        return SetTaskPushNotificationResponse(**await self._send_request(request))

    async def get_task_callback(
        self, payload: dict[str, Any]
    ) -> GetTaskPushNotificationResponse:
        """Read back a task's push-notification configuration."""
        request = GetTaskPushNotificationRequest(params=payload)
        return GetTaskPushNotificationResponse(**await self._send_request(request))
================================================
FILE: core/a2a/config.json
================================================
{
"local_agent": {
"url": "http://127.0.0.1:8000/",
"auth": {
"type": "none"
}
}
}
================================================
FILE: core/a2a/server/__init__.py
================================================
================================================
FILE: core/a2a/server/server.py
================================================
# core/a2a/server/server.py
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from sse_starlette.sse import EventSourceResponse
from starlette.requests import Request
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
# --- 添加 Pydantic 的 ValidationError 导入 ---
from pydantic import ValidationError
# --- 导入结束 ---
from core.a2a.types import (
A2ARequest,
JSONRPCResponse,
InvalidRequestError,
JSONParseError,
GetTaskRequest,
CancelTaskRequest,
SendTaskRequest,
SetTaskPushNotificationRequest,
GetTaskPushNotificationRequest,
InternalError,
AgentCard,
TaskResubscriptionRequest,
SendTaskStreamingRequest,
MethodNotFoundError,
# 确保 ValidationError 没有在这里导入
)
import json
from typing import AsyncIterable, Any, Optional, Union
from core.a2a.server.task_manager import TaskManager
import logging
logger = logging.getLogger(__name__)
class A2AServer:
    """Starlette application exposing a TaskManager via the A2A JSON-RPC protocol.

    Routes:
        POST  <endpoint>              — JSON-RPC task methods (may stream via SSE)
        GET   /.well-known/agent.json — the agent's AgentCard
    """

    def __init__(
        self,
        host="0.0.0.0",
        port=5000,
        endpoint="/",
        agent_card: AgentCard = None,
        task_manager: TaskManager = None,
        allowed_origins: Optional[list[str]] = None,
    ):
        """Build the app and register routes; call ``start()`` to serve.

        Args:
            host: Bind address for uvicorn.
            port: Bind port for uvicorn.
            endpoint: Path that accepts JSON-RPC POSTs.
            agent_card: Card describing this agent (required before start()).
            task_manager: Handler for all task methods (required before start()).
            allowed_origins: CORS origins; defaults to localhost:3000 only.
        """
        self.host = host
        self.port = port
        self.endpoint = endpoint
        self.task_manager = task_manager
        self.agent_card = agent_card
        if allowed_origins is None:
            # Default to localhost:3000 for local development only.
            allowed_origins = ["http://localhost:3000"]
            logger.warning("CORS allow_origins set to 'http://localhost:3000' for local development.")
        else:
            logger.info(f"CORS allow_origins configured: {allowed_origins}")
        middleware = [
            Middleware(
                CORSMiddleware,
                allow_origins=allowed_origins,
                allow_credentials=True,
                allow_methods=["*"],
                allow_headers=["*"],
            )
        ]
        self.app = Starlette(middleware=middleware, debug=True)
        self.app.add_route(self.endpoint, self._process_request, methods=["POST"])
        self.app.add_route(
            "/.well-known/agent.json", self._get_agent_card, methods=["GET"]
        )
        logger.info(f"A2AServer initialized. Endpoint: {self.endpoint}, Agent Card Endpoint: /.well-known/agent.json")

    def start(self):
        """Run the app under uvicorn.

        Raises:
            ValueError: If agent_card or task_manager was not provided.
        """
        if self.agent_card is None:
            raise ValueError("agent_card must be provided to A2AServer")
        if self.task_manager is None:
            raise ValueError("task_manager must be provided to A2AServer")
        import uvicorn
        logger.info(f"Starting Uvicorn server on {self.host}:{self.port}...")
        uvicorn.run(self.app, host=self.host, port=self.port)

    def _get_agent_card(self, request: Request) -> JSONResponse:
        """Serve the AgentCard for agent discovery."""
        logger.debug("Received request for /.well-known/agent.json")
        if not self.agent_card:
            logger.error("Agent card requested but not configured in A2AServer.")
            return JSONResponse({"error": "Agent card not configured"}, status_code=500)
        return JSONResponse(self.agent_card.model_dump(exclude_none=True))

    async def _process_request(self, request: Request) -> Union[JSONResponse, EventSourceResponse]:
        """Parse, validate and dispatch one JSON-RPC request.

        All failures (parse, validation, dispatch) funnel into
        ``_handle_exception`` so the client always gets a JSON-RPC error.
        """
        json_rpc_request = None
        request_id_for_error = None
        try:
            try:
                body = await request.json()
                logger.debug(f"Received request body: {body}")
            except json.JSONDecodeError as e:
                logger.error(f"JSON decoding failed: {e}")
                raise JSONParseError()
            try:
                json_rpc_request = A2ARequest.validate_python(body)
                request_id_for_error = getattr(json_rpc_request, 'id', None)
                logger.info(f"Processing valid A2A request: Method='{json_rpc_request.method}', ID='{request_id_for_error}', TaskID='{getattr(json_rpc_request.params, 'id', 'N/A')}'")
            except ValidationError as e:
                logger.error(f"A2A request validation failed: {e}")
                # BUG FIX: the fallback id from the raw body was previously computed
                # but never used, so invalid requests were answered with id=null.
                if isinstance(body, dict):
                    request_id_for_error = body.get('id')
                # Re-raised as InvalidRequestError; caught by the outer handler below.
                raise InvalidRequestError(data=json.loads(e.json())) from e
            # Dispatch to the TaskManager based on the validated request type.
            if isinstance(json_rpc_request, GetTaskRequest):
                result = await self.task_manager.on_get_task(json_rpc_request)
            elif isinstance(json_rpc_request, SendTaskRequest):
                result = await self.task_manager.on_send_task(json_rpc_request)
            elif isinstance(json_rpc_request, SendTaskStreamingRequest):
                result = await self.task_manager.on_send_task_subscribe(json_rpc_request)
            elif isinstance(json_rpc_request, CancelTaskRequest):
                result = await self.task_manager.on_cancel_task(json_rpc_request)
            elif isinstance(json_rpc_request, SetTaskPushNotificationRequest):
                result = await self.task_manager.on_set_task_push_notification(json_rpc_request)
            elif isinstance(json_rpc_request, GetTaskPushNotificationRequest):
                result = await self.task_manager.on_get_task_push_notification(json_rpc_request)
            elif isinstance(json_rpc_request, TaskResubscriptionRequest):
                result = await self.task_manager.on_resubscribe_to_task(json_rpc_request)
            else:
                logger.warning(f"Unhandled validated request type: {type(json_rpc_request)}")
                raise MethodNotFoundError(data={"method": getattr(json_rpc_request, 'method', 'unknown')})
            logger.debug(f"[A2AServer] Result from TaskManager method '{json_rpc_request.method}': type={type(result)}")
            return self._create_response(result)
        except Exception as e:
            # Single funnel for all processing failures (parse, validation, dispatch).
            logger.error(f"Exception during request processing: {e}", exc_info=True)
            return self._handle_exception(e, request_id=request_id_for_error)

    def _handle_exception(self, e: Exception, request_id: Optional[Union[str, int]] = None) -> JSONResponse:
        """Map an exception to a JSON-RPC error response plus HTTP status code."""
        status_code = 500
        # Holds a JSON-RPC error model. (Previously annotated with JSONRPCError,
        # a name this module never imports — annotation dropped.)
        json_rpc_error = None
        if isinstance(e, JSONParseError):
            json_rpc_error = e
            status_code = 400
        elif isinstance(e, InvalidRequestError):
            json_rpc_error = e
            status_code = 400
        elif isinstance(e, MethodNotFoundError):
            json_rpc_error = e
            status_code = 404  # 501 would also be defensible
        elif isinstance(e, ValidationError):
            logger.warning(f"Pydantic Validation error caught in handler: {e}")
            error_data = str(e)
            try:
                error_data = json.loads(e.json())
            except Exception:
                pass
            # Validation failures during request handling are treated as a
            # malformed request (during response creation they'd be internal,
            # but we cannot distinguish the two here).
            json_rpc_error = InvalidRequestError(message="Request/Response data validation failed", data=error_data)
            status_code = 400
        elif isinstance(e, ValueError) and "Unexpected result type" in str(e):
            logger.error(f"Internal error due to unexpected result type: {e}", exc_info=False)
            json_rpc_error = InternalError(message="Server error: Unexpected result type from handler.")
            status_code = 500
        elif isinstance(e, NotImplementedError):
            logger.error(f"Method not implemented: {e}", exc_info=True)
            json_rpc_error = MethodNotFoundError(message=f"Method not implemented: {e}")
            status_code = 501
        else:
            logger.error(f"Unhandled internal exception: {e}", exc_info=True)
            json_rpc_error = InternalError(message=f"An internal server error occurred: {type(e).__name__}")
            status_code = 500
        response = JSONRPCResponse(id=request_id, error=json_rpc_error)
        logger.debug(f"Returning error response: {response.model_dump(exclude_none=True)}")
        return JSONResponse(response.model_dump(exclude_none=True), status_code=status_code)

    def _create_response(self, result: Any) -> Union[JSONResponse, EventSourceResponse]:
        """Turn a TaskManager result into an HTTP response.

        AsyncIterable results become SSE streams; a JSONRPCResponse becomes
        a plain JSON response; anything else is a programming error.
        """
        if isinstance(result, AsyncIterable):
            logger.debug("[A2AServer] Creating EventSourceResponse (text/event-stream)")

            async def event_generator(stream_result: AsyncIterable) -> AsyncIterable[dict[str, str]]:
                try:
                    async for item in stream_result:
                        if hasattr(item, 'model_dump_json'):
                            json_data = item.model_dump_json(exclude_none=True)
                            logger.debug(f"A2AServer yielding SSE data: {json_data}")
                            yield {"data": json_data}
                        else:
                            logger.warning(f"Yielding non-Pydantic object in event stream: {type(item)}")
                            yield {"data": json.dumps(str(item))}
                except Exception as gen_err:
                    logger.error(f"Error during SSE event generation: {gen_err}", exc_info=True)
                    try:
                        # Best effort: surface a standard JSON-RPC error event to the stream.
                        error_payload = JSONRPCResponse(id=None, error=InternalError(message=f"Streaming generation error: {gen_err}"))
                        yield {"event": "error", "data": error_payload.model_dump_json(exclude_none=True)}
                    except Exception as yield_err:
                        logger.error(f"Failed to yield error event to SSE stream: {yield_err}", exc_info=True)

            return EventSourceResponse(event_generator(result))
        elif isinstance(result, JSONRPCResponse):
            logger.debug("[A2AServer] Creating JSONResponse (application/json)")
            return JSONResponse(result.model_dump(exclude_none=True))
        else:
            logger.error(f"Unexpected result type received by _create_response: {type(result)}")
            raise ValueError(f"Unexpected result type: {type(result)}")
================================================
FILE: core/a2a/server/task_manager.py
================================================
from abc import ABC, abstractmethod
from typing import Union, AsyncIterable, List
from core.a2a.types import Task
from core.a2a.types import (
JSONRPCResponse,
TaskIdParams,
TaskQueryParams,
GetTaskRequest,
TaskNotFoundError,
SendTaskRequest,
CancelTaskRequest,
TaskNotCancelableError,
SetTaskPushNotificationRequest,
GetTaskPushNotificationRequest,
GetTaskResponse,
CancelTaskResponse,
SendTaskResponse,
SetTaskPushNotificationResponse,
GetTaskPushNotificationResponse,
PushNotificationNotSupportedError,
TaskSendParams,
TaskStatus,
TaskState,
TaskResubscriptionRequest,
SendTaskStreamingRequest,
SendTaskStreamingResponse,
Artifact,
PushNotificationConfig,
TaskStatusUpdateEvent,
JSONRPCError,
TaskPushNotificationConfig,
InternalError,
)
from core.a2a.server.utils import new_not_implemented_error
import asyncio
import logging
logger = logging.getLogger(__name__)
class TaskManager(ABC):
    """Abstract interface for handling A2A task requests.

    One coroutine per protocol operation; concrete managers supply
    storage and execution (see the in-memory implementation).
    """

    @abstractmethod
    async def on_get_task(self, request: GetTaskRequest) -> GetTaskResponse:
        """Return the requested task's state and history."""

    @abstractmethod
    async def on_cancel_task(self, request: CancelTaskRequest) -> CancelTaskResponse:
        """Attempt to cancel the requested task."""

    @abstractmethod
    async def on_send_task(self, request: SendTaskRequest) -> SendTaskResponse:
        """Execute a task synchronously and return its result."""

    @abstractmethod
    async def on_send_task_subscribe(
        self, request: SendTaskStreamingRequest
    ) -> Union[AsyncIterable[SendTaskStreamingResponse], JSONRPCResponse]:
        """Execute a task and stream updates, or return an error response."""

    @abstractmethod
    async def on_set_task_push_notification(
        self, request: SetTaskPushNotificationRequest
    ) -> SetTaskPushNotificationResponse:
        """Store a push-notification configuration for a task."""

    @abstractmethod
    async def on_get_task_push_notification(
        self, request: GetTaskPushNotificationRequest
    ) -> GetTaskPushNotificationResponse:
        """Return a task's push-notification configuration."""

    @abstractmethod
    async def on_resubscribe_to_task(
        self, request: TaskResubscriptionRequest
    ) -> Union[AsyncIterable[SendTaskResponse], JSONRPCResponse]:
        """Reattach a client to an existing task's event stream."""
class InMemoryTaskManager(TaskManager):
def __init__(self):
    """Initialise empty in-memory stores and their guarding locks."""
    # Task store and SSE subscriber lists use separate locks so slow
    # event delivery does not block task-state reads.
    self.lock = asyncio.Lock()
    self.subscriber_lock = asyncio.Lock()
    self.tasks: dict[str, Task] = {}
    self.push_notification_infos: dict[str, PushNotificationConfig] = {}
    self.task_sse_subscribers: dict[str, List[asyncio.Queue]] = {}
async def on_get_task(self, request: GetTaskRequest) -> GetTaskResponse:
    """Look up a stored task and return it with history trimmed to the
    requested length, or a TaskNotFoundError."""
    params: TaskQueryParams = request.params
    logger.info(f"Getting task {params.id}")
    async with self.lock:
        stored = self.tasks.get(params.id)
        if stored is None:
            return GetTaskResponse(id=request.id, error=TaskNotFoundError())
        trimmed = self.append_task_history(stored, params.historyLength)
    return GetTaskResponse(id=request.id, result=trimmed)
async def on_cancel_task(self, request: CancelTaskRequest) -> CancelTaskResponse:
logger.info(f"Cancelling task {request.params.id}")
task_id_params: TaskIdParams = request.params
async with self.lock:
task = self.tasks.get(task_id_params.id)
if task is None:
return CancelTaskResponse(id=request.id, error=TaskNotFoundError())
return CancelTaskResponse(id=request.id, error=TaskNotCancelableError())
@abstractmethod
async def on_send_task(self, request: SendTaskRequest) -> SendTaskResponse:
pass
@abstractmethod
async def on_send_task_subscribe(
self, request: SendTaskStreamingRequest
) -> Union[AsyncIterable[SendTaskStreamingResponse], JSONRPCResponse]:
pass
async def set_push_notification_info(self, task_id: str, notification_config: PushNotificationConfig):
async with self.lock:
task = self.tasks.get(task_id)
if task is None:
raise ValueError(f"Task not found for {task_id}")
self.push_notification_infos[task_id] = notification_config
return
async def get_push_notification_info(self, task_id: str) -> PushNotificationConfig:
async with self.lock:
task = self.tasks.get(task_id)
if task is None:
raise ValueError(f"Task not found for {task_id}")
return self.push_notification_infos[task_id]
return
async def has_push_notification_info(self, task_id: str) -> bool:
async with self.lock:
return task_id in self.push_notification_infos
async def on_set_task_push_notification(
self, request: SetTaskPushNotificationRequest
) -> SetTaskPushNotificationResponse:
logger.info(f"Setting task push notification {request.params.id}")
task_notification_params: TaskPushNotificationConfig = request.params
try:
await self.set_push_notification_info(task_notification_params.id, task_notification_params.pushNotificationConfig)
except Exception as e:
logger.error(f"Error while setting push notification info: {e}")
return JSONRPCResponse(
id=request.id,
error=InternalError(
message="An error occurred while setting push notification info"
),
)
return SetTaskPushNotificationResponse(id=request.id, result=task_notification_params)
async def on_get_task_push_notification(
self, request: GetTaskPushNotificationRequest
) -> GetTaskPushNotificationResponse:
logger.info(f"Getting task push notification {request.params.id}")
task_params: TaskIdParams = request.params
try:
notification_info = await self.get_push_notification_info(task_params.id)
except Exception as e:
logger.error(f"Error while getting push notification info: {e}")
return GetTaskPushNotificationResponse(
id=request.id,
error=InternalError(
message="An error occurred while getting push notification info"
),
)
return GetTaskPushNotificationResponse(id=request.id, result=TaskPushNotificationConfig(id=task_params.id, pushNotificationConfig=notification_info))
async def upsert_task(self, task_send_params: TaskSendParams) -> Task:
logger.info(f"Upserting task {task_send_params.id}")
async with self.lock:
task = self.tasks.get(task_send_params.id)
if task is None:
task = Task(
id=task_send_params.id,
sessionId = task_send_params.sessionId,
messages=[task_send_params.message],
status=TaskStatus(state=TaskState.SUBMITTED),
history=[task_send_params.message],
)
self.tasks[task_send_params.id] = task
else:
task.history.append(task_send_params.message)
return task
async def on_resubscribe_to_task(
self, request: TaskResubscriptionRequest
) -> Union[AsyncIterable[SendTaskStreamingResponse], JSONRPCResponse]:
return new_not_implemented_error(request.id)
async def update_store(
self, task_id: str, status: TaskStatus, artifacts: list[Artifact]
) -> Task:
async with self.lock:
try:
task = self.tasks[task_id]
except KeyError:
logger.error(f"Task {task_id} not found for updating the task")
raise ValueError(f"Task {task_id} not found")
task.status = status
if status.message is not None:
task.history.append(status.message)
if artifacts is not None:
if task.artifacts is None:
task.artifacts = []
task.artifacts.extend(artifacts)
return task
def append_task_history(self, task: Task, historyLength: int | None):
new_task = task.model_copy()
if historyLength is not None and historyLength > 0:
new_task.history = new_task.history[-historyLength:]
else:
new_task.history = []
return new_task
async def setup_sse_consumer(self, task_id: str, is_resubscribe: bool = False):
async with self.subscriber_lock:
if task_id not in self.task_sse_subscribers:
if is_resubscribe:
raise ValueError("Task not found for resubscription")
else:
self.task_sse_subscribers[task_id] = []
sse_event_queue = asyncio.Queue(maxsize=0) # <=0 is unlimited
self.task_sse_subscribers[task_id].append(sse_event_queue)
return sse_event_queue
async def enqueue_events_for_sse(self, task_id, task_update_event):
async with self.subscriber_lock:
if task_id not in self.task_sse_subscribers:
return
current_subscribers = self.task_sse_subscribers[task_id]
for subscriber in current_subscribers:
await subscriber.put(task_update_event)
async def dequeue_events_for_sse(
self, request_id, task_id, sse_event_queue: asyncio.Queue
) -> AsyncIterable[SendTaskStreamingResponse] | JSONRPCResponse:
try:
while True:
event = await sse_event_queue.get()
if isinstance(event, JSONRPCError):
yield SendTaskStreamingResponse(id=request_id, error=event)
break
yield SendTaskStreamingResponse(id=request_id, result=event)
if isinstance(event, TaskStatusUpdateEvent) and event.final:
break
finally:
async with self.subscriber_lock:
if task_id in self.task_sse_subscribers:
self.task_sse_subscribers[task_id].remove(sse_event_queue)
================================================
FILE: core/a2a/server/utils.py
================================================
from core.a2a.types import (
JSONRPCResponse,
ContentTypeNotSupportedError,
UnsupportedOperationError,
)
from typing import List
def are_modalities_compatible(
    server_output_modes: List[str], client_output_modes: List[str]
):
    """Return True when server and client output modalities can interoperate.

    A missing or empty list on either side is treated as "accepts anything";
    otherwise the two sides must share at least one output mode.
    """
    if not client_output_modes:
        return True
    if not server_output_modes:
        return True
    return any(mode in server_output_modes for mode in client_output_modes)
def new_incompatible_types_error(request_id):
    """Build a JSON-RPC error response signalling incompatible content types."""
    return JSONRPCResponse(id=request_id, error=ContentTypeNotSupportedError())
def new_not_implemented_error(request_id):
    """Build a JSON-RPC error response signalling an unsupported operation."""
    return JSONRPCResponse(id=request_id, error=UnsupportedOperationError())
================================================
FILE: core/a2a/types.py
================================================
from typing import Union, Any
from pydantic import BaseModel, Field, TypeAdapter
from typing import Literal, List, Annotated, Optional
from datetime import datetime
from pydantic import model_validator, ConfigDict, field_serializer
from uuid import uuid4
from enum import Enum
from typing_extensions import Self
class TaskState(str, Enum):
    """Lifecycle states of an A2A task (serialized as kebab-case strings)."""
    SUBMITTED = "submitted"
    WORKING = "working"
    INPUT_REQUIRED = "input-required"
    COMPLETED = "completed"
    CANCELED = "canceled"
    FAILED = "failed"
    UNKNOWN = "unknown"
class TextPart(BaseModel):
    # A plain-text fragment of a message.
    type: Literal["text"] = "text"
    text: str
    metadata: dict[str, Any] | None = None
class FileContent(BaseModel):
    # File payload carried either inline ('bytes') or by reference ('uri').
    name: str | None = None
    mimeType: str | None = None
    bytes: str | None = None
    uri: str | None = None
    @model_validator(mode="after")
    def check_content(self) -> Self:
        """Enforce that exactly one of 'bytes' or 'uri' is provided."""
        if not (self.bytes or self.uri):
            raise ValueError("Either 'bytes' or 'uri' must be present in the file data")
        if self.bytes and self.uri:
            raise ValueError(
                "Only one of 'bytes' or 'uri' can be present in the file data"
            )
        return self
class FilePart(BaseModel):
    # A file-backed fragment of a message.
    type: Literal["file"] = "file"
    file: FileContent
    metadata: dict[str, Any] | None = None
class DataPart(BaseModel):
    # A structured-data fragment of a message.
    type: Literal["data"] = "data"
    data: dict[str, Any]
    metadata: dict[str, Any] | None = None
# Discriminated union: pydantic dispatches on the literal 'type' field.
Part = Annotated[Union[TextPart, FilePart, DataPart], Field(discriminator="type")]
class Message(BaseModel):
    # One conversation turn, composed of one or more typed parts.
    role: Literal["user", "agent"]
    parts: List[Part]
    metadata: dict[str, Any] | None = None
class TaskStatus(BaseModel):
    # Current state of a task plus an optional agent message explaining it.
    state: TaskState
    message: Message | None = None
    # NOTE(review): datetime.now is naive local time; a tz-aware UTC timestamp
    # may be intended for cross-host consistency — confirm before changing.
    timestamp: datetime = Field(default_factory=datetime.now)
    @field_serializer("timestamp")
    def serialize_dt(self, dt: datetime, _info):
        # Emit the timestamp as an ISO-8601 string when serializing.
        return dt.isoformat()
class Artifact(BaseModel):
    # A task output; index/append/lastChunk support chunked streaming delivery.
    name: str | None = None
    description: str | None = None
    parts: List[Part]
    metadata: dict[str, Any] | None = None
    index: int = 0
    append: bool | None = None
    lastChunk: bool | None = None
class Task(BaseModel):
    # The unit of work exchanged over A2A: status, outputs, and message history.
    id: str
    sessionId: str | None = None
    status: TaskStatus
    artifacts: List[Artifact] | None = None
    history: List[Message] | None = None
    metadata: dict[str, Any] | None = None
class TaskStatusUpdateEvent(BaseModel):
    # Streaming event: a task's status changed; final=True ends the stream.
    id: str
    status: TaskStatus
    final: bool = False
    metadata: dict[str, Any] | None = None
class TaskArtifactUpdateEvent(BaseModel):
    # Streaming event: a (possibly partial) artifact was produced.
    id: str
    artifact: Artifact
    metadata: dict[str, Any] | None = None
class AuthenticationInfo(BaseModel):
    # Auth requirements for a push endpoint; extra fields are preserved.
    model_config = ConfigDict(extra="allow")
    schemes: List[str]
    credentials: str | None = None
class PushNotificationConfig(BaseModel):
    # Where (and how) the agent should POST task updates.
    url: str
    token: str | None = None
    authentication: AuthenticationInfo | None = None
class TaskIdParams(BaseModel):
    # Minimal params: just a task id (plus optional metadata).
    id: str
    metadata: dict[str, Any] | None = None
class TaskQueryParams(TaskIdParams):
    # Task lookup params; historyLength limits returned message history.
    historyLength: int | None = None
class TaskSendParams(BaseModel):
    # Params for tasks/send; a fresh sessionId is generated when omitted.
    id: str
    sessionId: str = Field(default_factory=lambda: uuid4().hex)
    message: Message
    acceptedOutputModes: Optional[List[str]] = None
    pushNotification: PushNotificationConfig | None = None
    historyLength: int | None = None
    metadata: dict[str, Any] | None = None
class TaskPushNotificationConfig(BaseModel):
    # Binds a push-notification config to a specific task id.
    id: str
    pushNotificationConfig: PushNotificationConfig
## RPC Messages
class JSONRPCMessage(BaseModel):
    # Base envelope; a random id is generated when the caller supplies none.
    jsonrpc: Literal["2.0"] = "2.0"
    id: int | str | None = Field(default_factory=lambda: uuid4().hex)
class JSONRPCRequest(JSONRPCMessage):
    method: str
    params: dict[str, Any] | None = None
class JSONRPCError(BaseModel):
    code: int
    message: str
    data: Any | None = None
class JSONRPCResponse(JSONRPCMessage):
    # Exactly one of result/error is expected to be set by convention.
    result: Any | None = None
    error: JSONRPCError | None = None
class SendTaskRequest(JSONRPCRequest):
    method: Literal["tasks/send"] = "tasks/send"
    params: TaskSendParams
class SendTaskResponse(JSONRPCResponse):
    result: Task | None = None
class SendTaskStreamingRequest(JSONRPCRequest):
    method: Literal["tasks/sendSubscribe"] = "tasks/sendSubscribe"
    params: TaskSendParams
class SendTaskStreamingResponse(JSONRPCResponse):
    result: TaskStatusUpdateEvent | TaskArtifactUpdateEvent | None = None
class GetTaskRequest(JSONRPCRequest):
    method: Literal["tasks/get"] = "tasks/get"
    params: TaskQueryParams
class GetTaskResponse(JSONRPCResponse):
    result: Task | None = None
class CancelTaskRequest(JSONRPCRequest):
    method: Literal["tasks/cancel",] = "tasks/cancel"
    params: TaskIdParams
class CancelTaskResponse(JSONRPCResponse):
    result: Task | None = None
class SetTaskPushNotificationRequest(JSONRPCRequest):
    method: Literal["tasks/pushNotification/set",] = "tasks/pushNotification/set"
    params: TaskPushNotificationConfig
class SetTaskPushNotificationResponse(JSONRPCResponse):
    result: TaskPushNotificationConfig | None = None
class GetTaskPushNotificationRequest(JSONRPCRequest):
    method: Literal["tasks/pushNotification/get",] = "tasks/pushNotification/get"
    params: TaskIdParams
class GetTaskPushNotificationResponse(JSONRPCResponse):
    result: TaskPushNotificationConfig | None = None
class TaskResubscriptionRequest(JSONRPCRequest):
    method: Literal["tasks/resubscribe",] = "tasks/resubscribe"
    params: TaskIdParams
# Parses an incoming request into the concrete type keyed by 'method'.
A2ARequest = TypeAdapter(
    Annotated[
        Union[
            SendTaskRequest,
            GetTaskRequest,
            CancelTaskRequest,
            SetTaskPushNotificationRequest,
            GetTaskPushNotificationRequest,
            TaskResubscriptionRequest,
            SendTaskStreamingRequest,
        ],
        Field(discriminator="method"),
    ]
)
## Error types
# -32700..-32603 are standard JSON-RPC codes; -32001..-32005 are A2A-specific.
class JSONParseError(JSONRPCError):
    code: int = -32700
    message: str = "Invalid JSON payload"
    data: Any | None = None
class InvalidRequestError(JSONRPCError):
    code: int = -32600
    message: str = "Request payload validation error"
    data: Any | None = None
class MethodNotFoundError(JSONRPCError):
    code: int = -32601
    message: str = "Method not found"
    data: None = None
class InvalidParamsError(JSONRPCError):
    code: int = -32602
    message: str = "Invalid parameters"
    data: Any | None = None
class InternalError(JSONRPCError):
    code: int = -32603
    message: str = "Internal error"
    data: Any | None = None
class TaskNotFoundError(JSONRPCError):
    code: int = -32001
    message: str = "Task not found"
    data: None = None
class TaskNotCancelableError(JSONRPCError):
    code: int = -32002
    message: str = "Task cannot be canceled"
    data: None = None
class PushNotificationNotSupportedError(JSONRPCError):
    code: int = -32003
    message: str = "Push Notification is not supported"
    data: None = None
class UnsupportedOperationError(JSONRPCError):
    code: int = -32004
    message: str = "This operation is not supported"
    data: None = None
class ContentTypeNotSupportedError(JSONRPCError):
    code: int = -32005
    message: str = "Incompatible content types"
    data: None = None
class AgentProvider(BaseModel):
    # Organization that operates the agent.
    organization: str
    url: str | None = None
class AgentCapabilities(BaseModel):
    # Optional protocol features the agent supports.
    streaming: bool = False
    pushNotifications: bool = False
    stateTransitionHistory: bool = False
class AgentAuthentication(BaseModel):
    # How clients must authenticate to the agent.
    schemes: List[str]
    credentials: str | None = None
class AgentSkill(BaseModel):
    # One advertised capability, with optional per-skill I/O modes.
    id: str
    name: str
    description: str | None = None
    tags: List[str] | None = None
    examples: List[str] | None = None
    inputModes: List[str] | None = None
    outputModes: List[str] | None = None
class AgentCard(BaseModel):
    # Public descriptor of an agent: endpoint, capabilities, and skills.
    name: str
    description: str | None = None
    url: str
    provider: AgentProvider | None = None
    version: str
    documentationUrl: str | None = None
    capabilities: AgentCapabilities
    authentication: AgentAuthentication | None = None
    defaultInputModes: List[str] = ["text"]
    defaultOutputModes: List[str] = ["text"]
    skills: List[AgentSkill]
class A2AClientError(Exception):
    # Base class for client-side A2A errors.
    pass
class A2AClientHTTPError(A2AClientError):
    """HTTP-level failure while talking to an A2A server."""
    def __init__(self, status_code: int, message: str):
        self.status_code = status_code
        self.message = message
        super().__init__(f"HTTP Error {status_code}: {message}")
class A2AClientJSONError(A2AClientError):
    """Response payload could not be parsed or validated as JSON."""
    def __init__(self, message: str):
        self.message = message
        super().__init__(f"JSON Error: {message}")
class MissingAPIKeyError(Exception):
    """Exception for missing API key."""
    pass
================================================
FILE: core/a2a/utils/__init__.py
================================================
================================================
FILE: core/a2a/utils/in_memory_cache.py
================================================
"""In Memory Cache utility."""
import threading
import time
from typing import Any, Dict, Optional
class InMemoryCache:
    """A thread-safe singleton key/value cache with optional per-key TTL.

    Only one instance exists per process: creation and first-time
    initialization use double-checked locking on a class-level lock, while
    all data access is serialized through a separate instance-level lock.
    """
    _instance: Optional["InMemoryCache"] = None
    _lock: threading.Lock = threading.Lock()
    _initialized: bool = False
    def __new__(cls):
        """Create (at most once) and return the singleton instance.

        Returns:
            The singleton instance of InMemoryCache.
        """
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
        return cls._instance
    def __init__(self):
        """Initialize the cache storage exactly once.

        __init__ runs on every InMemoryCache() call against the same
        singleton, so the _initialized flag (double-checked under the class
        lock) guards the real setup.
        """
        if not self._initialized:
            with self._lock:
                if not self._initialized:
                    # Values are arbitrary objects; the previous
                    # Dict[str, Dict[str, Any]] annotation was wrong.
                    self._cache_data: Dict[str, Any] = {}
                    # key -> absolute expiry time (time.time() based).
                    self._ttl: Dict[str, float] = {}
                    self._data_lock: threading.Lock = threading.Lock()
                    self._initialized = True
    def set(self, key: str, value: Any, ttl: Optional[int] = None) -> None:
        """Set a key-value pair.

        Args:
            key: The key for the data.
            value: The data to store.
            ttl: Time to live in seconds. If None, data will not expire
                (and any previous TTL for the key is cleared).
        """
        with self._data_lock:
            self._cache_data[key] = value
            if ttl is not None:
                self._ttl[key] = time.time() + ttl
            else:
                self._ttl.pop(key, None)
    def get(self, key: str, default: Any = None) -> Any:
        """Get the value associated with a key.

        Expired entries are removed lazily on access.

        Args:
            key: The key for the data.
            default: The value to return if the key is not found or expired.
        Returns:
            The cached value, or the default value if not found.
        """
        with self._data_lock:
            if key in self._ttl and time.time() > self._ttl[key]:
                del self._cache_data[key]
                del self._ttl[key]
                return default
            return self._cache_data.get(key, default)
    def delete(self, key: str) -> bool:
        """Delete a specific key-value pair (and its TTL, if any).

        Args:
            key: The key to delete.
        Returns:
            True if the key was found and deleted, False otherwise.
        """
        # Return annotation fixed: this method always returned a bool but
        # was annotated -> None.
        with self._data_lock:
            if key in self._cache_data:
                del self._cache_data[key]
                self._ttl.pop(key, None)
                return True
            return False
    def clear(self) -> bool:
        """Remove all cached data and TTL entries.

        Returns:
            True (always; the unreachable False branch was removed).
        """
        with self._data_lock:
            self._cache_data.clear()
            self._ttl.clear()
            return True
================================================
FILE: core/a2a/utils/push_notification_auth.py
================================================
from jwcrypto import jwk
import uuid
from starlette.responses import JSONResponse
from starlette.requests import Request
from typing import Any
import jwt
import time
import json
import hashlib
import httpx
import logging
from jwt import PyJWK, PyJWKClient
logger = logging.getLogger(__name__)
AUTH_HEADER_PREFIX = 'Bearer '
class PushNotificationAuth:
    """Shared base for push-notification signing and verification helpers."""

    def _calculate_request_body_sha256(self, data: dict[str, Any]):
        """Return the SHA-256 hex digest of *data* serialized as compact JSON.

        The signing agent and the verifying client must serialize payloads
        identically, so the JSON is emitted with no whitespace, non-ASCII
        characters kept as-is, and NaN/Infinity rejected.
        """
        canonical = json.dumps(
            data,
            ensure_ascii=False,
            allow_nan=False,
            indent=None,
            separators=(",", ":"),
        )
        return hashlib.sha256(canonical.encode()).hexdigest()
class PushNotificationSenderAuth(PushNotificationAuth):
    """Agent-side helper that signs and delivers push notifications.

    The agent generates an RSA key pair, publishes the public half on a JWKS
    endpoint, and signs each notification payload with a JWT so receivers can
    verify integrity (payload digest) and freshness (iat claim).
    """
    def __init__(self):
        self.public_keys = []
        # Populated by generate_jwk(); None until then (annotation fixed —
        # it was previously declared as a non-optional PyJWK).
        self.private_key_jwk: PyJWK | None = None
    @staticmethod
    async def verify_push_notification_url(url: str) -> bool:
        """Challenge *url* with a random validation token.

        The endpoint proves ownership by echoing the token back verbatim.
        Returns False on any network/HTTP error instead of raising.
        """
        async with httpx.AsyncClient(timeout=10) as client:
            try:
                validation_token = str(uuid.uuid4())
                response = await client.get(
                    url,
                    params={"validationToken": validation_token}
                )
                response.raise_for_status()
                is_verified = response.text == validation_token
                logger.info(f"Verified push-notification URL: {url} => {is_verified}")
                return is_verified
            except Exception as e:
                # Message fixed: this is URL verification, not sending
                # (it was copy-pasted from send_push_notification).
                logger.warning(f"Error during verifying push-notification URL {url}: {e}")
        return False
    def generate_jwk(self):
        """Generate an RSA signing key; the public part is served via JWKS."""
        key = jwk.JWK.generate(kty='RSA', size=2048, kid=str(uuid.uuid4()), use="sig")
        self.public_keys.append(key.export_public(as_dict=True))
        self.private_key_jwk = PyJWK.from_json(key.export_private())
    def handle_jwks_endpoint(self, _request: Request):
        """Allow clients to fetch the public keys (JWKS document)."""
        return JSONResponse({
            "keys": self.public_keys
        })
    def _generate_jwt(self, data: dict[str, Any]):
        """Sign the payload digest plus the issue time.

        The request_body_sha256 claim binds the token to the exact payload,
        ensuring integrity; including iat prevents replay attacks.
        """
        iat = int(time.time())
        return jwt.encode(
            {"iat": iat, "request_body_sha256": self._calculate_request_body_sha256(data)},
            key=self.private_key_jwk,
            headers={"kid": self.private_key_jwk.key_id},
            algorithm="RS256"
        )
    async def send_push_notification(self, url: str, data: dict[str, Any]):
        """POST *data* to *url* with a signed Bearer token.

        Delivery is best-effort: errors are logged, never raised.
        """
        jwt_token = self._generate_jwt(data)
        headers = {'Authorization': f"Bearer {jwt_token}"}
        async with httpx.AsyncClient(timeout=10) as client:
            try:
                response = await client.post(
                    url,
                    json=data,
                    headers=headers
                )
                response.raise_for_status()
                logger.info(f"Push-notification sent for URL: {url}")
            except Exception as e:
                logger.warning(f"Error during sending push-notification for URL {url}: {e}")
class PushNotificationReceiverAuth(PushNotificationAuth):
    """Client-side helper that verifies signed push notifications."""
    def __init__(self):
        self.public_keys_jwks = []
        # Set by load_jwks(); verification cannot run before that.
        self.jwks_client: PyJWKClient | None = None
    async def load_jwks(self, jwks_url: str):
        """Point the verifier at the sender's JWKS endpoint."""
        self.jwks_client = PyJWKClient(jwks_url)
    async def verify_push_notification(self, request: Request) -> bool:
        """Verify the Bearer JWT attached to an incoming push notification.

        Checks, in order: header present, signature against the sender's
        JWKS, payload digest matches the request body, token younger than
        five minutes (replay protection).

        Returns:
            True when all checks pass; False for a missing/malformed header.
        Raises:
            ValueError: when the body digest mismatches or the token expired.
        """
        auth_header = request.headers.get("Authorization")
        if not auth_header or not auth_header.startswith(AUTH_HEADER_PREFIX):
            # Use the module logger instead of print() for consistency with
            # the rest of this module.
            logger.warning("Invalid authorization header")
            return False
        token = auth_header[len(AUTH_HEADER_PREFIX):]
        signing_key = self.jwks_client.get_signing_key_from_jwt(token)
        decode_token = jwt.decode(
            token,
            signing_key,
            options={"require": ["iat", "request_body_sha256"]},
            algorithms=["RS256"],
        )
        actual_body_sha256 = self._calculate_request_body_sha256(await request.json())
        if actual_body_sha256 != decode_token["request_body_sha256"]:
            # Payload signature does not match the digest in the signed token.
            raise ValueError("Invalid request body")
        if time.time() - decode_token["iat"] > 60 * 5:
            # Do not allow push-notifications older than 5 minutes
            # (replay-attack prevention).
            raise ValueError("Token is expired")
        return True
================================================
FILE: core/agents/__init__.py
================================================
# Agents module initialization
================================================
FILE: core/agents/base/base_agent.py
================================================
import json
from typing import List, Dict, Any, Optional, Union, Callable, Sequence, TypeVar, cast
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models import LanguageModelLike
from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage, AIMessage, ToolMessage
from langchain_core.tools import BaseTool
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph
from langgraph.types import Checkpointer
from langgraph.graph.graph import CompiledGraph
from langgraph.graph.state import CompiledStateGraph
import logging
try:
import tiktoken
TIKTOKEN_AVAILABLE = True
except ImportError:
TIKTOKEN_AVAILABLE = False
print("Warning: Tiktoken not installed. Using naive token estimation.")
logger = logging.getLogger(__name__)
DEFAULT_MODEL_NAME = "gpt-4o-mini"
StateSchema = TypeVar("StateSchema", bound=Union[dict, Any])
class BaseAgent:
    """Common base for LangGraph-backed agents.

    Wraps a chat model plus tools, optionally truncates conversation context
    (by message count or by estimated token count via tiktoken), and manages
    building/compiling the underlying LangGraph workflow. Subclasses are
    expected to implement build() (or override compile()).
    """
    def __init__(
        self,
        name: str,
        model: Union[BaseChatModel, LanguageModelLike],
        tools: Optional[List[Union[BaseTool, Callable]]] = None,
        prompt: Optional[Union[str, SystemMessage, Callable]] = None,
        checkpointer: Optional[Checkpointer] = None,
        max_context_messages: Optional[int] = None, # Limit number of recent messages
        max_context_tokens: Optional[int] = None, # Limit total estimated tokens
        model_name: Optional[str] = "gpt-4o-mini", # Optional, used for future token estimation improvements
        description: str = "No description provided."
    ):
        # NOTE(review): truthiness check — a value of 0 for either limit
        # slips past this guard; `is not None` would be stricter. Confirm
        # intent before changing.
        if max_context_messages and max_context_tokens:
            raise ValueError("Only one of max_context_messages or max_context_tokens should be set.")
        # "LangGraph" is reserved (default graph name), so it is rejected.
        if name is None or name == "LangGraph":
            raise ValueError("Agent name must be specified.")
        self.name = name
        self.model = model
        self.tools = tools or []
        self.base_prompt = prompt
        self.checkpointer = checkpointer
        self.max_context_messages = max_context_messages
        self.max_context_tokens = max_context_tokens
        self.model_name = model_name or getattr(model, "model_name", DEFAULT_MODEL_NAME)
        self.description = description
        self._workflow: Optional[StateGraph] = None
        self._compiled_agent: Optional[CompiledGraph] = None # Stores the final compiled graph
        # Tokenizer for context-size estimation; falls back to cl100k_base,
        # then to naive character-based estimation when tiktoken is absent.
        self._tokenizer = None
        if TIKTOKEN_AVAILABLE:
            try: self._tokenizer = tiktoken.encoding_for_model(self.model_name)
            except KeyError:
                try:
                    self._tokenizer = tiktoken.get_encoding("cl100k_base")
                    # print(f"Warning: Tiktoken encoding for model '{self.model_name}' not found. Using 'cl100k_base'.")
                except Exception as e: print(f"Error getting tiktoken encoding 'cl100k_base': {e}.")
            except Exception as e: print(f"Error initializing tiktoken for model '{self.model_name}': {e}.")
    def _estimate_tokens(self, message: BaseMessage) -> int:
        """Estimate the token count of a message.

        Uses tiktoken when available; otherwise approximates as
        len(content) // 2. Text blocks inside list-style content are joined.
        """
        content_to_encode = ""
        if isinstance(message, (HumanMessage, SystemMessage, AIMessage)):
            if isinstance(message.content, str): content_to_encode = message.content
            elif isinstance(message.content, list):
                for block in message.content:
                    if isinstance(block, dict) and block.get("type") == "text": content_to_encode += block.get("text", "") + "\n"
        elif isinstance(message, ToolMessage):
            content_to_encode = message.content if isinstance(message.content, str) else json.dumps(message.content)
        else: content_to_encode = str(message)
        if self._tokenizer:
            try: return len(self._tokenizer.encode(content_to_encode, disallowed_special=()))
            except Exception: pass
        # Fallback heuristic when no tokenizer is available or encoding fails.
        return len(content_to_encode) // 2
    def _truncate_by_tokens(self, messages: Sequence[BaseMessage]) -> List[BaseMessage]:
        """Drop oldest messages until the estimated token total fits
        max_context_tokens, preserving a single leading SystemMessage when
        it fits."""
        if not self.max_context_tokens: return list(messages)
        truncated_messages: List[BaseMessage] = []
        total_tokens = 0
        preserved_system_message: Optional[SystemMessage] = None
        # Check if the first message is a SystemMessage, preserve it if so
        # Note: This assumes only ONE leading SystemMessage should be preserved.
        if messages and isinstance(messages[0], SystemMessage):
            preserved_system_message = messages[0]
            messages_to_truncate = messages[1:]
            try:
                system_tokens = self._estimate_tokens(preserved_system_message)
                # Only count if it doesn't exceed limit by itself
                if system_tokens <= self.max_context_tokens:
                    total_tokens += system_tokens
                else:
                    print(f"Warning: System message alone ({system_tokens} tokens) exceeds token limit ({self.max_context_tokens}). It might be truncated if context grows.")
                    # Don't add to total_tokens yet, let truncation logic handle it.
                    preserved_system_message = None # Don't preserve if it's too big initially
            except Exception: pass # Ignore errors estimating system message
        else:
            messages_to_truncate = messages
        # Iterate backwards from the most recent message
        for msg in reversed(messages_to_truncate):
            try:
                msg_tokens = self._estimate_tokens(msg)
                # Check if adding this message exceeds the limit
                if total_tokens + msg_tokens <= self.max_context_tokens:
                    truncated_messages.append(msg)
                    total_tokens += msg_tokens
                else:
                    print(f"Context Token Limit ({self.max_context_tokens}) reached. Truncating older messages.")
                    break # Limit reached
            except Exception as e:
                print(f"Warning: Failed to estimate tokens for message, skipping: {e}")
                continue
        # Re-add the system message at the beginning if it was preserved
        final_list = list(reversed(truncated_messages))
        if preserved_system_message:
            try: system_tokens = self._estimate_tokens(preserved_system_message)
            except Exception: system_tokens = 0
            # NOTE(review): the condition below is convoluted — it relies on
            # `msg_tokens` possibly leaking from the loop above; verify the
            # intended edge-case behavior before refactoring.
            # Ensure adding system message doesn't push over limit *again* (edge case)
            if total_tokens - (msg_tokens if 'msg_tokens' in locals() and total_tokens + msg_tokens > self.max_context_tokens else 0) + system_tokens <= self.max_context_tokens:
                final_list.insert(0, preserved_system_message)
            elif not final_list: # If only system message fits
                return [preserved_system_message]
            # Else: System message doesn't fit with the truncated history, omit it.
        return final_list
    def _truncate_messages(self, messages: Sequence[BaseMessage]) -> List[BaseMessage]:
        """Truncate message history per configuration: token limit takes
        precedence, then message-count limit; otherwise return a copy."""
        if self.max_context_tokens is not None:
            return self._truncate_by_tokens(messages)
        elif self.max_context_messages is not None:
            if messages and isinstance(messages[0], SystemMessage):
                # Keep system message + last N-1 messages
                keep_count = self.max_context_messages - 1
                return [messages[0]] + list(messages[-keep_count:]) if keep_count > 0 and len(messages) > 1 else [messages[0]]
            else:
                return list(messages[-self.max_context_messages:])
        return list(messages)
    def _get_state_value(self, state: StateSchema, key: str, default: Any = None) -> Any:
        # State may be a dict-like or attribute-style schema; support both.
        return state.get(key, default) if isinstance(state, dict) else getattr(state, key, default)
    def _format_tools_for_prompt(self, tools: List[Union[BaseTool, Callable]]) -> str:
        """Formats the tool list for inclusion in the prompt."""
        if not tools:
            return "No tools available for use."
        # Use getattr to access name and description safely.
        return "\n".join([
            f"- **{getattr(t, 'name', 'Unnamed Tool')}**: {getattr(t, 'description', 'No description available.')}"
            for t in tools
        ])
    # --- build/compile/get_agent ---
    def build(self) -> Optional[StateGraph]:
        """Build the agent's LangGraph workflow graph definition.

        Subclasses should implement this (or override compile() directly).
        """
        raise NotImplementedError("Subclasses must implement build() or override compile() directly.")
    def compile(self) -> CompiledGraph:
        """Compile the agent workflow, caching the compiled graph.

        Raises:
            ValueError: if build() does not produce a StateGraph and
                compile() was not overridden.
        """
        if self._compiled_agent is not None:
            return self._compiled_agent
        # Call build() to obtain the StateGraph.
        workflow = self.build()
        if workflow is None or not isinstance(workflow, StateGraph):
            # If build() does not return a StateGraph (e.g. a ReactAgent),
            # the subclass's compile() must be overridden to handle compilation.
            raise ValueError(
                f"Agent '{self.name}': build() did not return a valid StateGraph, "
                "and compile() was not overridden to handle direct compilation."
            )
        print(f"Compiling graph for agent: {self.name}")
        try:
            # Compile the StateGraph and cache the result.
            self._compiled_agent = workflow.compile(
                checkpointer=self.checkpointer,
                debug=getattr(self, 'debug', False) # pass through the debug flag
            )
            print(f"Graph compiled successfully for agent: {self.name}")
            return self._compiled_agent
        except Exception as e:
            print(f"!!! Error compiling graph for agent {self.name}: {e}")
            import traceback
            traceback.print_exc()
            raise e
    def get_agent(self) -> CompiledGraph:
        """Return the compiled core graph, compiling it first if needed."""
        if self._compiled_agent is None:
            print(f"Agent '{self.name}' not compiled yet. Compiling now.")
            self.compile()
        if self._compiled_agent is None:
            raise RuntimeError(f"Failed to get compiled agent for '{self.name}'.")
        return self._compiled_agent
    # --- invoke/ainvoke: standard entry points that call the compiled graph ---
    def invoke(self, state: Dict[str, Any], config: Optional[RunnableConfig] = None) -> Dict[str, Any]:
        """Synchronously invoke the compiled agent graph.

        On failure the input state is returned with an "error" key set
        instead of raising.
        """
        try:
            compiled_agent = self.get_agent() # get (or compile) the graph
            print(f"--- Invoking Agent: {self.name} ---")
            # Call the compiled graph directly; preprocessing is handled by the
            # graph's prompt callable (ReactAgent) or by supervisor-node logic
            # (custom supervisor).
            result = compiled_agent.invoke(state, config=config)
            print(f"--- Agent Invocation Complete: {self.name} ---")
            return cast(Dict[str, Any], result) # assumed to be a dict
        except Exception as e:
            print(f"!!! Error during {self.name} agent invocation: {e}")
            import traceback
            traceback.print_exc()
            # Return the state tagged with the error (may be the input state).
            state["error"] = f"Agent invocation failed: {e}"
            return state
    async def ainvoke(self, state: Dict[str, Any], config: Optional[RunnableConfig] = None) -> Dict[str, Any]:
        """Asynchronously invoke the compiled agent graph.

        Mirrors invoke(): errors are captured into the returned state.
        """
        try:
            compiled_agent = self.get_agent() # get (or compile) the graph
            print(f"--- Invoking Agent Async: {self.name} ---")
            # Call the compiled graph directly.
            result = await compiled_agent.ainvoke(state, config=config)
            print(f"--- Agent Invocation Complete Async: {self.name} ---")
            return cast(Dict[str, Any], result) # assumed to be a dict
        except Exception as e:
            print(f"!!! Error during {self.name} agent async invocation: {e}")
            import traceback
            traceback.print_exc()
            state["error"] = f"Agent async invocation failed: {e}"
            return state
    def run(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent workflow synchronously (alias for invoke).
        Args:
            state: The input state for the workflow
        Returns:
            The output state from the workflow
        """
        return self.invoke(state)
    async def arun(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent workflow asynchronously (alias for ainvoke).
        Args:
            state: The input state for the workflow
        Returns:
            The output state from the workflow
        """
        return await self.ainvoke(state)
    def reset(self):
        """Reset compiled state, forcing recompilation on next use."""
        print(f"Resetting compiled graph for agent '{self.name}'. Will recompile on next use.")
        self._compiled_agent = None
        self._workflow = None
    def add_tools(self, tools: List[Union[BaseTool, Callable]]) -> None:
        """Append tools to the agent's tool list and invalidate the compiled graph."""
        print(f"Warning: Adding tools to {self.name} post-initialization. Agent needs recompilation.")
        self.tools.extend(tools)
        self.reset()
================================================
FILE: core/agents/base/create_react_agent_wrapper.py
================================================
import logging
from typing import Optional, Callable, Dict
from langgraph.utils.runnable import RunnableCallable
from langchain_core.runnables.config import RunnableConfig
logger = logging.getLogger(__name__)
class CreateReactAgentWrapper(RunnableCallable):
    """Runnable wrapper around a compiled agent graph with lifecycle hooks.

    Lets callers mutate the state just before the wrapped agent runs and
    inspect ``(state, output)`` just after, on both sync and async paths.
    """

    def __init__(
        self,
        agent,
        name: str = "agent",
        before_invoke: Optional[Callable[[dict], dict]] = None,
        before_ainvoke: Optional[Callable[[dict], dict]] = None,
        after_invoke: Optional[Callable[[dict, dict], None]] = None,
        after_ainvoke: Optional[Callable[[dict, dict], None]] = None
    ):
        """
        :param agent: The underlying compiled graph or runnable
        :param name: Unique name for this wrapper (avoid duplicates)
        :param before_invoke: A sync callback that modifies the state before the wrapped agent call
        :param before_ainvoke: An async callback that modifies the state before the wrapped agent call
        :param after_invoke: A sync callback that inspects (state, output) after the wrapped call
        :param after_ainvoke: An async callback that inspects (state, output) after the wrapped call
        """
        self._agent = agent
        # Fall back to the wrapped agent's own name when none is supplied.
        self.name = name or getattr(agent, "name", "agent")
        self.before_invoke = before_invoke
        self.after_invoke = after_invoke
        self.before_ainvoke = before_ainvoke
        self.after_ainvoke = after_ainvoke

        def _sync_call(state: Dict, config: Optional[RunnableConfig] = None, **kwargs) -> Dict:
            logger.info(f"[{self.name}] (sync) call() - started. State keys: {list(state.keys())}")
            if self.before_invoke is not None:
                state = self.before_invoke(state)
            output = self._agent.invoke(state, config, **kwargs)
            if self.after_invoke is not None:
                self.after_invoke(state, output)
            logger.info(f"[{self.name}] (sync) call() - finished. Output keys: {list(output.keys())}")
            return output

        async def _async_call(state: Dict, config: Optional[RunnableConfig] = None, **kwargs) -> Dict:
            logger.info(f"[{self.name}] (async) acall() - started. State keys: {list(state.keys())}")
            if self.before_ainvoke is not None:
                state = await self.before_ainvoke(state)
            output = await self._agent.ainvoke(state, config, **kwargs)
            if self.after_ainvoke is not None:
                await self.after_ainvoke(state, output)
            logger.info(f"[{self.name}] (async) acall() - finished. Output keys: {list(output.keys())}")
            return output

        # RunnableCallable dispatches to the sync/async implementation.
        super().__init__(_sync_call, _async_call, name=self.name)
================================================
FILE: core/agents/base/react_agent.py
================================================
from typing import Any, Callable, Dict, List, Optional, Type, Union, Literal, Sequence
from langchain_core.language_models import LanguageModelLike, LanguageModelInput
from langchain_core.tools import BaseTool
from langgraph.graph import StateGraph
from langgraph.graph.graph import CompiledGraph
from langgraph.types import Checkpointer
from langgraph.store.base import BaseStore
from langchain_core.messages import BaseMessage, SystemMessage # 导入 SystemMessage
from langgraph.prebuilt import create_react_agent
from langgraph.prebuilt.chat_agent_executor import (
AgentState,
StateSchemaType,
StructuredResponseSchema,
)
from core.agents.base.base_agent import BaseAgent
import logging
logger = logging.getLogger(__name__)
class ReactAgent(BaseAgent):
    """ReAct Agent class for reasoning and acting with tools.

    This class provides a high-level interface for creating a ReAct agent workflow
    that can perform multi-step reasoning and tool calling. The core graph is
    produced by langgraph's `create_react_agent` inside `compile()`; `build()`
    is a no-op for this agent type.
    """
    def __init__(
        self,
        model: LanguageModelLike,
        tools: Optional[List[Union[BaseTool, Callable]]] = None,
        prompt: Optional[str] = None,
        response_format: Optional[
            Union[StructuredResponseSchema, tuple[str, StructuredResponseSchema]]
        ] = None,
        state_schema: StateSchemaType = AgentState,
        config_schema: Optional[Type[Any]] = None,
        checkpointer: Optional[Checkpointer] = None,
        store: Optional[BaseStore] = None,
        interrupt_before: Optional[List[str]] = None,
        interrupt_after: Optional[List[str]] = None,
        debug: bool = False,
        version: Literal["v1", "v2"] = "v1",
        name: str = "react_agent",
        description: str = "ReAct agent for reasoning and acting with tools.",
        max_context_messages: Optional[int] = None,
        max_context_tokens: Optional[int] = None,
        model_name: Optional[str] = "gpt-4o-mini",
    ):
        """Initialize a ReAct agent.

        Args:
            model: Language model to use for the agent
            tools: Optional list of tools available to the agent
            prompt: Optional prompt to use for the agent
            response_format: Optional schema for structured output
            state_schema: State schema to use for the agent graph
            config_schema: Optional schema for configuration
            checkpointer: Optional checkpointer for state persistence
            store: Optional store passed through to create_react_agent
            interrupt_before: Optional list of nodes to interrupt before execution
            interrupt_after: Optional list of nodes to interrupt after execution
            debug: Whether to enable debug mode
            version: Version of the ReAct agent ("v1" or "v2")
            name: Name of the agent
            description: Human-readable description of the agent
            max_context_messages: Optional limit on number of recent messages
            max_context_tokens: Optional limit on total estimated tokens
            model_name: Optional model name for token estimation
        """
        # Call BaseAgent's __init__ to initialize parent class attributes
        super().__init__(
            name=name,
            model=model,
            tools=tools or [],
            prompt=prompt,
            description=description,
            checkpointer=checkpointer,
            max_context_messages=max_context_messages,
            max_context_tokens=max_context_tokens,
            model_name=model_name
        )
        # Initialize ReactAgent-specific attributes; the `react_` prefix
        # keeps them from clashing with BaseAgent attribute names.
        self.response_format = response_format
        self.react_state_schema = state_schema
        self.react_config_schema = config_schema
        self.react_store = store
        self.react_interrupt_before = interrupt_before
        self.react_interrupt_after = interrupt_after
        self.react_debug = debug
        self.react_version = version
    def _prepare_llm_input(self, state: Dict[str, Any]) -> LanguageModelInput:
        """Prepare the LLM input: truncate the message history and prepend the
        base system prompt (if any).

        Passed as the `prompt` callable to `create_react_agent`, so it runs
        before every model call.
        """
        # 1. Read the messages from state (BaseAgent helper).
        messages = self._get_state_value(state, "messages", [])
        # 2. Truncate only the list fed to the LLM (BaseAgent helper); the
        #    full history stored by the checkpointer is unaffected.
        truncated_messages = self._truncate_messages(messages)
        # 3. Prepend the base system prompt when present.
        final_messages: List[BaseMessage] = []
        if self.base_prompt:
            if isinstance(self.base_prompt, str):
                final_messages.append(SystemMessage(content=self.base_prompt))
            elif isinstance(self.base_prompt, SystemMessage):
                final_messages.append(self.base_prompt)
            # Other Runnable/Callable prompt types would need extra handling,
            # but create_react_agent prompts are normally str or SystemMessage.
        final_messages.extend(truncated_messages)
        # Return the final message list for the LLM.
        return final_messages
    def build(self) -> Optional[StateGraph]:
        """No-op for ReactAgent: the core graph is created directly by
        create_react_agent inside compile(), so there is no StateGraph to build."""
        print(f"Note: ReactAgent '{self.name}' uses create_react_agent in compile(). Build returns None.")
        self._workflow = None
        return None
    def compile(self) -> CompiledGraph:
        """Build and compile the core ReAct workflow via create_react_agent,
        caching the result in `_compiled_agent`.

        Returns:
            The compiled graph (cached after the first call).

        Raises:
            Exception: re-raises any error from create_react_agent after
                logging it and clearing the cached graph.
        """
        if self._compiled_agent is not None:
            return self._compiled_agent
        print(f"[[DEBUG]] Compiling core ReAct agent for: {self.name} using create_react_agent")
        try:
            # Create the compiled graph; self._prepare_llm_input is passed as
            # the prompt callable so truncation + system prompt run per call.
            compiled_agent = create_react_agent(
                model=self.model,
                tools=self.tools,
                prompt=self._prepare_llm_input,  # key point: pass the preparation callable
                state_schema=self.react_state_schema,
                config_schema=self.react_config_schema,
                checkpointer=self.checkpointer,
                store=self.react_store,
                interrupt_before=self.react_interrupt_before,
                interrupt_after=self.react_interrupt_after,
                debug=self.react_debug,
                version=self.react_version,
                name=self.name,
            )
            # Cache the compiled graph for subsequent calls.
            self._compiled_agent = compiled_agent
            print(f"Core ReAct graph compiled successfully for agent: {self.name}")
            return self._compiled_agent
        except Exception as e:
            print(f"!!! Error compiling graph for agent {self.name} using create_react_agent: {e}")
            import traceback
            traceback.print_exc()
            self._compiled_agent = None
            raise e
================================================
FILE: core/agents/react_based_supervisor/__init__.py
================================================
# Re-export create_supervisor from the local supervisor module.
from .supervisor import create_supervisor
__all__ = ["create_supervisor"]
================================================
FILE: core/agents/react_based_supervisor/agent_name.py
================================================
import re
from typing import Literal
from langchain_core.language_models import LanguageModelLike
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.runnables import RunnableLambda
NAME_PATTERN = re.compile(r"<name>(.*?)</name>", re.DOTALL)
CONTENT_PATTERN = re.compile(r"<content>(.*?)</content>", re.DOTALL)
AgentNameMode = Literal["inline"]
def _is_content_blocks_content(content: list[dict] | str) -> bool:
return (
isinstance(content, list)
and len(content) > 0
and isinstance(content[0], dict)
and "type" in content[0]
)
def add_inline_agent_name(message: BaseMessage) -> BaseMessage:
    """Wrap an AI message's content in <name>/<content> XML tags.

    Messages that are not AIMessages, or that carry no ``name``, are
    returned untouched.

    Examples:
        >>> add_inline_agent_name(AIMessage(content="Hello", name="assistant"))
        AIMessage(content="<name>assistant</name><content>Hello</content>", name="assistant")

        >>> add_inline_agent_name(AIMessage(content=[{"type": "text", "text": "Hello"}], name="assistant"))
        AIMessage(content=[{"type": "text", "text": "<name>assistant</name><content>Hello</content>"}], name="assistant")
    """
    if not (isinstance(message, AIMessage) and message.name):
        return message
    tagged = message.model_copy()
    if not _is_content_blocks_content(tagged.content):
        # Plain string content: wrap the whole thing.
        tagged.content = f"<name>{message.name}</name><content>{tagged.content}</content>"
        return tagged
    text_blocks = [b for b in message.content if b["type"] == "text"]
    other_blocks = [b for b in message.content if b["type"] != "text"]
    # Only the first text block's text is wrapped (empty string when none).
    inner = text_blocks[0]["text"] if text_blocks else ""
    tagged.content = other_blocks + [
        {"type": "text", "text": f"<name>{message.name}</name><content>{inner}</content>"}
    ]
    return tagged
def remove_inline_agent_name(message: BaseMessage) -> BaseMessage:
    """Remove explicit name and content XML tags from the AI message content.

    Inverse of `add_inline_agent_name`. Messages that are not AIMessages,
    lack a name, or whose content does not carry both tags (with the inline
    name matching the message's own name) are returned unchanged.

    Examples:
        >>> remove_inline_agent_name(AIMessage(content="<name>assistant</name><content>Hello</content>", name="assistant"))
        AIMessage(content="Hello", name="assistant")

        >>> remove_inline_agent_name(AIMessage(content=[{"type": "text", "text": "<name>assistant</name><content>Hello</content>"}], name="assistant"))
        AIMessage(content=[{"type": "text", "text": "Hello"}], name="assistant")
    """
    if not isinstance(message, AIMessage) or not message.name:
        return message
    is_content_blocks_content = _is_content_blocks_content(message.content)
    if is_content_blocks_content:
        # Only the first text block is scanned for the tags.
        text_blocks = [block for block in message.content if block["type"] == "text"]
        if not text_blocks:
            return message
        non_text_blocks = [block for block in message.content if block["type"] != "text"]
        content = text_blocks[0]["text"]
    else:
        content = message.content
    name_match: re.Match | None = NAME_PATTERN.search(content)
    content_match: re.Match | None = CONTENT_PATTERN.search(content)
    # Leave the message untouched unless both tags are present and the
    # inline name agrees with the message's own name.
    if not name_match or not content_match:
        return message
    if name_match.group(1) != message.name:
        return message
    parsed_content = content_match.group(1)
    parsed_message = message.model_copy()
    if is_content_blocks_content:
        # non_text_blocks is always bound here: this branch only runs when
        # the blocks branch above ran.
        content_blocks = non_text_blocks
        if parsed_content:
            content_blocks.append({"type": "text", "text": parsed_content})
        parsed_message.content = content_blocks
    else:
        parsed_message.content = parsed_content
    return parsed_message
def with_agent_name(
    model: LanguageModelLike,
    agent_name_mode: AgentNameMode,
) -> LanguageModelLike:
    """Attach formatted agent names to the messages passed to and from a language model.

    This is useful for making a message history with multiple agents more coherent.

    NOTE: agent name is consumed from the message.name field.
    If you're using an agent built with create_react_agent, name is automatically set.
    If you're building a custom agent, make sure to set the name on the AI message returned by the LLM.

    Args:
        model: Language model to add agent name formatting to.
        agent_name_mode: Use to specify how to expose the agent name to the LLM.
            - "inline": Add the agent name directly into the content field of the AI message using XML-style tags.
              Example: "How can I help you" -> "<name>agent_name</name><content>How can I help you?</content>".
    """
    if agent_name_mode != "inline":
        raise ValueError(
            f"Invalid agent name mode: {agent_name_mode}. Needs to be one of: {AgentNameMode.__args__}"
        )

    def process_input_messages(messages: list[BaseMessage]) -> list[BaseMessage]:
        # Tag every inbound message before the model sees it.
        return [add_inline_agent_name(message) for message in messages]

    # Untag the model's output on the way back out.
    return (
        process_input_messages
        | model
        | RunnableLambda(remove_inline_agent_name, name="process_output_message")
    )
================================================
FILE: core/agents/react_based_supervisor/handoff.py
================================================
import re
import uuid
from langchain_core.messages import AIMessage, ToolCall, ToolMessage
from langchain_core.tools import BaseTool, InjectedToolCallId, tool
from langgraph.prebuilt import InjectedState
from langgraph.types import Command
from typing_extensions import Annotated
WHITESPACE_RE = re.compile(r"\s+")
def _normalize_agent_name(agent_name: str) -> str:
    """Normalize an agent name to be used inside the tool name."""
    trimmed = agent_name.strip()
    return re.sub(r"\s+", "_", trimmed).lower()
def create_handoff_tool(*, agent_name: str) -> BaseTool:
    """Create a tool that can handoff control to the requested agent.

    Args:
        agent_name: The name of the agent to handoff control to, i.e.
            the name of the agent node in the multi-agent graph.
            Agent names should be simple, clear and unique, preferably in snake_case,
            although you are only limited to the names accepted by LangGraph
            nodes as well as the tool names accepted by LLM providers
            (the tool name will look like this: `transfer_to_<agent_name>`).

    Returns:
        A tool named `transfer_to_<normalized_agent_name>` that, when
        called, routes execution to the target agent node.
    """
    tool_name = f"transfer_to_{_normalize_agent_name(agent_name)}"
    # NOTE: the inner function's docstring below is the tool description
    # presented to the LLM -- do not change it casually.
    @tool(tool_name)
    def handoff_to_agent(
        state: Annotated[dict, InjectedState],
        tool_call_id: Annotated[str, InjectedToolCallId],
    ):
        """Ask another agent for help."""
        # Record the transfer in the message history so the conversation
        # stays consistent with the tool call that triggered it.
        tool_message = ToolMessage(
            content=f"Successfully transferred to {agent_name}",
            name=tool_name,
            tool_call_id=tool_call_id,
        )
        # Command.PARENT routes in the parent graph: jump to the target
        # agent node and append the transfer ToolMessage to the history.
        return Command(
            goto=agent_name,
            graph=Command.PARENT,
            update={"messages": state["messages"] + [tool_message]},
        )
    return handoff_to_agent
def create_handoff_back_messages(
    agent_name: str, supervisor_name: str
) -> tuple[AIMessage, ToolMessage]:
    """Build the (AIMessage, ToolMessage) pair appended to the history when
    an agent returns control to the supervisor.

    The pair mimics a completed tool call so the transcript stays
    well-formed for tool-calling models.
    """
    call_id = str(uuid.uuid4())
    back_tool_name = f"transfer_back_to_{_normalize_agent_name(supervisor_name)}"
    ai_message = AIMessage(
        content=f"Transferring back to {supervisor_name}",
        tool_calls=[ToolCall(name=back_tool_name, args={}, id=call_id)],
        name=agent_name,
    )
    tool_message = ToolMessage(
        content=f"Successfully transferred back to {supervisor_name}",
        name=back_tool_name,
        tool_call_id=call_id,
    )
    return (ai_message, tool_message)
================================================
FILE: core/agents/react_based_supervisor/planning_handler.py
================================================
import uuid
import datetime
from typing import List, Dict, Optional
class PlanningStateHandler:
    """In-memory manager for a single project plan.

    A plan is a plain dict with:
        title, description       -- str
        status                   -- "planning", "in_progress", or "completed"
        tasks                    -- list of task dicts, each with:
                                    id, description, status, agent, notes, evaluation
        current_task_id          -- str or None
        created_at, updated_at   -- ISO-8601 timestamp strings
    """

    @staticmethod
    def _now() -> str:
        """Current local time as an ISO-8601 string."""
        return datetime.datetime.now().isoformat()

    @staticmethod
    def _gen_id() -> str:
        """Fresh UUID4 string used as a task id."""
        return str(uuid.uuid4())

    @staticmethod
    def create_plan(title: str, description: str) -> Dict:
        """Return a new, empty plan in the initial "planning" state."""
        stamp = PlanningStateHandler._now()
        return {
            "title": title,
            "description": description,
            "status": "planning",  # initial status
            "tasks": [],
            "current_task_id": None,
            "created_at": stamp,
            "updated_at": stamp,
        }

    @staticmethod
    def create_task(description: str,
                    status: str = "pending",
                    agent: str = "",
                    notes: str = "",
                    evaluation: str = "") -> Dict:
        """Build a single task dict with a generated id; text fields are stripped."""
        return {
            "id": PlanningStateHandler._gen_id(),
            "description": description.strip(),
            "status": (status or "pending").strip(),
            "agent": agent.strip(),
            "notes": notes.strip(),
            "evaluation": evaluation.strip(),
        }

    @staticmethod
    def add_tasks(plan: Dict, tasks_data: List[Dict]) -> Dict:
        """Append tasks described by the dicts in *tasks_data*; bumps updated_at."""
        for spec in tasks_data:
            plan["tasks"].append(
                PlanningStateHandler.create_task(
                    spec.get("description", "Untitled Task"),
                    spec.get("status", "pending"),
                    spec.get("agent", ""),
                    spec.get("notes", ""),
                    spec.get("evaluation", ""),
                )
            )
        plan["updated_at"] = PlanningStateHandler._now()
        return plan

    @staticmethod
    def update_task(plan: Dict,
                    by_id: Optional[str] = None,
                    new_desc: Optional[str] = None,
                    new_status: Optional[str] = None,
                    new_agent: Optional[str] = None,
                    new_notes: Optional[str] = None,
                    new_evaluation: Optional[str] = None) -> Dict:
        """Update the task identified by *by_id*, then recompute plan status."""
        if not by_id:
            raise ValueError("Must provide 'by_id' to update a task.")
        task = None
        for candidate in plan["tasks"]:
            if candidate["id"] == by_id:
                task = candidate
                break
        if task is None:
            raise ValueError("No matching task found with the given ID.")
        # Apply only the fields the caller actually supplied.
        updates = {
            "description": new_desc,
            "status": new_status,
            "agent": new_agent,
            "notes": new_notes,
            "evaluation": new_evaluation,
        }
        for field, value in updates.items():
            if value is not None:
                task[field] = value.strip()
        plan["updated_at"] = PlanningStateHandler._now()
        # Derive the overall plan status from the task statuses.
        statuses = [t["status"] for t in plan["tasks"]]
        if "in_progress" in statuses:
            plan["status"] = "in_progress"
        if statuses and all(s == "completed" for s in statuses):
            plan["status"] = "completed"
        return plan

    @staticmethod
    def set_current_task(plan: Dict, task_id: str) -> Dict:
        """Mark *task_id* as the plan's current task."""
        if not any(t["id"] == task_id for t in plan["tasks"]):
            raise ValueError("Task ID not found in plan.")
        plan["current_task_id"] = task_id
        plan["updated_at"] = PlanningStateHandler._now()
        return plan

    @staticmethod
    def finish_plan(plan: Dict) -> Dict:
        """Forcefully mark the plan as completed."""
        plan["status"] = "completed"
        plan["updated_at"] = PlanningStateHandler._now()
        return plan
================================================
FILE: core/agents/react_based_supervisor/simple_planning_tool.py
================================================
import json
from typing import Dict, List, Optional
from langchain_core.tools import BaseTool
from core.agents.supervisor.planning_handler import PlanningStateHandler
class SimplePlanningTool(BaseTool):
    """
    A tool that manages a single project plan in memory.
    It supports creating, viewing, adding tasks, updating tasks, setting the current task,
    and finishing the plan. All operations return a JSON string.
    """
    # NOTE: `name` and `description` are the tool schema shown to the LLM --
    # do not change them casually.
    name: str = "planning"
    description: str = ("Manage a project plan with actions to create, view, add tasks, update tasks, "
                        "set current task, and finish the plan. All data is stored in JSON.")
    def __init__(self):
        super().__init__()
        # NOTE(review): BaseTool is a pydantic model; whether assigning the
        # undeclared private attribute `_plan` is accepted depends on the
        # pydantic version in use -- confirm, or declare it on the class
        # (e.g. as a PrivateAttr).
        self._plan: Optional[Dict] = None
    def _run(self, action: str, **kwargs) -> str:
        """Dispatch *action* to the matching handler; always returns a JSON string."""
        try:
            if action == "create_plan":
                return self._handle_create_plan(**kwargs)
            elif action == "view_plan":
                return self._handle_view_plan()
            elif action == "add_tasks":
                return self._handle_add_tasks(**kwargs)
            elif action == "update_task":
                return self._handle_update_task(**kwargs)
            elif action == "set_current_task":
                return self._handle_set_current_task(**kwargs)
            elif action == "finish_plan":
                return self._handle_finish_plan()
            else:
                return self._json_error(f"Unknown action: {action}")
        except Exception as e:
            # Handler errors are reported back to the caller as JSON, not raised.
            return self._json_error(str(e))
    async def _arun(self, action: str, **kwargs) -> str:
        """Async entry point; the work is synchronous, so just delegate."""
        return self._run(action, **kwargs)
    def _handle_create_plan(self, **kwargs) -> str:
        """Create (and replace) the plan, optionally seeding it with tasks."""
        title = kwargs.get("title", "Untitled Plan")
        description = kwargs.get("description", "")
        tasks_data = kwargs.get("tasks", [])
        new_plan = PlanningStateHandler.create_plan(title, description)
        PlanningStateHandler.add_tasks(new_plan, tasks_data)
        self._plan = new_plan
        return self._json_ok(self._plan)
    def _handle_view_plan(self) -> str:
        """Return the current plan, lazily creating an empty one if needed."""
        if not self._plan:
            self._plan = PlanningStateHandler.create_plan("Untitled", "")
        return self._json_ok(self._plan)
    def _handle_add_tasks(self, **kwargs) -> str:
        """Append tasks to the plan, lazily creating an empty plan if needed."""
        if not self._plan:
            self._plan = PlanningStateHandler.create_plan("Untitled", "")
        tasks_data = kwargs.get("tasks", [])
        PlanningStateHandler.add_tasks(self._plan, tasks_data)
        return self._json_ok(self._plan)
    def _handle_update_task(self, **kwargs) -> str:
        """Update one task addressed by 'by_id'; requires an existing plan."""
        if not self._plan:
            raise ValueError("No plan exists. Please create a plan first.")
        # Use 'by_id' instead of 'task_id'
        by_id = kwargs.get("by_id")
        new_desc = kwargs.get("description")
        new_status = kwargs.get("status")
        new_agent = kwargs.get("agent")
        new_notes = kwargs.get("notes")
        new_evaluation = kwargs.get("evaluation")
        updated = PlanningStateHandler.update_task(
            self._plan,
            by_id=by_id,
            new_desc=new_desc,
            new_status=new_status,
            new_agent=new_agent,
            new_notes=new_notes,
            new_evaluation=new_evaluation
        )
        self._plan = updated
        return self._json_ok(self._plan)
    def _handle_set_current_task(self, **kwargs) -> str:
        """Set the plan's current task; requires an existing plan and 'task_id'."""
        if not self._plan:
            raise ValueError("No plan available to set current task.")
        tid = kwargs.get("task_id")
        if not tid:
            raise ValueError("Must provide 'task_id' for set_current_task.")
        PlanningStateHandler.set_current_task(self._plan, tid)
        return self._json_ok(self._plan)
    def _handle_finish_plan(self) -> str:
        """Force-complete the plan; requires an existing plan."""
        if not self._plan:
            raise ValueError("No plan exists to finish.")
        finished_plan = PlanningStateHandler.finish_plan(self._plan)
        self._plan = finished_plan
        return self._json_ok(finished_plan)
    def _json_ok(self, plan_data: Dict) -> str:
        """Wrap *plan_data* in a success envelope as pretty-printed JSON."""
        return json.dumps({"ok": True, "plan": plan_data}, ensure_ascii=False, indent=2)
    def _json_error(self, message: str) -> str:
        """Wrap *message* in an error envelope as pretty-printed JSON."""
        return json.dumps({"ok": False, "error": message}, ensure_ascii=False, indent=2)
================================================
FILE: core/agents/react_based_supervisor/state_schema.py
================================================
from typing import Dict, List, Optional, Any, Literal, TypedDict, Union
from langchain_core.messages import BaseMessage
from langgraph.prebuilt.chat_agent_executor import AgentState
# Plan-level status values
PlanningStatus = Literal["not_started", "planning", "executing", "completed", "failed"]
# Task-level status values
TaskStatus = Literal["pending", "in_progress", "completed", "failed"]
# Task item definition
class Task(TypedDict, total=False):
    """A single task item within a plan.

    Holds the task description, its status, the agent it is assigned to,
    and bookkeeping metadata. All keys are optional (total=False).
    """
    id: str  # unique task identifier
    description: str  # task description
    status: TaskStatus  # current task status
    agent: Optional[str]  # name of the assigned agent
    created_at: str  # creation timestamp (ISO string)
    updated_at: str  # last-update timestamp (ISO string)
    completed_at: Optional[str]  # completion timestamp (ISO string)
    dependencies: Optional[List[str]]  # ids of tasks this task depends on
    notes: Optional[str]  # free-form notes
# Plan definition
class Plan(TypedDict, total=False):
    """A complete plan.

    Holds the overall plan status, the task list, and related metadata.
    All keys are optional (total=False).
    """
    status: PlanningStatus  # overall plan status
    tasks: List[Task]  # list of task items
    current_task_id: Optional[str]  # id of the task currently being executed
    created_at: str  # creation timestamp (ISO string)
    updated_at: str  # last-update timestamp (ISO string)
    completed_at: Optional[str]  # completion timestamp (ISO string)
    title: Optional[str]  # plan title
    description: Optional[str]  # plan description
# Extend AgentState with planning support
class PlanningAgentState(AgentState, total=False):
    """Agent state with planning support.

    Extends AgentState with a `plan` key holding the current Plan.
    TypedDict fields cannot carry `= None` defaults (the previous
    `plan: Optional[Plan] = None` was invalid per PEP 589: the default is
    ignored at runtime while the key still registers as *required*).
    `total=False` makes the key optional instead; an absent `plan` key
    means "no plan yet".
    """
    plan: Optional[Plan]
================================================
FILE: core/agents/react_based_supervisor/supervisor.py
================================================
import inspect
from typing import Any, Callable, Literal, Optional, Type, Union, Dict, Optional
from langchain_core.language_models import BaseChatModel, LanguageModelLike
from langchain_core.tools import BaseTool
from langgraph.graph import END, START, StateGraph
from langgraph.prebuilt.chat_agent_executor import (
create_react_agent,
AgentState,
Prompt,
StateSchemaType,
StructuredResponseSchema,
)
from langgraph.pregel import Pregel
from langgraph.utils.runnable import RunnableCallable
from core.agents.base.react_agent import ReactAgent
from core.agents.supervisor.agent_name import AgentNameMode, with_agent_name
from core.agents.supervisor.handoff import (
create_handoff_back_messages,
create_handoff_tool,
)
OutputMode = Literal["full_history", "last_message"]
"""Mode for adding agent outputs to the message history in the multi-agent workflow
- `full_history`: add the entire agent message history
- `last_message`: add only the last message
"""
MODELS_NO_PARALLEL_TOOL_CALLS = {"o3-mini"}
def _supports_disable_parallel_tool_calls(model: LanguageModelLike) -> bool:
    """Return True when *model* can be bound with ``parallel_tool_calls=False``.

    Requires a BaseChatModel outside the known-unsupported set whose
    ``bind_tools`` signature accepts a ``parallel_tool_calls`` parameter.
    """
    if not isinstance(model, BaseChatModel):
        return False
    # None is never in the set, so missing model_name falls through.
    if getattr(model, "model_name", None) in MODELS_NO_PARALLEL_TOOL_CALLS:
        return False
    if not hasattr(model, "bind_tools"):
        return False
    params = inspect.signature(model.bind_tools).parameters
    return "parallel_tool_calls" in params
def _make_call_agent(
    agent: Pregel,
    output_mode: OutputMode,
    add_handoff_back_messages: bool,
    supervisor_name: str,
) -> Callable[[dict], dict] | RunnableCallable:
    """Build the runnable node that invokes *agent* and shapes its output.

    Depending on *output_mode* the agent's message history is forwarded in
    full or trimmed to the last message; optionally a handoff-back message
    pair is appended so the supervisor sees control returning. The returned
    RunnableCallable supports both sync and async invocation.
    """
    if output_mode not in OutputMode.__args__:
        raise ValueError(
            f"Invalid agent output mode: {output_mode}. Needs to be one of {OutputMode.__args__}"
        )

    def _shape_output(output: dict) -> dict:
        messages = output["messages"]
        if output_mode == "last_message":
            messages = messages[-1:]
        elif output_mode != "full_history":
            raise ValueError(
                f"Invalid agent output mode: {output_mode}. "
                f"Needs to be one of {OutputMode.__args__}"
            )
        if add_handoff_back_messages:
            messages.extend(create_handoff_back_messages(agent.name, supervisor_name))
        return {**output, "messages": messages}

    def call_agent(state: dict) -> dict:
        return _shape_output(agent.invoke(state))

    async def acall_agent(state: dict) -> dict:
        return _shape_output(await agent.ainvoke(state))

    return RunnableCallable(call_agent, acall_agent)
def create_supervisor(
    agents: list[Pregel],
    *,
    model: LanguageModelLike,
    tools: list[BaseTool | Callable] | None = None,
    prompt: Prompt | None = None,
    response_format: Optional[
        Union[StructuredResponseSchema, tuple[str, StructuredResponseSchema]]
    ] = None,
    state_schema: StateSchemaType = AgentState,
    config_schema: Type[Any] | None = None,
    output_mode: OutputMode = "last_message",
    add_handoff_back_messages: bool = True,
    supervisor_name: str = "supervisor",
    include_agent_name: AgentNameMode | None = None,
) -> StateGraph:
    """Create a multi-agent supervisor.

    Args:
        agents: List of agents to manage
        model: Language model to use for the supervisor
        tools: Tools to use for the supervisor
        prompt: Optional prompt to use for the supervisor. Can be one of:
            - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state["messages"].
            - SystemMessage: this is added to the beginning of the list of messages in state["messages"].
            - Callable: This function should take in full graph state and the output is then passed to the language model.
            - Runnable: This runnable should take in full graph state and the output is then passed to the language model.
        response_format: An optional schema for the final supervisor output.
            If provided, output will be formatted to match the given schema and returned in the 'structured_response' state key.
            If not provided, `structured_response` will not be present in the output state.
            Can be passed in as:
                - an OpenAI function/tool schema,
                - a JSON Schema,
                - a TypedDict class,
                - or a Pydantic class.
                - a tuple (prompt, schema), where schema is one of the above.
                    The prompt will be used together with the model that is being used to generate the structured response.
            !!! Important
                `response_format` requires the model to support `.with_structured_output`
            !!! Note
                `response_format` requires `structured_response` key in your state schema.
                You can use the prebuilt `langgraph.prebuilt.chat_agent_executor.AgentStateWithStructuredResponse`.
        state_schema: State schema to use for the supervisor graph.
        config_schema: An optional schema for configuration.
            Use this to expose configurable parameters via supervisor.config_specs.
        output_mode: Mode for adding managed agents' outputs to the message history in the multi-agent workflow.
            Can be one of:
            - `full_history`: add the entire agent message history
            - `last_message`: add only the last message (default)
        add_handoff_back_messages: Whether to add a pair of (AIMessage, ToolMessage) to the message history
            when returning control to the supervisor to indicate that a handoff has occurred.
        supervisor_name: Name of the supervisor node.
        include_agent_name: Use to specify how to expose the agent name to the underlying supervisor LLM.
            - None: Relies on the LLM provider using the name attribute on the AI message. Currently, only OpenAI supports this.
            - "inline": Add the agent name directly into the content field of the AI message using XML-style tags.
                Example: "How can I help you" -> "<name>agent_name</name><content>How can I help you?</content>"

    Returns:
        An (uncompiled) StateGraph wiring the supervisor node to each managed
        agent; callers compile it themselves.
    """
    # Every managed agent must carry a unique, explicit name: the name is
    # used both as the graph node id and in the handoff tool names.
    agent_names = set()
    for agent in agents:
        if agent.name is None or agent.name == "LangGraph":
            raise ValueError(
                "Please specify a name when you create your agent, either via `create_react_agent(..., name=agent_name)` "
                "or via `graph.compile(name=name)`."
            )
        if agent.name in agent_names:
            raise ValueError(
                f"Agent with name '{agent.name}' already exists. Agent names must be unique."
            )
        agent_names.add(agent.name)
    # One transfer_to_<agent> tool per managed agent, plus any caller-supplied tools.
    handoff_tools = [create_handoff_tool(agent_name=agent.name) for agent in agents]
    all_tools = (tools or []) + handoff_tools
    # Disable parallel tool calls when the model supports the flag, so the
    # supervisor hands off to at most one agent per turn.
    if _supports_disable_parallel_tool_calls(model):
        model = model.bind_tools(all_tools, parallel_tool_calls=False)
    else:
        model = model.bind_tools(all_tools)
    if include_agent_name:
        model = with_agent_name(model, include_agent_name)
    supervisor = create_react_agent(
        name=supervisor_name,
        model=model,
        tools=all_tools,
        prompt=prompt,
        state_schema=state_schema,
        response_format=response_format,
        debug=False,
    )
    # Build the multi-agent supervisor graph using the langgraph StateGraph setup
    builder = StateGraph(state_schema, config_schema=config_schema)
    builder.add_node(supervisor, destinations=tuple(agent_names) + (END,))
    builder.add_edge(START, supervisor.name)
    for agent in agents:
        # If agent is a "ReactAgent" or similar wrapper, unwrap it to the
        # compiled subgraph before wiring it into the supervisor graph.
        if hasattr(agent, "get_agent") and callable(agent.get_agent):
            agent = agent.get_agent()  # retrieve the compiled subgraph
        builder.add_node(
            agent.name,
            _make_call_agent(
                agent,
                output_mode,
                add_handoff_back_messages,
                supervisor_name,
            ),
        )
        # Each agent always returns control to the supervisor.
        builder.add_edge(agent.name, supervisor.name)
    return builder
================================================
FILE: core/agents/react_supervisor_agent.py
================================================
from typing import Any, Callable, Dict, List, Optional, Union
import re
from langchain_core.language_models import LanguageModelLike
from langchain_core.tools import BaseTool
from langgraph.graph import StateGraph
from langgraph.graph.state import CompiledStateGraph
from langgraph.types import Checkpointer
from langgraph.prebuilt.chat_agent_executor import (
AgentState,
StateSchemaType,
)
from langgraph.utils.runnable import RunnableCallable
from core.agents.react_based_supervisor import create_supervisor
from core.agents.react_based_supervisor.simple_planning_tool import SimplePlanningTool
from core.agents.base.base_agent import BaseAgent
from core.agents.react_based_supervisor.state_schema import PlanningAgentState
import logging
logger = logging.getLogger(__name__)
class SupervisorAgent(BaseAgent):
    """Supervisor class for managing multiple agents with planning capabilities.

    This class provides a high-level interface for creating a supervisor workflow
    that can manage and coordinate multiple agents. It also includes planning capabilities
    to create and manage a plan for complex tasks using a state-driven approach.

    The planning functionality is implemented using PlanningStateHandler and PlanningTool,
    which provide a more structured and flexible way to manage tasks compared to the
    previous TodolistTool approach.
    """
    _PROMPT_TEMPLATE = """You are a Supervisor Agent. Your job is to analyze user requests and coordinate multiple agents to complete tasks.
## Task Approach Methodology
### Understanding Requirements
- Analyzing user requests to identify core needs
- Asking clarifying questions when requirements are ambiguous
- Breaking down complex requests into manageable components
- Identifying potential challenges before beginning work
### Coordination
- Identifying appropriate agents for each task
- Delegating tasks to specialized agents
- Tracking progress and ensuring task completion
- Synthesizing information from multiple agents
Remember: Effective coordination is essential for successful task completion. Take time to understand the request and delegate appropriately.
{tools}
"""
    _PLANNING_PROMPT_TEMPLATE = """You are a Supervisor agent. Your role is to analyze user requests, break them down into actionable tasks, and coordinate specialized agents (e.g., research_expert, coder_expert, reporter_expert) to complete them.
# Working with Complex Requests
1. FIRST, carefully analyze the user's request and break it down into clear, actionable tasks
2. Identify which agent is best suited for each part of the task
3. Use the handoff tools to delegate tasks to appropriate agents ONE AT A TIME
4. WAIT for each agent to COMPLETELY FINISH their assigned task before proceeding
5. Review the output from each agent before delegating the next task
6. Maintain a sequential workflow - never delegate multiple tasks simultaneously
7. Synthesize the results and provide a coherent response to the user
8. Provide a final summary when all tasks are done
"""
    _PLANNING_TOOL_TEMPLATE = """
# Planning Tool Instructions
You have access to a "planning" tool that uses JSON for all operations. Do NOT include any "state" field in your calls. Use the following actions exactly as defined:
1. "create_plan": Create a new plan.
   - Required fields:
     - title (string)
     - description (string)
     - tasks (list of task objects). Each task object must include:
       "description": string,
       "status": "pending" (all tasks must have "status": "pending" initially),
       "agent": string (empty if not assigned),
       "notes": string (empty if none),
       "evaluation": string (empty if none)
   - Example:
     {
       "action": "create_plan",
       "title": "Python Scraper for Tech News",
       "description": "Build a Python scraper to fetch the latest tech news and save it as CSV",
       "tasks": [
         {"description": "Research Python scraping libraries", "status": "pending", "agent": "", "notes": "", "evaluation": ""},
         {"description": "Implement the scraper", "status": "pending", "agent": "", "notes": "", "evaluation": ""},
         {"description": "Test the code", "status": "pending", "agent": "", "notes": "", "evaluation": ""}
       ]
     }
2. "view_plan": Retrieve the current plan.
   - Example:
     {
       "action": "view_plan"
     }
3. "add_tasks": Add additional tasks to the plan.
   - Required:
     - tasks: list of task objects (same format as above)
   - Example:
     {
       "action": "add_tasks",
       "tasks": [
         {"description": "Write documentation", "status": "pending", "agent": "", "notes": "", "evaluation": ""}
       ]
     }
4. "update_task": Update an existing task.
   - Identify the task by "by_id" (the task's unique ID from the plan).
   - You may update any of: "description", "status", "agent", "notes", "evaluation".
   - Example:
     {
       "action": "update_task",
       "by_id": "TASK-UUID",
       "status": "completed",
       "evaluation": "The scraper works perfectly."
     }
5. "set_current_task": Set the current task by its ID.
   - Example:
     {
       "action": "set_current_task",
       "task_id": "TASK-UUID"
     }
6. "finish_plan": Mark the entire plan as completed.
   - Example:
     {
       "action": "finish_plan"
     }
Important:
- Always produce valid JSON for your tool calls.
- Continuously update and monitor the plan until every task's status is "completed" before delivering your final answer.
- If the plan is not fully completed, do not stop; keep updating the plan with appropriate calls.
"""

    def __init__(
        self,
        agents: List[BaseAgent],
        model: LanguageModelLike,
        tools: Optional[List[Union[BaseTool, Callable]]] = None,
        prompt: Optional[str] = None,
        state_schema: StateSchemaType = AgentState,
        supervisor_name: str = "supervisor",
        checkpointer: Optional[Checkpointer] = None,
        output_mode: str = "last_message",  # * full_history or last_message *
        enable_planning: bool = True,  # * True or False *
    ):
        """Initialize a supervisor.

        Args:
            agents: List of agents to manage
            model: Language model to use for the supervisor
            tools: Optional list of tools available to the supervisor.
                The list is copied; the caller's list is never mutated.
            prompt: Optional prompt to use for the supervisor.
                NOTE: currently unused — a class template is selected based on
                ``enable_planning`` instead.
            state_schema: State schema to use for the supervisor graph
            supervisor_name: Name of the supervisor node
            checkpointer: Optional checkpointer to use for the supervisor
            output_mode: Mode for adding agent outputs to the message history
                ("full_history" or "last_message")
            enable_planning: Whether to enable planning capabilities
        """
        # Planning-related configuration.
        self._enable_planning = enable_planning
        # When planning is enabled, upgrade the default state schema so the
        # graph can carry plan state.
        if self._enable_planning and state_schema == AgentState:
            state_schema = PlanningAgentState
        # Store agent-specific attributes before super().__init__
        self.agents = agents
        self.output_mode = output_mode
        self.supervisor_name = supervisor_name
        self.state_schema = state_schema
        self.checkpointer = checkpointer
        self._workflow = None
        # Copy the caller's tool list so we never mutate their argument.
        # BUG FIX: the original appended the planning tool only to the list
        # passed to BaseAgent, so self.tools (used by build()) silently lacked
        # it whenever tools=None. Now both always agree.
        tool_list: List[Union[BaseTool, Callable]] = list(tools) if tools else []
        if self._enable_planning:
            tool_list.append(SimplePlanningTool())
        self.tools = tool_list
        # Select the base prompt.
        # BUG FIX: the original joined the planning templates with the literal
        # string "/n/n" instead of real newlines ("\n\n").
        if self._enable_planning:
            _final_prompt = self._PLANNING_PROMPT_TEMPLATE + "\n\n" + self._PLANNING_TOOL_TEMPLATE
        else:
            _final_prompt = self._PROMPT_TEMPLATE
        # Initialize the BaseAgent parent class.
        super().__init__(
            name=supervisor_name,
            model=model,
            tools=tool_list,
            checkpointer=checkpointer,
            prompt=_final_prompt,
        )

    def build(self) -> StateGraph:
        """Build (and cache) the supervisor workflow.

        Returns:
            The built StateGraph
        """
        if self._workflow is not None:
            return self._workflow
        self._workflow = create_supervisor(
            agents=self.agents,
            model=self.model,
            tools=self.tools,
            prompt=self.base_prompt,
            state_schema=self.state_schema,
            supervisor_name=self.supervisor_name,
            output_mode=self.output_mode,
        )
        return self._workflow
================================================
FILE: core/agents/sb_supervisor_agent.py
================================================
# reason_graph/supervisor_agent.py
from typing import Callable, List, Optional, Union, cast, Literal
from langchain_core.language_models import LanguageModelLike
from langchain_core.tools import BaseTool
from langgraph.graph import StateGraph
from langgraph.types import Checkpointer
# 内部导入
from core.agents.base.base_agent import BaseAgent
from core.agents.state_based_supervisor.state_schema import PlanningAgentState, StateSchemaType # 导入 PlanningAgentState
# 导入重构后的 create_supervisor 函数
from core.agents.state_based_supervisor.supervisor_graph import create_supervisor
from core.agents.state_based_supervisor.agent_name import AgentNameMode
import logging
logger = logging.getLogger(__name__)
class SupervisorAgent(BaseAgent):
    """
    Supervisor Agent class (final version).

    Coordinates sub-agents and manages planning using a state-driven approach.
    invoke/ainvoke are inherited from BaseAgent and drive the complete flow.
    """
    def __init__(
        self,
        agents: List[BaseAgent],  # sub-agent instances
        model: LanguageModelLike,  # LLM used by the supervisor
        tools: Optional[List[Union[BaseTool, Callable]]] = None,  # supervisor-specific tools
        state_schema: StateSchemaType = PlanningAgentState,
        supervisor_name: str = "supervisor",
        checkpointer: Optional[Checkpointer] = None,
        output_mode: str = "last_message",
        # enable_planning: bool = True,  # no longer needed — planning is mandatory
        include_agent_name: Optional[str] = "inline",
        # BaseAgent parameters
        max_context_messages: Optional[int] = None,
        max_context_tokens: Optional[int] = None,
        model_name: Optional[str] = None,
    ):
        """Initialize the Supervisor Agent.

        Raises no exception on a wrong ``state_schema``; it is coerced to
        PlanningAgentState with a logged warning instead.
        """
        if state_schema != PlanningAgentState:
            # Use the module logger instead of print, consistent with the
            # logger defined at module scope.
            logger.warning("SupervisorAgent forces state_schema to PlanningAgentState.")
            state_schema = PlanningAgentState
        self.sub_agents = agents
        self.output_mode = output_mode
        self.include_agent_name = cast(Optional[AgentNameMode], include_agent_name)
        # Initialize the BaseAgent parent class.
        super().__init__(
            name=supervisor_name,
            model=model,
            tools=tools or [],
            checkpointer=checkpointer,
            prompt=None,  # the core prompt is handled inside supervisor_node_logic
            max_context_messages=max_context_messages,
            max_context_tokens=max_context_tokens,
            model_name=model_name,
        )
        # _workflow_definition and _executable_agent are managed by BaseAgent.

    def build(self) -> Optional[StateGraph]:
        """Build the supervisor's LangGraph workflow definition.

        Delegates to the refactored create_supervisor(), which returns a
        StateGraph containing the hand-written supervisor_node_logic.
        The definition is cached on self._workflow.

        Raises:
            Exception: re-raises any error from create_supervisor after logging.
        """
        if self._workflow:
            return self._workflow
        logger.info(f"Building supervisor graph definition for '{self.name}'...")
        try:
            graph_definition = create_supervisor(
                model=self.model,
                sub_agents=self.sub_agents,
                state_schema=PlanningAgentState,  # enforced
                tools=self.tools,
                output_mode=cast(Literal["full_history", "last_message"], self.output_mode),
                supervisor_name=self.name,
                include_agent_name=self.include_agent_name,
            )
            self._workflow = graph_definition  # cache the graph definition
            logger.info(f"Supervisor graph definition built for '{self.name}'.")
            return self._workflow
        except Exception:
            # logger.exception records the full traceback; a bare `raise`
            # preserves the original traceback (unlike `raise e`).
            logger.exception(f"!!! Error building supervisor graph definition '{self.name}'")
            self._workflow = None
            raise
    # compile() is inherited from BaseAgent:
    # it calls build() above for the StateGraph definition, compiles it, and
    # creates the final _executable_agent including preprocessing steps.
    # invoke, ainvoke, get_agent, reset are inherited from BaseAgent.
================================================
FILE: core/agents/state_based_supervisor/__init__.py
================================================
================================================
FILE: core/agents/state_based_supervisor/agent_name.py
================================================
import re
from typing import Literal
from langchain_core.language_models import LanguageModelLike
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.runnables import RunnableLambda
# Pattern extracting the agent name embedded as <name>…</name> in message content.
NAME_PATTERN = re.compile(r"<name>(.*?)</name>", re.DOTALL)
# Pattern extracting the wrapped message body from <content>…</content>.
CONTENT_PATTERN = re.compile(r"<content>(.*?)</content>", re.DOTALL)
# Supported modes for exposing agent names to the LLM; only "inline" exists today.
AgentNameMode = Literal["inline"]
def _is_content_blocks_content(content: list[dict] | str) -> bool:
return (
isinstance(content, list)
and len(content) > 0
and isinstance(content[0], dict)
and "type" in content[0]
)
def add_inline_agent_name(message: BaseMessage) -> BaseMessage:
    """Wrap a named AI message's text in <name>/<content> XML tags.

    Non-AI messages and messages without a name pass through untouched.

    Examples:
        >>> add_inline_agent_name(AIMessage(content="Hello", name="assistant"))
        AIMessage(content="<name>assistant</name><content>Hello</content>", name="assistant")
        >>> add_inline_agent_name(AIMessage(content=[{"type": "text", "text": "Hello"}], name="assistant"))
        AIMessage(content=[{"type": "text", "text": "<name>assistant</name><content>Hello</content>"}], name="assistant")
    """
    if not (isinstance(message, AIMessage) and message.name):
        return message
    tagged = message.model_copy()
    if not _is_content_blocks_content(tagged.content):
        # Plain-string content: wrap the whole string.
        tagged.content = f"<name>{message.name}</name><content>{tagged.content}</content>"
        return tagged
    # Content-block content: wrap the first text block's text and keep the
    # remaining (non-text) blocks in front of it.
    text_blocks = [b for b in message.content if b["type"] == "text"]
    other_blocks = [b for b in message.content if b["type"] != "text"]
    inner_text = text_blocks[0]["text"] if text_blocks else ""
    wrapped = f"<name>{message.name}</name><content>{inner_text}</content>"
    tagged.content = other_blocks + [{"type": "text", "text": wrapped}]
    return tagged
def remove_inline_agent_name(message: BaseMessage) -> BaseMessage:
    """Strip the <name>/<content> XML tags added by ``add_inline_agent_name``.

    Returns the message unchanged when it is not a named AIMessage, when the
    tags are absent, or when the tagged name does not match ``message.name``.

    Examples:
        >>> remove_inline_agent_name(AIMessage(content="<name>assistant</name><content>Hello</content>", name="assistant"))
        AIMessage(content="Hello", name="assistant")
        >>> remove_inline_agent_name(AIMessage(content=[{"type": "text", "text": "<name>assistant</name><content>Hello</content>"}], name="assistant"))
        AIMessage(content=[{"type": "text", "text": "Hello"}], name="assistant")
    """
    if not (isinstance(message, AIMessage) and message.name):
        return message
    has_blocks = _is_content_blocks_content(message.content)
    if has_blocks:
        text_blocks = [b for b in message.content if b["type"] == "text"]
        if not text_blocks:
            return message
        other_blocks = [b for b in message.content if b["type"] != "text"]
        raw_text = text_blocks[0]["text"]
    else:
        raw_text = message.content
    name_found: re.Match | None = NAME_PATTERN.search(raw_text)
    body_found: re.Match | None = CONTENT_PATTERN.search(raw_text)
    # Both tags must be present and the tagged name must match the sender.
    if name_found is None or body_found is None:
        return message
    if name_found.group(1) != message.name:
        return message
    stripped_text = body_found.group(1)
    cleaned = message.model_copy()
    if has_blocks:
        rebuilt_blocks = other_blocks
        if stripped_text:
            rebuilt_blocks.append({"type": "text", "text": stripped_text})
        cleaned.content = rebuilt_blocks
    else:
        cleaned.content = stripped_text
    return cleaned
def with_agent_name(
    model: LanguageModelLike,
    agent_name_mode: AgentNameMode,
) -> LanguageModelLike:
    """Attach formatted agent names to the messages passed to and from a language model.

    This is useful for making a message history with multiple agents more coherent.

    NOTE: the agent name is consumed from the ``message.name`` field.
    Agents built with create_react_agent set it automatically; custom agents
    must set the name on the AI message returned by the LLM themselves.

    Args:
        model: Language model to add agent name formatting to.
        agent_name_mode: How to expose the agent name to the LLM.
            - "inline": embed the name in the content field via XML-style tags,
              e.g. "How can I help you" ->
              "<name>agent_name</name><content>How can I help you?</content>".
    """
    if agent_name_mode != "inline":
        raise ValueError(
            f"Invalid agent name mode: {agent_name_mode}. Needs to be one of: {AgentNameMode.__args__}"
        )

    def _tag_all_messages(messages: list[BaseMessage]) -> list[BaseMessage]:
        # Pre-processing step: tag every incoming message with its agent name.
        return [add_inline_agent_name(m) for m in messages]

    return (
        _tag_all_messages
        | model
        | RunnableLambda(remove_inline_agent_name, name="process_output_message")
    )
================================================
FILE: core/agents/state_based_supervisor/evaluate_result_node.py
================================================
# reason_graph/evaluate_result_node.py
import json
import time
import copy
import traceback
import anyio
from typing import Dict, Any, List, Optional, Union
from langchain_core.messages import BaseMessage, AIMessage, ToolMessage
from langchain_core.runnables import RunnableConfig
# Internal imports (make sure the paths are correct)
try:
    from .state_schema import PlanningAgentState, TaskStatus, Plan, Task
    from .planning_handler import PlanningStateHandler
except ImportError as e:
    print(f"Error importing modules in evaluate_result_node.py: {e}")

    # Fallback stand-ins so the module still loads (e.g. in isolated tests).
    class PlanningAgentState(Dict):
        pass

    class Plan(Dict):
        pass

    class Task(Dict):
        pass

    TaskStatus = str

    class PlanningStateHandler:
        """No-op dummy: every operation returns its input (or None)."""

        @staticmethod
        def update_task(plan, by_id, **kwargs):
            return plan

        @staticmethod
        def set_current_task(plan, task_id):
            return plan

        @staticmethod
        def get_task(plan, task_id):
            return None

        @staticmethod
        def update_plan_status(plan):
            return plan
async def evaluate_result_node_logic(state: PlanningAgentState, config: Optional[RunnableConfig] = None) -> Dict[str, Any]:
    """
    Node logic that evaluates a sub-agent's result and updates the plan state
    (async; refined evaluation heuristics).

    Reads the last message from state, classifies the current task as
    completed/failed, writes the verdict into a copy of the plan, clears the
    current-task pointer, and returns a partial state-update dict
    (keys: "plan", optionally "error", always "messages": []).
    """
    print(f"--- Entering Evaluate Result Node ---")
    messages: List[BaseMessage] = state.get('messages', [])
    plan: Optional[Plan] = state.get('plan')
    last_message = messages[-1] if messages else None
    error_message: Optional[str] = None
    plan_updated: bool = False
    # Work on a deep copy so the plan held in state is never mutated in place.
    updated_plan: Optional[Plan] = copy.deepcopy(plan) if plan else None
    if not updated_plan:
        print("Evaluate Result Node: No plan found in state. Skipping.")
        return {}
    current_task_id = updated_plan.get("current_task_id")
    if not current_task_id:
        # Fallback logic for finding the current task: only recoverable when
        # exactly one task is marked in_progress.
        print("Warning: Evaluate Result Node - No current_task_id found in plan...")
        in_progress_tasks = [t for t in updated_plan.get('tasks', []) if t.get('status') == 'in_progress']
        if len(in_progress_tasks) == 1: current_task_id = in_progress_tasks[0].get('id'); print(f" Fallback: Found task {current_task_id}")
        else: error_message = "Evaluation failed: Cannot determine finished task."; print(f"ERROR: {error_message}"); return {"plan": updated_plan, "error": error_message, "messages": []}
    agent_result_content: Optional[str] = None
    agent_name: Optional[str] = None
    if isinstance(last_message, AIMessage):
        agent_result_content = str(last_message.content) if last_message.content is not None else ""  # Ensure string
        agent_name = last_message.name or "SubAgent"
        print(f" Evaluating result from: {agent_name} for task ID: {current_task_id}")
    else:
        # Anything other than an AIMessage is treated as a failure signal below
        # (the synthesized content starts with "Error:").
        agent_result_content = f"Error: Expected AIMessage result, got {type(last_message).__name__}."
        agent_name = "System/Error"
        print(f"Warning: Last message not AIMessage. Assuming task failed for {current_task_id}.")
    # --- Refined evaluation heuristics ---
    new_status: TaskStatus = "completed"  # default: success
    evaluation_notes = f"Result received from {agent_name}."
    # 1. Empty (or whitespace-only) content counts as failure.
    if agent_result_content is None or not agent_result_content.strip():
        new_status = "failed"
        evaluation_notes = f"Task failed: Agent {agent_name} returned empty content."
        print(f" Task {current_task_id} evaluated as FAILED (Empty Result).")
    # 2. Explicit error prefixes count as failure (tools are expected to
    #    prepend "Error: " or "Execution Failed: " to their output on error).
    elif agent_result_content.strip().startswith(("Error:", "Execution Failed:", "Tool Error:")):
        new_status = "failed"
        evaluation_notes = f"Task failed: Agent {agent_name} reported an error: {agent_result_content[:150]}..."
        print(f" Task {current_task_id} evaluated as FAILED (Explicit Error Signal).")
    # 3. (Optional) Short "I can't do this" style replies count as failure.
    elif len(agent_result_content) < 50 and any(kw in agent_result_content.lower() for kw in ["don't know", "cannot fulfill", "无法回答", "不明白"]):
        new_status = "failed"  # or "pending_review"? treated as failed for now
        evaluation_notes = f"Task likely failed: Agent {agent_name} indicated inability to fulfill request."
        print(f" Task {current_task_id} evaluated as FAILED (Agent Indicated Inability).")
    else:
        # None of the failure signals matched — treat as success.
        new_status = "completed"
        print(f" Task {current_task_id} evaluated as COMPLETED.")
    # --- End of evaluation heuristics ---
    # --- Update the plan state (logic unchanged) ---
    try:
        update_kwargs = {
            "new_status": new_status,
            "new_evaluation": evaluation_notes,
            # Truncate very long results so the plan notes stay compact.
            "new_notes": agent_result_content[:1000] + "..." if agent_result_content and len(agent_result_content) > 1000 else agent_result_content
        }
        print(f" Updating task {current_task_id} with: {{'status': '{new_status}', ...}}")
        if updated_plan and PlanningStateHandler.get_task(updated_plan, current_task_id):
            updated_plan = PlanningStateHandler.update_task(updated_plan, by_id=current_task_id, **update_kwargs)
            updated_plan = PlanningStateHandler.set_current_task(updated_plan, None)
            updated_plan = PlanningStateHandler.update_plan_status(updated_plan)
            print(f" Plan status after evaluation update: {updated_plan.get('status')}")
            plan_updated = True
        else:
            raise ValueError(f"Task ID '{current_task_id}' not found or plan invalid before update.")
    except ValueError as ve: error_message = f"Error updating plan: {ve}"; print(f"ERROR: {error_message}"); traceback.print_exc()
    except Exception as e: error_message = f"Unexpected error updating plan: {e}"; print(f"ERROR: {error_message}"); traceback.print_exc()
    # --- Assemble the returned partial state update (logic unchanged) ---
    updates: Dict[str, Any] = {}
    if updated_plan is not None: updates["plan"] = updated_plan
    elif plan is not None: updates["plan"] = plan
    # Record this node's error, or clear a stale error from a previous step.
    current_state_error = state.get("error")
    if error_message: updates["error"] = error_message
    elif current_state_error: updates["error"] = None
    updates["messages"] = []  # the evaluator itself adds no messages
    print(f"--- Exiting Evaluate Result Node. Plan updated: {plan_updated} ---")
    return updates
# --- Synchronous wrapper (unchanged) ---
def evaluate_result_node_logic_sync(state: PlanningAgentState, config: Optional[RunnableConfig] = None) -> Dict[str, Any]:
    """Synchronous wrapper: run evaluate_result_node_logic on a fresh event loop."""
    print(f"--- Entering Evaluate Result Node (Sync Wrapper) ---")
    try:
        import anyio
        result = anyio.run(evaluate_result_node_logic, state, config)  # type: ignore
    except Exception as e:
        # Surface the failure in the state rather than crashing the graph.
        print(f"Error running evaluate_result_node_logic synchronously: {e}")
        traceback.print_exc()
        return {
            "error": f"Evaluate Result sync execution failed: {e}",
            "plan": state.get("plan"),
            "messages": [],
        }
    return result
================================================
FILE: core/agents/state_based_supervisor/handoff.py
================================================
# reason_graph/handoff.py
# Handoff tools for transferring control between the supervisor and sub-agents.
import re
import uuid
from typing import List, Tuple # Import Tuple
from langchain_core.messages import AIMessage, ToolCall, ToolMessage, BaseMessage # Import BaseMessage
from langchain_core.tools import BaseTool, InjectedToolCallId, tool
from langgraph.prebuilt import InjectedState
from langgraph.types import Command
from typing_extensions import Annotated
WHITESPACE_RE = re.compile(r"\s+")
def _normalize_agent_name(agent_name: str) -> str:
"""Normalize an agent name to be used inside the tool name."""
if not agent_name: return "unknown_agent"
return WHITESPACE_RE.sub("_", agent_name.strip()).lower()
# Note: The original code uses @tool decorator which requires function arguments.
# To inject state, the decorated function needs the Annotated state argument.
# Let's define the function first and then apply the decorator, or use functools.partial.
# Using the function approach first for clarity.
def _handoff_to_agent_implementation(
    state: Annotated[dict, InjectedState],  # graph state, injected by LangGraph
    tool_call_id: Annotated[str, InjectedToolCallId],  # id of the triggering tool call, injected
    target_agent_name: str,  # node name of the agent receiving control
    tool_name: str  # tool name recorded on the confirmation ToolMessage
) -> Command:
    """Core handoff logic: ask another agent for help.

    Builds a ToolMessage confirming the handoff, then returns a Command that
    routes the graph to ``target_agent_name`` and appends that ToolMessage to
    the shared message history.

    (FIX: the original had two stacked docstrings — the second was a dead
    bare-string statement; they are merged into this one.)
    """
    # Debug logging kept intentionally to trace handoffs during development.
    print(f"\n--- DEBUG: Entering _handoff_to_agent_implementation ---")
    print(f" - Target Agent: {target_agent_name}")
    print(f" - Tool Name: {tool_name}")
    print(f" - Tool Call ID: {tool_call_id}")
    # print(f" - Current State Keys: {list(state.keys())}")  # optional: dump state keys
    # Create the ToolMessage confirming the handoff BEFORE generating the Command.
    tool_message = ToolMessage(
        content=f"Okay, handing off to {target_agent_name}. The current state and task context have been passed.",
        name=tool_name,
        tool_call_id=tool_call_id,
    )
    print(f" - Created ToolMessage: ID={tool_message.tool_call_id}, Name={tool_message.name}")
    # The Command tells LangGraph to route to the target agent node; the
    # ToolMessage rides along in the state update for the next step.
    command_obj = Command(
        goto=target_agent_name,
        # graph=Command.PARENT,  # PARENT is default, usually not needed unless nested graphs
        update={"messages": [tool_message]},  # return only the NEW message to be added
    )
    print(f" - Created Command: goto='{command_obj.goto}', update contains {len(command_obj.update.get('messages',[]))} message(s)")
    print(f"--- DEBUG: Exiting _handoff_to_agent_implementation ---")
    return command_obj
def create_handoff_tool(*, agent_name: str) -> BaseTool:
    """Create a tool that can handoff control to the requested agent.

    The returned tool is named ``transfer_to_<normalized agent name>`` and,
    when called, delegates to ``_handoff_to_agent_implementation`` with the
    target agent and tool name pre-bound.

    Args:
        agent_name: Name of the agent the tool should transfer control to.

    Raises:
        ValueError: If ``agent_name`` is empty.
    """
    if not agent_name:
        raise ValueError("agent_name cannot be empty for create_handoff_tool")
    normalized_name = _normalize_agent_name(agent_name)
    tool_name = f"transfer_to_{normalized_name}"
    # Use functools.partial to fix the target_agent_name and tool_name arguments.
    import functools
    specific_handoff_logic = functools.partial(
        _handoff_to_agent_implementation,
        target_agent_name=agent_name,
        tool_name=tool_name
    )
    # Decorate a thin wrapper rather than the partial itself.
    # The 'state' and 'tool_call_id' arguments are automatically injected by
    # LangGraph when the tool is called, thanks to the InjectedState /
    # InjectedToolCallId annotations below.
    @tool(tool_name)
    def handoff_tool_wrapper(
        state: Annotated[dict, InjectedState],
        tool_call_id: Annotated[str, InjectedToolCallId]
    ) -> Command:
        """Dynamically generated tool description: Ask the '{agent_name}' agent for help with the current task or question."""
        # --- Debug logging ---
        print(f"\n--- DEBUG: Handoff Tool '{tool_name}' (wrapper) CALLED ---")
        # ---
        return specific_handoff_logic(state=state, tool_call_id=tool_call_id)  # type: ignore
    # Set a more descriptive description (overrides the docstring-derived one).
    handoff_tool_wrapper.description = f"Use this tool to delegate the current task or ask a question to the '{agent_name}' agent. Pass the necessary context or instructions in your reasoning before calling this tool."
    return handoff_tool_wrapper
def create_handoff_back_messages(
    agent_name: str, supervisor_name: str
) -> Tuple[AIMessage, ToolMessage]:
    """Create the (AIMessage, ToolMessage) pair recorded when a sub-agent returns control.

    No real tool exists for the hand-back; the pair simulates the standard
    tool-call pattern so the message history stays coherent: the AIMessage
    signals the intent to hand back, the ToolMessage confirms that the
    transition occurred in the graph logic.

    NOTE: the AIMessage content here is a placeholder — `_make_call_agent` is
    expected to substitute the sub-agent's *actual* final response while
    keeping the tool_calls structure intact.
    """
    call_id = str(uuid.uuid4())
    back_tool_name = f"transfer_back_to_{_normalize_agent_name(supervisor_name)}"
    # AIMessage: carries the sub-agent's name so downstream consumers can
    # attribute the response; the ToolCall keeps the message-pair pattern
    # consistent even though no real tool runs on the supervisor side.
    handoff_ai = AIMessage(
        content=f"Task completed. Transferring back to {supervisor_name}.",
        tool_calls=[ToolCall(name=back_tool_name, args={}, id=call_id)],
        name=agent_name,
    )
    # ToolMessage: confirms, from the graph's perspective, that the transition happened.
    handoff_confirmation = ToolMessage(
        content=f"Successfully transferred back to {supervisor_name} from {agent_name}.",
        name=back_tool_name,
        tool_call_id=call_id,
    )
    return handoff_ai, handoff_confirmation
================================================
FILE: core/agents/state_based_supervisor/planner_node.py
================================================
import re
import json
import time
import copy
import ast
import traceback
import anyio # <--- 导入 anyio
from typing import Dict, Any, List, Optional, Union
from datetime import datetime
from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage
from langchain_core.runnables import RunnableConfig
# Internal imports
try:
    from .state_schema import PlanningAgentState, Plan
    from .planning_handler import PlanningStateHandler
    from .prompt import PLANNER_SYSTEM_PROMPT_TEMPLATE
except ImportError as e:
    print(f"Error importing modules in planner_node.py: {e}")

    # Fallback stand-ins so the module still loads (e.g. in isolated tests).
    class PlanningAgentState(Dict):
        pass

    class Plan(Dict):
        pass

    class PlanningStateHandler:
        pass

    PLANNER_SYSTEM_PROMPT_TEMPLATE = "Fallback Planner Prompt: Error loading template. Args: {agent_descriptions}"
# --- Planner node core logic (async) ---
async def planner_node_logic(
state: PlanningAgentState,
config: Optional[RunnableConfig],
model: Any, # Planner 使用的 LLM
agent_description_map: Dict[str, str] # 需要 Agent 描述来分配任务
) -> Dict[str, Any]:
"""Planner 节点逻辑:分析请求,生成初始计划"""
print(f"--- Entering Planner Node ---")
messages: List[BaseMessage] = state.get('messages', [])
# Planner 通常在 plan 为空时运行
plan: Optional[Plan] = state.get('plan')
if plan:
print("Planner Node: Plan already exists. Skipping plan creation.")
# 如果计划已存在,Planner 不应再执行,直接返回当前状态?
# 或者返回一个空更新,让图流向 Supervisor?
# 返回空更新更安全,让 Supervisor 继续
return {} # 返回空字典,状态不变
if not messages:
print("Planner Node: No messages found to create a plan from.")
return {"error": "Planner received no messages."}
# --- 1. 准备 Planner Prompt ---
# Planner 只需要 Agent 描述,不需要 plan_json 或 current_date?
# 可以让它知道日期
desc_list = [f"- {name}: {desc}" for name, desc in agent_description_map.items()]
agent_descriptions_str = "\n".join(desc_list)
current_date_str = datetime.now().strftime("%a, %b %d, %Y") # Planner 也可能需要日期
system_prompt_text = "Error: Planner prompt template could not be loaded/formatted."
try:
# 加载 Planner 的模板
from .prompt import PLANNER_SYSTEM_PROMPT_TEMPLATE
system_prompt_text = PLANNER_SYSTEM_PROMPT_TEMPLATE.format(
agent_descriptions=agent_descriptions_str,
# 如果 Planner Prompt 需要日期:
current_date=current_date_str
)
except ImportError: print("ERROR: Could not import PLANNER_SYSTEM_PROMPT_TEMPLATE")
except KeyError as e: print(f"ERROR: Missing key in planner prompt formatting: {e}")
except Exception as e: print(f"ERROR: Unexpected error loading/formatting planner prompt: {e}")
# Planner 的输入只需要 System Prompt 和用户的初始请求(通常是第一条)
# 或者传递最后几条消息?为了简单,先只用第一条 HumanMessage
initial_user_request = next((m for m in messages if isinstance(m, HumanMessage)), None)
if not initial_user_request:
print("Planner Node: No HumanMessage found in initial state.")
return {"error": "Planner did not find initial user request."}
llm_input_messages = [SystemMessage(content=system_prompt_text), initial_user_request]
# --- 2. 调用 Planner LLM ---
print("--- Calling Planner LLM ---")
response: Optional[AIMessage] = None
llm_error_msg: Optional[str] = None
try:
response = await model.ainvoke(llm_input_messages, config=config)
if not isinstance(response, AIMessage): raise TypeError("Planner LLM returned non-AIMessage.")
# Planner 的回复主要是指令,可以不设置 name
print(f"Planner LLM Raw Response Content: {response.content[:300]}...")
# Planner 不应该调用工具
if response.tool_calls: print("Warning: Planner LLM unexpectedly generated tool calls!")
messages_to_add: List[BaseMessage] = [response] # 可以选择是否将 Planner 的思考过程加入 history
except Exception as e:
print(f"!!! Error invoking Planner LLM: {e}"); traceback.print_exc()
llm_error_msg = f"Planner LLM invocation failed: {e}"
messages_to_add = []
response = None
# --- 3. 处理 Planner LLM 回复 (解析 CREATE_PLAN) ---
new_plan: Optional[Plan] = None
plan_updated: bool = False # 标记计划是否在本节点成功创建
directive_error_msg: Optional[str] = None
if response and isinstance(response.content, str):
try:
plan_match = re.search(r"PLAN_UPDATE:\s*CREATE_PLAN\s*(\{.*?\})\s*$", response.content, re.IGNORECASE | re.DOTALL | re.MULTILINE)
if plan_match:
args_json_str = plan_match.group(1)
print(f"Planner directive found: CREATE_PLAN with args: {args_json_str[:100]}...")
try:
args = json.loads(args_json_str)
if not isinstance(args, dict): raise ValueError("Args JSON not a dict.")
title=args.get("title", "Plan"); desc=args.get("description",""); tasks=args.get("tasks",[])
if isinstance(tasks, list) and all(isinstance(t, dict) and 'description' in t for t in tasks):
for task_data in tasks: task_data['status'] = 'pending' # 强制状态
new_plan = PlanningStateHandler.create_plan(title, desc)
new_plan = PlanningStateHandler.add_tasks(new_plan, tasks); plan_updated = True
print("DEBUG: Plan successfully created by Planner node.")
else: raise ValueError("Invalid 'tasks' format (must be list of dicts with 'description').")
except (json.JSONDecodeError, ValueError, KeyError, TypeError) as e:
err_msg = f"Error processing CREATE_PLAN directive: {type(e).__name__} - {e}"
print(err_msg); traceback.print_exc(); directive_error_msg = err_msg
except Exception as e:
err_msg = f"Unexpected error processing CREATE_PLAN: {type(e).__name__} - {e}"
print(err_msg); traceback.print_exc(); directive_error_msg = err_msg
else:
directive_error_msg = "Planner LLM did not output a valid PLAN_UPDATE: CREATE_PLAN directive."
print(f"Warning: {directive_error_msg}")
# 即使没有指令,也可能需要返回 Planner 的回复消息
# 但如果没有 plan,流程可能无法继续,所以记录错误
except Exception as outer_e:
directive_error_msg = f"Error searching for PLAN_UPDATE directive: {outer_e}"
print(f"ERROR: {directive_error_msg}"); traceback.print_exc()
# --- 4. 准备返回的状态更新 ---
updates: Dict[str, Any] = {"messages": messages_to_add} # 添加 Planner 的回复消息
if plan_updated and new_plan:
updates["plan"] = new_plan # 返回新创建的 Plan
final_error = llm_error_msg or directive_error_msg
if final_error: # 记录 Planner 步骤中遇到的第一个错误
updates["error"] = final_error
print(f"--- Exiting Planner Node. Plan created: {plan_updated} ---")
return updates
# --- Synchronous wrapper for the planner node (bridged via anyio) ---
def planner_node_logic_sync(
    state: PlanningAgentState,
    config: Optional[RunnableConfig],
    model: Any,
    agent_description_map: Dict[str, str]
) -> Dict[str, Any]:
    """Synchronous entry point that drives ``planner_node_logic`` to completion.

    Runs the async planner logic on a fresh event loop via ``anyio.run``.
    On any failure, the traceback is printed and an error-carrying state
    update is returned instead of raising.
    """
    print(f"--- Entering Planner Node (Sync Wrapper) ---")
    try:
        # anyio.run spins up an event loop, awaits the coroutine, and
        # hands its result back to this synchronous caller.
        result = anyio.run(  # type: ignore
            planner_node_logic, state, config, model, agent_description_map
        )
        return result
    except Exception as e:
        print(f"Error running planner_node_logic synchronously: {e}")
        traceback.print_exc()
        return {"error": f"Planner sync execution failed: {e}", "messages": state.get("messages",[])}
================================================
FILE: core/agents/state_based_supervisor/planning_handler.py
================================================
# reason_graph/planning_handler.py
import uuid
import datetime
from typing import List, Dict, Optional, Any
from .state_schema import TaskStatus, PlanningStatus, Task, Plan # 从 state_schema 导入类型
class PlanningStateHandler:
    """
    Static utilities for manipulating a plan dictionary that represents a
    project plan.

    The plan itself lives inside the LangGraph state; these helpers mutate
    the given dict in place and return it so calls can be chained. All
    timestamps are timezone-aware UTC ISO-8601 strings produced by
    :meth:`_now`.
    """

    @staticmethod
    def _now() -> str:
        """Return the current UTC time as an ISO-8601 string."""
        return datetime.datetime.now(datetime.timezone.utc).isoformat()

    @staticmethod
    def _gen_id() -> str:
        """Generate a unique task ID (a UUID4 string)."""
        return str(uuid.uuid4())

    @staticmethod
    def create_plan(title: str, description: str) -> "Plan":
        """Create a new Plan dict with no tasks, in the 'planning' state."""
        now = PlanningStateHandler._now()
        return Plan(
            title=title,
            description=description,
            status="planning",  # plan starts in the planning phase
            tasks=[],
            current_task_id=None,
            created_at=now,
            updated_at=now,
            completed_at=None,
        )

    @staticmethod
    def create_task(description: str,
                    agent: Optional[str] = None,
                    dependencies: Optional[List[str]] = None) -> "Task":
        """Create a new Task dict in the 'pending' state.

        Args:
            description: Human-readable task description (whitespace-stripped).
            agent: Optional name of the agent assigned to this task.
            dependencies: Optional list of task IDs this task depends on.
        """
        now = PlanningStateHandler._now()
        return Task(
            id=PlanningStateHandler._gen_id(),
            description=description.strip(),
            status="pending",  # new tasks always start pending
            agent=agent.strip() if agent else None,
            created_at=now,
            updated_at=now,
            completed_at=None,
            dependencies=dependencies or [],
            notes=None,
            evaluation=None,
            result=None,
        )

    @staticmethod
    def add_tasks(plan: "Plan", tasks_data: List[Dict[str, Any]]) -> "Plan":
        """Append tasks built from ``tasks_data`` dicts to the plan.

        Entries without a 'description' key are skipped. If the plan is still
        in 'planning' status it is promoted to 'ready'.

        Raises:
            ValueError: If ``plan`` or ``tasks_data`` has the wrong shape.
        """
        if not isinstance(plan, dict) or "tasks" not in plan:
            raise ValueError("Invalid plan structure provided.")
        if not isinstance(tasks_data, list):
            raise ValueError("tasks_data must be a list of task dictionaries.")
        for tinfo in tasks_data:
            desc = tinfo.get("description")
            if not desc:
                continue  # silently skip tasks without a description
            task = PlanningStateHandler.create_task(
                desc, tinfo.get("agent"), tinfo.get("dependencies")
            )
            plan["tasks"].append(task)
        # Having concrete tasks means the plan is ready for execution.
        if plan.get("status") == "planning":
            plan["status"] = "ready"
        plan["updated_at"] = PlanningStateHandler._now()
        return plan

    @staticmethod
    def update_task(plan: "Plan",
                    by_id: Optional[str] = None,
                    new_desc: Optional[str] = None,
                    new_status: Optional["TaskStatus"] = None,
                    new_agent: Optional[str] = None,
                    new_notes: Optional[str] = None,
                    new_evaluation: Optional[str] = None,
                    new_result: Optional[Any] = None) -> "Plan":
        """Update selected fields of the task identified by ``by_id``.

        Only fields passed as non-None and actually different from the current
        value are written. Setting status to 'completed' stamps the task's
        ``completed_at``. After any change, the plan-level status is
        recomputed via :meth:`update_plan_status`.

        Raises:
            ValueError: If the plan is malformed, ``by_id`` is missing, or no
                task matches the given ID.
        """
        if not isinstance(plan, dict) or "tasks" not in plan:
            raise ValueError("Invalid plan structure provided.")
        if not by_id:
            raise ValueError("Must provide 'by_id' to update a task.")
        task = next((t for t in plan["tasks"] if t.get("id") == by_id), None)
        if not task:
            raise ValueError(f"No matching task found with ID: {by_id}")
        updated = False
        if new_desc is not None and task.get("description") != new_desc.strip():
            task["description"] = new_desc.strip()
            updated = True
        if new_status is not None and task.get("status") != new_status.strip():
            task["status"] = new_status.strip()
            if new_status.strip() == "completed":
                task["completed_at"] = PlanningStateHandler._now()
            updated = True
        if new_agent is not None and task.get("agent") != new_agent.strip():
            task["agent"] = new_agent.strip()
            updated = True
        if new_notes is not None and task.get("notes") != new_notes.strip():
            task["notes"] = new_notes.strip()
            updated = True
        if new_evaluation is not None and task.get("evaluation") != new_evaluation.strip():
            task["evaluation"] = new_evaluation.strip()
            updated = True
        if new_result is not None:
            # Stored verbatim; results can be large, so use sparingly.
            task["result"] = new_result
            updated = True
        if updated:
            # Use one timestamp so task and plan agree on the update time
            # (previously two _now() calls could differ by microseconds).
            now = PlanningStateHandler._now()
            task["updated_at"] = now
            plan["updated_at"] = now
        # Recompute the plan-level status after any task change.
        return PlanningStateHandler.update_plan_status(plan)

    @staticmethod
    def update_plan_status(plan: "Plan") -> "Plan":
        """Derive the plan-level status from its tasks' statuses.

        Precedence: any failed task -> 'failed'; all tasks completed ->
        'completed' (stamping ``completed_at`` once, on the transition);
        any task in progress or pending review -> 'executing'; otherwise ->
        'ready' (unless the plan already reached a terminal state).
        """
        if not isinstance(plan, dict) or "tasks" not in plan:
            return plan  # tolerate malformed input: return unchanged
        tasks = plan["tasks"]
        current_status = plan.get("status")
        if not tasks:
            # An empty plan is considered 'ready' unless already terminal.
            if current_status not in ["completed", "failed", "error"]:
                plan["status"] = "ready"
            return plan
        any_failed = any(t.get("status") == "failed" for t in tasks)
        all_completed = all(t.get("status") == "completed" for t in tasks)
        any_in_progress = any(t.get("status") in ["in_progress", "pending_review"] for t in tasks)
        new_status = current_status
        if any_failed:
            new_status = "failed"
        elif all_completed:
            new_status = "completed"
            # BUGFIX: stamp completed_at only on the transition to 'completed';
            # previously it was refreshed on every call, so the recorded
            # completion time drifted with each status recomputation.
            if current_status != "completed" or not plan.get("completed_at"):
                plan["completed_at"] = PlanningStateHandler._now()
        elif any_in_progress:
            new_status = "executing"
        else:
            # Only pending (or otherwise non-terminal) tasks remain; the old
            # 'any_pending or not any_in_progress' condition was always true
            # at this point, so this is a plain else branch.
            if current_status not in ["completed", "failed", "error"]:
                new_status = "ready"
        if new_status != current_status:
            plan["status"] = new_status
            plan["updated_at"] = PlanningStateHandler._now()
        return plan

    @staticmethod
    def set_current_task(plan: "Plan", task_id: Optional[str]) -> "Plan":
        """Set (or clear, with None) the plan's current task ID.

        Raises:
            ValueError: If the plan is malformed or ``task_id`` does not
                match any task in the plan.
        """
        if not isinstance(plan, dict):
            raise ValueError("Invalid plan structure provided.")
        if task_id is None:
            plan["current_task_id"] = None
            plan["updated_at"] = PlanningStateHandler._now()
            return plan
        found = any(t.get("id") == task_id for t in plan.get("tasks", []))
        if not found:
            raise ValueError(f"Task ID '{task_id}' not found in plan.")
        if plan.get("current_task_id") != task_id:
            plan["current_task_id"] = task_id
            plan["updated_at"] = PlanningStateHandler._now()
        return plan

    @staticmethod
    def get_task(plan: "Plan", task_id: str) -> Optional["Task"]:
        """Return the task dict with the given ID, or None if absent."""
        if not isinstance(plan, dict) or "tasks" not in plan:
            return None
        return next((t for t in plan["tasks"] if t.get("id") == task_id), None)

    @staticmethod
    def get_next_pending_task(plan: "Plan") -> Optional["Task"]:
        """Return the first 'pending' task whose dependencies are all completed."""
        if not isinstance(plan, dict) or "tasks" not in plan:
            return None
        completed_task_ids = {t["id"] for t in plan["tasks"] if t.get("status") == "completed"}
        for task in plan["tasks"]:
            if task.get("status") != "pending":
                continue
            dependencies = task.get("dependencies", [])
            if not dependencies or all(dep_id in completed_task_ids for dep_id in dependencies):
                return task
        return None  # nothing runnable right now

    @staticmethod
    def finish_plan(plan: "Plan") -> "Plan":
        """Force-mark the plan as completed (idempotent).

        Raises:
            ValueError: If ``plan`` is not a dict.
        """
        if not isinstance(plan, dict):
            raise ValueError("Invalid plan structure provided.")
        if plan.get("status") != "completed":
            plan["status"] = "completed"
            plan["completed_at"] = PlanningStateHandler._now()
            plan["updated_at"] = PlanningStateHandler._now()
        return plan
================================================
FILE: core/agents/state_based_supervisor/prompt.py
================================================
# # --- Planner Agent System Prompt (新增) ---
# PLANNER_SYSTEM_PROMPT_TEMPLATE = """You are an expert planning agent. Your sole responsibility is to analyze a user request and create a detailed, step-by-step plan to fulfill it by coordinating specialized agents.
# The current date is {current_date}.
# ## Agent Descriptions:
# {agent_descriptions}
# *(This list includes the capabilities of available specialist agents.)*
# ## Task:
# Analyze the user request provided in the message history. Break it down into a sequence of logical tasks. For each task, determine the most suitable agent from the descriptions provided.
# ## Output Format:
# You MUST output **ONLY** a single `PLAN_UPDATE: CREATE_PLAN <JSON_ARGS>` directive in your response content. The JSON arguments MUST be valid and contain:
# - "title": A concise title for the overall plan.
# - "description": A brief description summarizing the user's goal.
# - "tasks": A list of task objects. Each task object MUST contain:
# - "description": A clear and actionable description of the specific sub-task.
# - "agent": The name of the MOST SUITABLE agent from the Agent Descriptions to perform this task. Leave empty ("") if unsure or if it's a general task.
# - "status": Set **all** initial tasks to **"pending"**.
# - (Optional) "dependencies": A list of task IDs (UUIDs that will be generated later) this task depends on, if any (usually empty for initial plan).
# **Example JSON Args:**
# `{{"title": "Research and Report on AI Ethics", "description": "User wants a report on AI ethics, including research and writing.", "tasks": [{{"description": "Research current trends in AI ethics using web search", "agent": "research_expert", "status": "pending"}}, {{"description": "Write a structured report summarizing the findings", "agent": "reporter_expert", "status": "pending", "dependencies": ["<ID_of_research_task>"]}}]}}`
# *(Note: Actual IDs are UUIDs generated later, dependencies often added via UPDATE_TASK)*
# **CRITICAL**: Output **ONLY** the `PLAN_UPDATE: CREATE_PLAN <JSON_ARGS>` directive and nothing else. Do not add conversational text. Make sure the JSON is valid.
# """
# SUPERVISOR_PLANNING_PROMPT_TEMPLATE = """You are a meticulous top-level Supervisor agent responsible for executing an existing plan, coordinating specialist agents, and managing task execution based on the provided state. You rely on an external evaluator node to assess task completion after agents run.
# The current date is {current_date}.
# ## Current Plan State:
# ```json
# {plan_json}
# ```
# *(Review plan status and individual task statuses and IDs (UUIDs). Your main goal is to drive the plan status to 'completed'.)*
# ## Agent Descriptions:
# {agent_descriptions}
# ## Your Goal:
# Execute the **existing plan** strictly step-by-step towards 'completed' status. Make **exactly one** logical primary decision per turn. **Do NOT evaluate agent results or mark tasks 'completed'/'failed' yourself.**
# ## Workflow & Decision Process (Strict Sequence):
# 1. **Analyze State**: Review the latest messages and the 'Current Plan State'. (Note: If the last message is from a sub-agent, an evaluator node has already processed it and updated the plan state before your turn).
# 2. **Determine ONE Next Action**: Execute the FIRST matching condition below and **IMMEDIATELY END YOUR TURN**:
# * **A. Initiate Next Task**: If the plan is 'ready' or 'executing', AND no task is currently 'in_progress', AND a 'pending' task is ready (dependencies met):
# * **Action**: Find the FIRST such task. Output **ONLY** `PLAN_UPDATE: UPDATE_TASK <JSON_ARGS_status_in_progress>`. **CRITICAL: Use the exact UUID for `by_id`!** JSON Args should be ` {{"by_id": "<task_uuid>", "status": "in_progress"}}`.
# * **B. Delegate In-Progress Task**: If a task **currently has status 'in_progress'** (check plan state):
# * **Action**: Identify the best agent. Output **ONLY** the `transfer_to_<agent_name>` tool call. **CRITICAL**: Tool call args **MUST** include `"task_id": "<TASK_UUID_FROM_PLAN>"` and clear `"instructions"`.
# * **C. Finish Plan**: If **ALL** tasks in the plan now have status 'completed' AND the plan status is NOT 'completed' yet (check plan state provided):
# * **Action**: Output **ONLY** `PLAN_UPDATE: FINISH_PLAN {{}}`.
# * **D. Generate Final Output**: If the **Plan Status IS 'completed'** (check plan state provided):
# * **Action**: Decide final output format based on original request. EITHER call `transfer_to_reporter_expert` (passing context in args, like relevant task IDs) OR generate the final `AIMessage` content yourself summarizing the overall result.
# * **E. Waiting/Blocked/Failed**: If no other action is appropriate (e.g., plan status 'failed', or waiting for dependencies):
# * **Action**: Output a bri
gitextract_zde6lsy3/
├── .gitignore
├── README.md
├── __init__.py
├── api/
│ ├── __init__.py
│ ├── agent/
│ │ ├── __init__.py
│ │ └── loader.py
│ ├── server.py
│ └── utils.py
├── core/
│ ├── __init__.py
│ ├── a2a/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agent_task_manager.py
│ │ ├── client/
│ │ │ ├── __init__.py
│ │ │ ├── card_resolver.py
│ │ │ └── client.py
│ │ ├── config.json
│ │ ├── server/
│ │ │ ├── __init__.py
│ │ │ ├── server.py
│ │ │ ├── task_manager.py
│ │ │ └── utils.py
│ │ ├── types.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── in_memory_cache.py
│ │ └── push_notification_auth.py
│ ├── agents/
│ │ ├── __init__.py
│ │ ├── base/
│ │ │ ├── base_agent.py
│ │ │ ├── create_react_agent_wrapper.py
│ │ │ └── react_agent.py
│ │ ├── react_based_supervisor/
│ │ │ ├── __init__.py
│ │ │ ├── agent_name.py
│ │ │ ├── handoff.py
│ │ │ ├── planning_handler.py
│ │ │ ├── simple_planning_tool.py
│ │ │ ├── state_schema.py
│ │ │ └── supervisor.py
│ │ ├── react_supervisor_agent.py
│ │ ├── sb_supervisor_agent.py
│ │ ├── state_based_supervisor/
│ │ │ ├── __init__.py
│ │ │ ├── agent_name.py
│ │ │ ├── evaluate_result_node.py
│ │ │ ├── handoff.py
│ │ │ ├── planner_node.py
│ │ │ ├── planning_handler.py
│ │ │ ├── prompt.py
│ │ │ ├── state_schema.py
│ │ │ ├── supervisor_graph.py
│ │ │ └── supervisor_node.py
│ │ └── sub_agents/
│ │ ├── __init__.py
│ │ ├── coder_agent.py
│ │ ├── data_analyst_agent.py
│ │ ├── designer_agent.py
│ │ ├── reporter_agent.py
│ │ └── research_agent.py
│ ├── llm/
│ │ ├── llm_manager.py
│ │ └── model_config.py
│ ├── mcp/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── config_loader.py
│ │ ├── mcp_server_config.json
│ │ ├── run_server.py
│ │ ├── server.py
│ │ └── test/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── minimal_fastmcp_test.py
│ │ └── test_minimal_client.py
│ ├── tools/
│ │ ├── __init__.py
│ │ ├── e2b_tool.py
│ │ ├── firecrawl_tool.py
│ │ ├── registry.py
│ │ └── replicate_flux_tool.py
│ └── utils/
│ ├── agent_utils.py
│ └── timezone.py
├── examples/
│ ├── 01_supervisor_test.py
│ ├── 02_supervisor_agent_test.py
│ ├── 03_tavily_tools_test.py
│ ├── 04_react_agent_test.py
│ ├── 05_react_agent_user_input.py
│ ├── 06_web_extraction_tools_test.py
│ ├── 07_web_extraction_with_filesystem.py
│ ├── 08_react_agent_tool_registry_test.py
│ ├── 09_e2b_code_interpreter_test.py
│ ├── 10_financial_data_analysis.py
│ ├── 11_e2b_sandbox_test.py
│ ├── 12_planning_supervisor_test.py
│ ├── 13_multi_agent_roles_test.py
│ ├── 14_mcp_client_fetch_test.py
│ ├── 15_mcp_agent_test.py
│ ├── 16_google_a2a/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agent_task_manager_test.py
│ │ ├── client_example.py
│ │ ├── currency_agent_test.py
│ │ ├── currency_agent_test_README.md
│ │ └── langgraph_integration.py
│ ├── TODO_computer_tool_demo.py
│ ├── __init__.py
│ ├── state_based_supervisor_examples/
│ │ ├── 01_simple.py
│ │ ├── 02_tavily.py
│ │ └── 03_multi_agents.py
│ └── web_agents/
│ ├── README.md
│ ├── README_SPEC.md
│ ├── __init__.py
│ ├── research_assistant/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ └── graph.py
│ └── weather_agent/
│ ├── README.md
│ └── __init__.py
├── instructions/
│ ├── 00.Langgraph 和 React Agent.md
│ ├── 01.supervisor_pattern.md
│ ├── 02.supervisor_pattern_agent.md
│ ├── 03.tavily_search_integration.md
│ ├── 04.react_agent.md
│ ├── 05.react_agent_user_input.md
│ ├── 06.web_extraction_tools.md
│ ├── 07.web_extraction_with_filesystem.md
│ ├── 08.react_agent_tool_registry.md
│ └── 09.e2b_sandbox_integration.md
├── log_analyzer.py
├── pyproject.toml
├── requirements.txt
├── setup.py
├── super_agents/
│ ├── __init__.py
│ ├── browser_use/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agent/
│ │ │ ├── __init__.py
│ │ │ ├── graph.py
│ │ │ ├── nodes.py
│ │ │ ├── prompts.py
│ │ │ ├── schemas.py
│ │ │ ├── state.py
│ │ │ └── tools.py
│ │ ├── agent.py
│ │ ├── browser/
│ │ │ ├── browser.py
│ │ │ ├── detector.py
│ │ │ ├── findVisibleInteractiveElements.js
│ │ │ ├── models.py
│ │ │ └── utils.py
│ │ ├── llm.py
│ │ └── main.py
│ ├── customized_deep_research/
│ │ ├── PRD_README.md
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── main.py
│ │ └── reason_graph/
│ │ ├── __init__.py
│ │ ├── graph.py
│ │ ├── nodes.py
│ │ ├── prompt.py
│ │ ├── schemas.py
│ │ ├── state.py
│ │ └── tools.py
│ └── deep_research/
│ ├── README.md
│ ├── __init__.py
│ ├── a2a_adapter/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── client_example.py
│ │ ├── deep_research_task_manager.py
│ │ ├── dr_terminal_output.md
│ │ ├── run_server.py
│ │ └── setup.py
│ ├── main.py
│ ├── output/
│ │ ├── research_report_analyze_smartvalue_co_ltds_9417t_core_business_key_productsservices_eg_government_cloud_solutions_mo_20250418_125137.md
│ │ ├── research_report_id_like_a_thorough_analysis_of_li_auto_stock_including_summary_company_overview_key_metrics_performa_20250327_121800.md
│ │ └── research_report_id_like_a_thorough_analysis_of_xpev_stock_including_summary_company_overview_key_metrics_performance_20250327_105350.md
│ ├── reason_graph/
│ │ ├── __init__.py
│ │ ├── graph.py
│ │ ├── nodes.py
│ │ ├── prompt.py
│ │ ├── schemas.py
│ │ ├── state.py
│ │ └── tools.py
│ └── tests/
│ ├── __init__.py
│ └── test_graph.py
├── web/
│ ├── .gitignore
│ ├── README.md
│ ├── app/
│ │ ├── api/
│ │ │ └── agent/
│ │ │ └── route.ts
│ │ ├── chat/
│ │ │ ├── [id]/
│ │ │ │ ├── agent-types.ts
│ │ │ │ ├── components/
│ │ │ │ │ ├── chatbot-node.tsx
│ │ │ │ │ ├── checkpoint-card.tsx
│ │ │ │ │ ├── node-card.tsx
│ │ │ │ │ ├── reminder.tsx
│ │ │ │ │ ├── research/
│ │ │ │ │ │ ├── report-preview.tsx
│ │ │ │ │ │ ├── research-node.tsx
│ │ │ │ │ │ ├── research-status.tsx
│ │ │ │ │ │ └── search-results.tsx
│ │ │ │ │ └── weather/
│ │ │ │ │ ├── cloudy.tsx
│ │ │ │ │ ├── rainy.tsx
│ │ │ │ │ ├── snowy.tsx
│ │ │ │ │ ├── sunny.tsx
│ │ │ │ │ └── weather-node.tsx
│ │ │ │ └── page.tsx
│ │ │ └── page.tsx
│ │ ├── deep-research/
│ │ │ ├── [id]/
│ │ │ │ └── page.tsx
│ │ │ └── page.tsx
│ │ ├── globals.css
│ │ ├── layout.tsx
│ │ └── page.tsx
│ ├── components/
│ │ ├── app-sidebar.tsx
│ │ ├── theme-provider.tsx
│ │ ├── theme-switcher.tsx
│ │ └── ui/
│ │ ├── badge.tsx
│ │ ├── button.tsx
│ │ ├── card.tsx
│ │ ├── checkbox.tsx
│ │ ├── dialog.tsx
│ │ ├── input.tsx
│ │ ├── popover.tsx
│ │ ├── progress.tsx
│ │ ├── separator.tsx
│ │ ├── sheet.tsx
│ │ ├── sidebar.tsx
│ │ ├── skeleton.tsx
│ │ ├── textarea.tsx
│ │ └── tooltip.tsx
│ ├── components.json
│ ├── eslint.config.mjs
│ ├── hooks/
│ │ ├── use-mobile.tsx
│ │ └── useLangGraphAgent/
│ │ ├── actions.ts
│ │ ├── api.ts
│ │ ├── ascii-tree.ts
│ │ ├── types.ts
│ │ └── useLangGraphAgent.tsx
│ ├── next.config.ts
│ ├── package.json
│ ├── postcss.config.mjs
│ ├── stores/
│ │ └── chat-store.tsx
│ ├── tailwind.config.ts
│ └── tsconfig.json
└── web_for_a2a/
├── .gitignore
├── Instruction.md
├── README.md
├── app/
│ ├── api/
│ │ └── a2a/
│ │ └── route.ts
│ ├── deepresearch/
│ │ └── page.tsx
│ ├── globals.css
│ ├── layout.tsx
│ └── page.tsx
├── package.json
├── postcss.config.js
├── tailwind.config.js
└── tsconfig.json
SYMBOL INDEX (824 symbols across 144 files)
FILE: api/agent/loader.py
function list_available_agents (line 25) | def list_available_agents() -> Dict[str, str]:
function load_agent (line 71) | def load_agent(agent_name: str) -> Optional[CompiledGraph]:
function get_default_agent (line 113) | def get_default_agent() -> Optional[CompiledGraph]:
FILE: api/server.py
function list_agents (line 40) | async def list_agents():
function state (line 46) | async def state(thread_id: str | None = None, agent: Optional[str] = Que...
function history (line 63) | async def history(thread_id: str | None = None, agent: Optional[str] = Q...
function stop_agent (line 82) | async def stop_agent(request: Request):
function agent (line 96) | async def agent(request: Request):
function main (line 289) | def main():
FILE: api/utils.py
function checkpoint_event (line 7) | def checkpoint_event(value):
function message_chunk_event (line 62) | def message_chunk_event(node_name, message_chunk):
function interrupt_event (line 85) | def interrupt_event(interrupts):
function custom_event (line 95) | def custom_event(value):
function format_state_snapshot (line 103) | def format_state_snapshot(snapshot: StateSnapshot):
function stream_update_event (line 118) | def stream_update_event(data: dict):
FILE: core/a2a/agent_task_manager.py
class AgentTaskManager (line 21) | class AgentTaskManager(InMemoryTaskManager):
method __init__ (line 26) | def __init__(self, agent, notification_sender_auth=None):
method _run_streaming_agent (line 38) | async def _run_streaming_agent(self, request: SendTaskStreamingRequest):
method _get_user_query (line 96) | def _get_user_query(self, task_send_params: TaskSendParams) -> str:
method _validate_request (line 125) | def _validate_request(
method on_send_task (line 154) | async def on_send_task(self, request: SendTaskRequest) -> SendTaskResp...
method on_send_task_subscribe (line 215) | async def on_send_task_subscribe(
method _process_agent_response (line 255) | async def _process_agent_response(
method on_resubscribe_to_task (line 281) | async def on_resubscribe_to_task(
method send_task_notification (line 297) | async def send_task_notification(self, task: Task):
method set_push_notification_info (line 309) | async def set_push_notification_info(self, task_id: str, push_notifica...
FILE: core/a2a/client/card_resolver.py
class A2ACardResolver (line 9) | class A2ACardResolver:
method __init__ (line 10) | def __init__(self, base_url, agent_card_path="/.well-known/agent.json"):
method get_agent_card (line 14) | def get_agent_card(self) -> AgentCard:
FILE: core/a2a/client/client.py
class A2AClient (line 25) | class A2AClient:
method __init__ (line 26) | def __init__(self, agent_card: AgentCard = None, url: str = None):
method send_task (line 34) | async def send_task(self, payload: dict[str, Any]) -> SendTaskResponse:
method send_task_streaming (line 38) | async def send_task_streaming(
method _send_request (line 54) | async def _send_request(self, request: JSONRPCRequest) -> dict[str, Any]:
method get_task (line 68) | async def get_task(self, payload: dict[str, Any]) -> GetTaskResponse:
method cancel_task (line 72) | async def cancel_task(self, payload: dict[str, Any]) -> CancelTaskResp...
method set_task_callback (line 76) | async def set_task_callback(
method get_task_callback (line 82) | async def get_task_callback(
FILE: core/a2a/server/server.py
class A2AServer (line 39) | class A2AServer:
method __init__ (line 40) | def __init__(
method start (line 78) | def start(self):
method _get_agent_card (line 85) | def _get_agent_card(self, request: Request) -> JSONResponse:
method _process_request (line 92) | async def _process_request(self, request: Request) -> Union[JSONRespon...
method _handle_exception (line 124) | def _handle_exception(self, e: Exception, request_id: Optional[Union[s...
method _create_response (line 157) | def _create_response(self, result: Any) -> Union[JSONResponse, EventSo...
FILE: core/a2a/server/task_manager.py
class TaskManager (line 40) | class TaskManager(ABC):
method on_get_task (line 42) | async def on_get_task(self, request: GetTaskRequest) -> GetTaskResponse:
method on_cancel_task (line 46) | async def on_cancel_task(self, request: CancelTaskRequest) -> CancelTa...
method on_send_task (line 50) | async def on_send_task(self, request: SendTaskRequest) -> SendTaskResp...
method on_send_task_subscribe (line 54) | async def on_send_task_subscribe(
method on_set_task_push_notification (line 60) | async def on_set_task_push_notification(
method on_get_task_push_notification (line 66) | async def on_get_task_push_notification(
method on_resubscribe_to_task (line 72) | async def on_resubscribe_to_task(
class InMemoryTaskManager (line 78) | class InMemoryTaskManager(TaskManager):
method __init__ (line 79) | def __init__(self):
method on_get_task (line 86) | async def on_get_task(self, request: GetTaskRequest) -> GetTaskResponse:
method on_cancel_task (line 101) | async def on_cancel_task(self, request: CancelTaskRequest) -> CancelTa...
method on_send_task (line 113) | async def on_send_task(self, request: SendTaskRequest) -> SendTaskResp...
method on_send_task_subscribe (line 117) | async def on_send_task_subscribe(
method set_push_notification_info (line 122) | async def set_push_notification_info(self, task_id: str, notification_...
method get_push_notification_info (line 132) | async def get_push_notification_info(self, task_id: str) -> PushNotifi...
method has_push_notification_info (line 142) | async def has_push_notification_info(self, task_id: str) -> bool:
method on_set_task_push_notification (line 147) | async def on_set_task_push_notification(
method on_get_task_push_notification (line 166) | async def on_get_task_push_notification(
method upsert_task (line 185) | async def upsert_task(self, task_send_params: TaskSendParams) -> Task:
method on_resubscribe_to_task (line 203) | async def on_resubscribe_to_task(
method update_store (line 208) | async def update_store(
method append_task_history (line 230) | def append_task_history(self, task: Task, historyLength: int | None):
method setup_sse_consumer (line 239) | async def setup_sse_consumer(self, task_id: str, is_resubscribe: bool ...
method enqueue_events_for_sse (line 251) | async def enqueue_events_for_sse(self, task_id, task_update_event):
method dequeue_events_for_sse (line 260) | async def dequeue_events_for_sse(
FILE: core/a2a/server/utils.py
function are_modalities_compatible (line 9) | def are_modalities_compatible(
function new_incompatible_types_error (line 23) | def new_incompatible_types_error(request_id):
function new_not_implemented_error (line 27) | def new_not_implemented_error(request_id):
FILE: core/a2a/types.py
class TaskState (line 11) | class TaskState(str, Enum):
class TextPart (line 21) | class TextPart(BaseModel):
class FileContent (line 27) | class FileContent(BaseModel):
method check_content (line 34) | def check_content(self) -> Self:
class FilePart (line 44) | class FilePart(BaseModel):
class DataPart (line 50) | class DataPart(BaseModel):
class Message (line 59) | class Message(BaseModel):
class TaskStatus (line 65) | class TaskStatus(BaseModel):
method serialize_dt (line 71) | def serialize_dt(self, dt: datetime, _info):
class Artifact (line 75) | class Artifact(BaseModel):
class Task (line 85) | class Task(BaseModel):
class TaskStatusUpdateEvent (line 94) | class TaskStatusUpdateEvent(BaseModel):
class TaskArtifactUpdateEvent (line 101) | class TaskArtifactUpdateEvent(BaseModel):
class AuthenticationInfo (line 107) | class AuthenticationInfo(BaseModel):
class PushNotificationConfig (line 114) | class PushNotificationConfig(BaseModel):
class TaskIdParams (line 120) | class TaskIdParams(BaseModel):
class TaskQueryParams (line 125) | class TaskQueryParams(TaskIdParams):
class TaskSendParams (line 129) | class TaskSendParams(BaseModel):
class TaskPushNotificationConfig (line 139) | class TaskPushNotificationConfig(BaseModel):
class JSONRPCMessage (line 147) | class JSONRPCMessage(BaseModel):
class JSONRPCRequest (line 152) | class JSONRPCRequest(JSONRPCMessage):
class JSONRPCError (line 157) | class JSONRPCError(BaseModel):
class JSONRPCResponse (line 163) | class JSONRPCResponse(JSONRPCMessage):
class SendTaskRequest (line 168) | class SendTaskRequest(JSONRPCRequest):
class SendTaskResponse (line 173) | class SendTaskResponse(JSONRPCResponse):
class SendTaskStreamingRequest (line 177) | class SendTaskStreamingRequest(JSONRPCRequest):
class SendTaskStreamingResponse (line 182) | class SendTaskStreamingResponse(JSONRPCResponse):
class GetTaskRequest (line 186) | class GetTaskRequest(JSONRPCRequest):
class GetTaskResponse (line 191) | class GetTaskResponse(JSONRPCResponse):
class CancelTaskRequest (line 195) | class CancelTaskRequest(JSONRPCRequest):
class CancelTaskResponse (line 200) | class CancelTaskResponse(JSONRPCResponse):
class SetTaskPushNotificationRequest (line 204) | class SetTaskPushNotificationRequest(JSONRPCRequest):
class SetTaskPushNotificationResponse (line 209) | class SetTaskPushNotificationResponse(JSONRPCResponse):
class GetTaskPushNotificationRequest (line 213) | class GetTaskPushNotificationRequest(JSONRPCRequest):
class GetTaskPushNotificationResponse (line 218) | class GetTaskPushNotificationResponse(JSONRPCResponse):
class TaskResubscriptionRequest (line 222) | class TaskResubscriptionRequest(JSONRPCRequest):
class JSONParseError (line 245) | class JSONParseError(JSONRPCError):
class InvalidRequestError (line 251) | class InvalidRequestError(JSONRPCError):
class MethodNotFoundError (line 257) | class MethodNotFoundError(JSONRPCError):
class InvalidParamsError (line 263) | class InvalidParamsError(JSONRPCError):
class InternalError (line 269) | class InternalError(JSONRPCError):
class TaskNotFoundError (line 275) | class TaskNotFoundError(JSONRPCError):
class TaskNotCancelableError (line 281) | class TaskNotCancelableError(JSONRPCError):
class PushNotificationNotSupportedError (line 287) | class PushNotificationNotSupportedError(JSONRPCError):
class UnsupportedOperationError (line 293) | class UnsupportedOperationError(JSONRPCError):
class ContentTypeNotSupportedError (line 299) | class ContentTypeNotSupportedError(JSONRPCError):
class AgentProvider (line 305) | class AgentProvider(BaseModel):
class AgentCapabilities (line 310) | class AgentCapabilities(BaseModel):
class AgentAuthentication (line 316) | class AgentAuthentication(BaseModel):
class AgentSkill (line 321) | class AgentSkill(BaseModel):
class AgentCard (line 331) | class AgentCard(BaseModel):
class A2AClientError (line 345) | class A2AClientError(Exception):
class A2AClientHTTPError (line 349) | class A2AClientHTTPError(A2AClientError):
method __init__ (line 350) | def __init__(self, status_code: int, message: str):
class A2AClientJSONError (line 356) | class A2AClientJSONError(A2AClientError):
method __init__ (line 357) | def __init__(self, message: str):
class MissingAPIKeyError (line 362) | class MissingAPIKeyError(Exception):
FILE: core/a2a/utils/in_memory_cache.py
class InMemoryCache (line 8) | class InMemoryCache:
method __new__ (line 18) | def __new__(cls):
method __init__ (line 32) | def __init__(self):
method set (line 47) | def set(self, key: str, value: Any, ttl: Optional[int] = None) -> None:
method get (line 64) | def get(self, key: str, default: Any = None) -> Any:
method delete (line 81) | def delete(self, key: str) -> None:
method clear (line 99) | def clear(self) -> bool:
FILE: core/a2a/utils/push_notification_auth.py
class PushNotificationAuth (line 19) | class PushNotificationAuth:
method _calculate_request_body_sha256 (line 20) | def _calculate_request_body_sha256(self, data: dict[str, Any]):
class PushNotificationSenderAuth (line 34) | class PushNotificationSenderAuth(PushNotificationAuth):
method __init__ (line 35) | def __init__(self):
method verify_push_notification_url (line 40) | async def verify_push_notification_url(url: str) -> bool:
method generate_jwk (line 58) | def generate_jwk(self):
method handle_jwks_endpoint (line 63) | def handle_jwks_endpoint(self, _request: Request):
method _generate_jwt (line 70) | def _generate_jwt(self, data: dict[str, Any]):
method send_push_notification (line 86) | async def send_push_notification(self, url: str, data: dict[str, Any]):
class PushNotificationReceiverAuth (line 101) | class PushNotificationReceiverAuth(PushNotificationAuth):
method __init__ (line 102) | def __init__(self):
method load_jwks (line 106) | async def load_jwks(self, jwks_url: str):
method verify_push_notification (line 109) | async def verify_push_notification(self, request: Request) -> bool:
FILE: core/agents/base/base_agent.py
class BaseAgent (line 25) | class BaseAgent:
method __init__ (line 26) | def __init__(
method _estimate_tokens (line 68) | def _estimate_tokens(self, message: BaseMessage) -> int:
method _truncate_by_tokens (line 84) | def _truncate_by_tokens(self, messages: Sequence[BaseMessage]) -> List...
method _truncate_messages (line 138) | def _truncate_messages(self, messages: Sequence[BaseMessage]) -> List[...
method _get_state_value (line 151) | def _get_state_value(self, state: StateSchema, key: str, default: Any ...
method _format_tools_for_prompt (line 154) | def _format_tools_for_prompt(self, tools: List[Union[BaseTool, Callabl...
method build (line 165) | def build(self) -> Optional[StateGraph]:
method compile (line 169) | def compile(self) -> CompiledGraph:
method get_agent (line 200) | def get_agent(self) -> CompiledGraph:
method invoke (line 210) | def invoke(self, state: Dict[str, Any], config: Optional[RunnableConfi...
method ainvoke (line 228) | async def ainvoke(self, state: Dict[str, Any], config: Optional[Runnab...
method run (line 244) | def run(self, state: Dict[str, Any]) -> Dict[str, Any]:
method arun (line 255) | async def arun(self, state: Dict[str, Any]) -> Dict[str, Any]:
method reset (line 264) | def reset(self):
method add_tools (line 270) | def add_tools(self, tools: List[Union[BaseTool, Callable]]) -> None:
FILE: core/agents/base/create_react_agent_wrapper.py
class CreateReactAgentWrapper (line 8) | class CreateReactAgentWrapper(RunnableCallable):
method __init__ (line 9) | def __init__(
FILE: core/agents/base/react_agent.py
class ReactAgent (line 20) | class ReactAgent(BaseAgent):
method __init__ (line 27) | def __init__(
method _prepare_llm_input (line 90) | def _prepare_llm_input(self, state: Dict[str, Any]) -> LanguageModelIn...
method build (line 130) | def build(self) -> Optional[StateGraph]:
method compile (line 136) | def compile(self) -> CompiledGraph:
FILE: core/agents/react_based_supervisor/agent_name.py
function _is_content_blocks_content (line 14) | def _is_content_blocks_content(content: list[dict] | str) -> bool:
function add_inline_agent_name (line 23) | def add_inline_agent_name(message: BaseMessage) -> BaseMessage:
function remove_inline_agent_name (line 51) | def remove_inline_agent_name(message: BaseMessage) -> BaseMessage:
function with_agent_name (line 97) | def with_agent_name(
FILE: core/agents/react_based_supervisor/handoff.py
function _normalize_agent_name (line 13) | def _normalize_agent_name(agent_name: str) -> str:
function create_handoff_tool (line 18) | def create_handoff_tool(*, agent_name: str) -> BaseTool:
function create_handoff_back_messages (line 51) | def create_handoff_back_messages(
FILE: core/agents/react_based_supervisor/planning_handler.py
class PlanningStateHandler (line 5) | class PlanningStateHandler:
method _now (line 20) | def _now() -> str:
method _gen_id (line 24) | def _gen_id() -> str:
method create_plan (line 28) | def create_plan(title: str, description: str) -> Dict:
method create_task (line 41) | def create_task(description: str,
method add_tasks (line 56) | def add_tasks(plan: Dict, tasks_data: List[Dict]) -> Dict:
method update_task (line 69) | def update_task(plan: Dict,
method set_current_task (line 107) | def set_current_task(plan: Dict, task_id: str) -> Dict:
method finish_plan (line 116) | def finish_plan(plan: Dict) -> Dict:
FILE: core/agents/react_based_supervisor/simple_planning_tool.py
class SimplePlanningTool (line 6) | class SimplePlanningTool(BaseTool):
method __init__ (line 16) | def __init__(self):
method _run (line 20) | def _run(self, action: str, **kwargs) -> str:
method _arun (line 39) | async def _arun(self, action: str, **kwargs) -> str:
method _handle_create_plan (line 42) | def _handle_create_plan(self, **kwargs) -> str:
method _handle_view_plan (line 51) | def _handle_view_plan(self) -> str:
method _handle_add_tasks (line 56) | def _handle_add_tasks(self, **kwargs) -> str:
method _handle_update_task (line 63) | def _handle_update_task(self, **kwargs) -> str:
method _handle_set_current_task (line 85) | def _handle_set_current_task(self, **kwargs) -> str:
method _handle_finish_plan (line 94) | def _handle_finish_plan(self) -> str:
method _json_ok (line 101) | def _json_ok(self, plan_data: Dict) -> str:
method _json_error (line 104) | def _json_error(self, message: str) -> str:
FILE: core/agents/react_based_supervisor/state_schema.py
class Task (line 12) | class Task(TypedDict, total=False):
class Plan (line 28) | class Plan(TypedDict, total=False):
class PlanningAgentState (line 43) | class PlanningAgentState(AgentState):
FILE: core/agents/react_based_supervisor/supervisor.py
function _supports_disable_parallel_tool_calls (line 33) | def _supports_disable_parallel_tool_calls(model: LanguageModelLike) -> b...
function _make_call_agent (line 49) | def _make_call_agent(
function create_supervisor (line 95) | def create_supervisor(
FILE: core/agents/react_supervisor_agent.py
class SupervisorAgent (line 22) | class SupervisorAgent(BaseAgent):
method __init__ (line 138) | def __init__(
method build (line 200) | def build(self) -> StateGraph:
FILE: core/agents/sb_supervisor_agent.py
class SupervisorAgent (line 18) | class SupervisorAgent(BaseAgent):
method __init__ (line 25) | def __init__(
method build (line 63) | def build(self) -> Optional[StateGraph]:
FILE: core/agents/state_based_supervisor/agent_name.py
function _is_content_blocks_content (line 14) | def _is_content_blocks_content(content: list[dict] | str) -> bool:
function add_inline_agent_name (line 23) | def add_inline_agent_name(message: BaseMessage) -> BaseMessage:
function remove_inline_agent_name (line 51) | def remove_inline_agent_name(message: BaseMessage) -> BaseMessage:
function with_agent_name (line 97) | def with_agent_name(
FILE: core/agents/state_based_supervisor/evaluate_result_node.py
class PlanningAgentState (line 19) | class PlanningAgentState(Dict): pass;
class Plan (line 20) | class Plan(Dict): pass;
class Task (line 21) | class Task(Dict): pass
class PlanningStateHandler (line 23) | class PlanningStateHandler: # Dummy
method update_task (line 25) | def update_task(plan, by_id, **kwargs): return plan
method set_current_task (line 27) | def set_current_task(plan, task_id): return plan
method get_task (line 29) | def get_task(plan, task_id): return None
method update_plan_status (line 31) | def update_plan_status(plan): return plan
function evaluate_result_node_logic (line 34) | async def evaluate_result_node_logic(state: PlanningAgentState, config: ...
function evaluate_result_node_logic_sync (line 134) | def evaluate_result_node_logic_sync(state: PlanningAgentState, config: O...
FILE: core/agents/state_based_supervisor/handoff.py
function _normalize_agent_name (line 15) | def _normalize_agent_name(agent_name: str) -> str:
function _handoff_to_agent_implementation (line 25) | def _handoff_to_agent_implementation(
function create_handoff_tool (line 56) | def create_handoff_tool(*, agent_name: str) -> BaseTool:
function create_handoff_back_messages (line 92) | def create_handoff_back_messages(
FILE: core/agents/state_based_supervisor/planner_node.py
class PlanningAgentState (line 20) | class PlanningAgentState(Dict): pass;
class Plan (line 21) | class Plan(Dict): pass;
class PlanningStateHandler (line 22) | class PlanningStateHandler: pass
function planner_node_logic (line 26) | async def planner_node_logic(
function planner_node_logic_sync (line 148) | def planner_node_logic_sync(
FILE: core/agents/state_based_supervisor/planning_handler.py
class PlanningStateHandler (line 7) | class PlanningStateHandler:
method _now (line 14) | def _now() -> str:
method _gen_id (line 18) | def _gen_id() -> str:
method create_plan (line 24) | def create_plan(title: str, description: str) -> Plan:
method create_task (line 39) | def create_task(description: str,
method add_tasks (line 59) | def add_tasks(plan: Plan, tasks_data: List[Dict[str, Any]]) -> Plan:
method update_task (line 82) | def update_task(plan: Plan,
method update_plan_status (line 132) | def update_plan_status(plan: Plan) -> Plan:
method set_current_task (line 169) | def set_current_task(plan: Plan, task_id: Optional[str]) -> Plan:
method get_task (line 189) | def get_task(plan: Plan, task_id: str) -> Optional[Task]:
method get_next_pending_task (line 196) | def get_next_pending_task(plan: Plan) -> Optional[Task]:
method finish_plan (line 211) | def finish_plan(plan: Plan) -> Plan:
FILE: core/agents/state_based_supervisor/state_schema.py
class Task (line 15) | class Task(TypedDict, total=False):
class Plan (line 33) | class Plan(TypedDict, total=False):
class PlanningAgentState (line 48) | class PlanningAgentState(TypedDict):
class BasicAgentState (line 60) | class BasicAgentState(TypedDict):
FILE: core/agents/state_based_supervisor/supervisor_graph.py
class BaseAgent (line 34) | class BaseAgent: pass
class PlanningAgentState (line 35) | class PlanningAgentState(Dict): pass
class Plan (line 36) | class Plan(Dict): pass
class Pregel (line 37) | class Pregel: pass
function create_handoff_tool (line 39) | def create_handoff_tool(*args, **kwargs): return None # type: ignore
function _normalize_agent_name (line 40) | def _normalize_agent_name(s: str) -> str: return s
function supervisor_node_logic (line 41) | async def supervisor_node_logic(*args, **kwargs): return {}
function planner_node_logic (line 42) | async def planner_node_logic(*args, **kwargs): return {} # <--- 添加 plann...
function planner_node_logic_sync (line 43) | def planner_node_logic_sync(*args, **kwargs): return {} # <--- 添加 planne...
function evaluate_result_node_logic (line 44) | async def evaluate_result_node_logic(*args, **kwargs): return {} # 添加 ev...
function evaluate_result_node_logic_sync (line 45) | def evaluate_result_node_logic_sync(*args, **kwargs): return {} # 添加 eva...
function with_agent_name (line 46) | def with_agent_name(model, mode): return model
function _supports_disable_parallel_tool_calls (line 52) | def _supports_disable_parallel_tool_calls(model: LanguageModelLike) -> b...
function _make_call_agent (line 61) | def _make_call_agent(
function supervisor_node_logic_sync (line 138) | def supervisor_node_logic_sync(
function create_supervisor (line 157) | def create_supervisor(
FILE: core/agents/state_based_supervisor/supervisor_node.py
class PlanningAgentState (line 24) | class PlanningAgentState(Dict): pass
class Plan (line 25) | class Plan(Dict): pass
class PlanningStateHandler (line 26) | class PlanningStateHandler:
method update_task (line 28) | def update_task(*args, **kwargs): return kwargs.get('plan')
method create_plan (line 30) | def create_plan(*args, **kwargs): return {}
method add_tasks (line 32) | def add_tasks(*args, **kwargs): return kwargs.get('plan')
method finish_plan (line 34) | def finish_plan(*args, **kwargs): return kwargs.get('plan')
method get_task (line 36) | def get_task(*args, **kwargs): return None
method update_plan_status (line 38) | def update_plan_status(*args, **kwargs): return kwargs.get('plan')
method set_current_task (line 40) | def set_current_task(*args, **kwargs): return kwargs.get('plan')
function parse_directive_args (line 45) | def parse_directive_args(directive_str: str) -> Dict[str, Any]:
function supervisor_node_logic (line 75) | async def supervisor_node_logic(
FILE: core/agents/sub_agents/coder_agent.py
class CoderAgent (line 14) | class CoderAgent(ReactAgent):
method __init__ (line 20) | def __init__(
FILE: core/agents/sub_agents/data_analyst_agent.py
class DataAnalystAgent (line 19) | class DataAnalystAgent(ReactAgent):
method __init__ (line 26) | def __init__(
FILE: core/agents/sub_agents/designer_agent.py
class DesignerAgent (line 22) | class DesignerAgent(ReactAgent):
method __init__ (line 29) | def __init__(
FILE: core/agents/sub_agents/reporter_agent.py
class ReporterAgent (line 24) | class ReporterAgent(BaseAgent):
method __init__ (line 47) | def __init__(
method _generate_report_node_logic (line 84) | async def _generate_report_node_logic(self, state: Dict[str, Any], con...
method build (line 137) | def build(self) -> Optional[StateGraph]:
FILE: core/agents/sub_agents/research_agent.py
class ResearchAgent (line 23) | class ResearchAgent(ReactAgent):
method __init__ (line 31) | def __init__(
FILE: core/llm/llm_manager.py
class ModelType (line 14) | class ModelType(Enum):
class ModelCapability (line 21) | class ModelCapability(Enum):
class LLMManager (line 27) | class LLMManager:
method __init__ (line 36) | def __init__(self):
method _register_model (line 116) | def _register_model(
method set_default_model (line 150) | def set_default_model(self, model_id: str) -> None:
method set_capability_model (line 155) | def set_capability_model(self, capability: ModelCapability, model_id: ...
method _get_instance (line 164) | def _get_instance(self, model_id: str) -> BaseChatModel:
method get_model (line 224) | def get_model(self, model_id: Optional[str] = None) -> BaseChatModel:
method get_model_for_capability (line 233) | def get_model_for_capability(self, capability: ModelCapability) -> Bas...
method list_models (line 244) | def list_models(self) -> Dict[str, Dict[str, Any]]:
method list_capabilities (line 259) | def list_capabilities(self) -> Dict[str, str]:
FILE: core/mcp/client.py
function load_mcp_tools (line 22) | async def load_mcp_tools(session: ClientSession) -> list: return []
class MCPClient (line 34) | class MCPClient:
method __init__ (line 36) | def __init__(self, config: MCPConfig):
method __aenter__ (line 43) | async def __aenter__(self) -> "MCPClient":
method __aexit__ (line 141) | async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc...
method close (line 144) | async def close(self):
method get_tools (line 156) | def get_tools(self) -> List[BaseTool]:
FILE: core/mcp/config_loader.py
class StdioConfig (line 18) | class StdioConfig(BaseModel):
class Config (line 25) | class Config: extra = 'forbid'
class SSEConfig (line 28) | class SSEConfig(BaseModel):
class Config (line 33) | class Config: extra = 'forbid'
class MCPConfig (line 36) | class MCPConfig(BaseModel):
class Config (line 43) | class Config: extra = 'forbid'
function load_config (line 48) | def load_config(config_path: Union[str, Path]) -> Dict[str, MCPConfig]:
FILE: core/mcp/run_server.py
function preregister_core_tools (line 25) | def preregister_core_tools(): pass;
function create_tool_wrapper (line 37) | def create_tool_wrapper(tool_instance: BaseTool):
function main (line 85) | def main():
FILE: core/mcp/server.py
class MentisMCPServer (line 21) | class MentisMCPServer:
method __init__ (line 22) | def __init__(self, name: str = "MentisMCP", host: Optional[str] = None...
method register_all_tools (line 42) | def register_all_tools(self):
method register_single_tool (line 56) | def register_single_tool(self, tool_name: str):
method _register_tool_with_simplified_wrapper (line 75) | def _register_tool_with_simplified_wrapper(self, tool: BaseTool) -> bool:
method run (line 167) | def run(self, transport: str = "stdio"):
FILE: core/mcp/test/minimal_fastmcp_test.py
function ping_tool (line 17) | async def ping_tool(query: str = "default ping") -> str:
FILE: core/mcp/test/test_minimal_client.py
function main (line 23) | async def main():
FILE: core/tools/__init__.py
function preregister_core_tools (line 25) | def preregister_core_tools():
function register_direct_tool (line 167) | def register_direct_tool(tool_instance: BaseTool, category: ToolCategory...
FILE: core/tools/e2b_tool.py
class E2BCodeInterpreterToolInput (line 35) | class E2BCodeInterpreterToolInput(BaseModel):
class E2BCodeInterpreterTool (line 39) | class E2BCodeInterpreterTool(BaseTool):
method __init__ (line 59) | def __init__(self, **kwargs):
method _initialize_sandbox (line 63) | def _initialize_sandbox(self):
method _run (line 91) | def _run(self, code: str, **kwargs) -> str:
method _arun (line 136) | async def _arun(self, code: str, **kwargs) -> str:
method close (line 166) | def close(self):
FILE: core/tools/firecrawl_tool.py
class FireCrawlInput (line 27) | class FireCrawlInput(BaseModel):
class FireCrawlTool (line 38) | class FireCrawlTool(BaseTool):
method __init__ (line 71) | def __init__(self, api_key: Optional[str] = None, api_url: Optional[st...
method _run (line 83) | def _run(
method _arun (line 155) | async def _arun(
FILE: core/tools/registry.py
class ToolCategory (line 6) | class ToolCategory(Enum):
function register_tool (line 18) | def register_tool(tool: Tool, category: ToolCategory) -> None:
function get_registered_tools (line 30) | def get_registered_tools(as_dict: bool = False) -> Union[List[Tool], Dic...
function get_tools_list (line 43) | def get_tools_list() -> List[Tool]:
function get_tools_dict (line 51) | def get_tools_dict() -> Dict[str, Tool]:
function get_tool (line 59) | def get_tool(name: str) -> Optional[Dict]:
function get_tool_instance (line 76) | def get_tool_instance(name: str) -> Optional[Tool]:
function get_tools_by_category (line 88) | def get_tools_by_category(category: ToolCategory, return_instances: bool...
FILE: core/tools/replicate_flux_tool.py
class ReplicateFluxToolInput (line 35) | class ReplicateFluxToolInput(BaseModel):
class ReplicateFluxImageTool (line 57) | class ReplicateFluxImageTool(BaseTool):
method __init__ (line 71) | def __init__(self, api_token: Optional[str] = None, model_id: Optional...
method _run (line 85) | def _run( self, run_manager: Optional[CallbackManagerForToolRun] = Non...
method _arun (line 139) | async def _arun( self, run_manager: Optional[AsyncCallbackManagerForTo...
method close (line 186) | def close(self):
FILE: core/utils/agent_utils.py
function log_agent_actions (line 6) | def log_agent_actions(state: Dict[str, Any]) -> None:
function save_agent_graph (line 44) | def save_agent_graph(
function visualize_agent (line 131) | def visualize_agent(agent, **kwargs):
FILE: core/utils/timezone.py
function get_timezone (line 6) | def get_timezone() -> str:
function get_formatted_date (line 14) | def get_formatted_date(timezone: Optional[str] = None) -> str:
function get_current_time (line 27) | def get_current_time(timezone: Optional[str] = None) -> datetime:
FILE: examples/01_supervisor_test.py
function generate_joke (line 18) | def generate_joke(messages):
function joke_agent (line 29) | def joke_agent(state):
function web_search (line 42) | def web_search(query: str) -> str:
FILE: examples/02_supervisor_agent_test.py
function generate_joke (line 17) | def generate_joke(messages):
function joke_agent (line 28) | def joke_agent(state):
function web_search (line 41) | def web_search(query: str) -> str:
FILE: examples/03_tavily_tools_test.py
function generate_joke (line 18) | def generate_joke(messages):
function joke_agent (line 29) | def joke_agent(state):
FILE: examples/05_react_agent_user_input.py
function log_agent_actions (line 19) | def log_agent_actions(state: Dict[str, Any]) -> None:
function create_react_agent_instance (line 66) | def create_react_agent_instance():
function main (line 99) | async def main():
FILE: examples/06_web_extraction_tools_test.py
function log_agent_actions (line 23) | def log_agent_actions(state: Dict[str, Any]) -> None:
FILE: examples/07_web_extraction_with_filesystem.py
function log_agent_actions (line 26) | def log_agent_actions(state: Dict[str, Any]) -> None:
function main (line 203) | async def main():
FILE: examples/08_react_agent_tool_registry_test.py
function print_separator (line 23) | def print_separator(title):
function log_agent_actions (line 33) | def log_agent_actions(state: Dict[str, Any]) -> None:
FILE: examples/09_e2b_code_interpreter_test.py
function print_separator (line 20) | def print_separator(title):
FILE: examples/10_financial_data_analysis.py
function print_separator (line 20) | def print_separator(title):
function log_agent_actions (line 30) | def log_agent_actions(state: Dict[str, Any]) -> None:
function download_file_from_sandbox (line 159) | def download_file_from_sandbox(sandbox, sandbox_path, local_path):
function download_directory_from_sandbox (line 193) | def download_directory_from_sandbox(sandbox, sandbox_dir_path, local_dir...
FILE: examples/11_e2b_sandbox_test.py
function print_separator (line 21) | def print_separator(title):
function log_agent_actions (line 31) | def log_agent_actions(state: Dict[str, Any]) -> None:
function download_file_from_sandbox (line 65) | def download_file_from_sandbox(sandbox, sandbox_path, local_path):
function run_ai_generated_code (line 99) | def run_ai_generated_code(sandbox, code: str, save_results_dir=None):
function download_directory_from_sandbox (line 197) | def download_directory_from_sandbox(sandbox, sandbox_dir_path, local_dir...
function run_test_case_1 (line 409) | def run_test_case_1():
function run_test_case_2 (line 448) | def run_test_case_2():
function run_test_case_3 (line 538) | def run_test_case_3():
function run_test_case_4 (line 577) | def run_test_case_4():
function run_test_case_5 (line 654) | def run_test_case_5():
function run_test_case_6 (line 750) | def run_test_case_6():
FILE: examples/12_planning_supervisor_test.py
function generate_joke (line 20) | def generate_joke(messages):
function joke_agent (line 31) | def joke_agent(state):
FILE: examples/13_multi_agent_roles_test.py
class LogCapture (line 26) | class LogCapture:
method __init__ (line 27) | def __init__(self):
method start_capture (line 31) | def start_capture(self):
method stop_capture (line 35) | def stop_capture(self):
method get_content (line 40) | def get_content(self):
function download_file_from_sandbox (line 49) | def download_file_from_sandbox(sandbox, sandbox_path, local_path):
function download_directory_from_sandbox (line 83) | def download_directory_from_sandbox(sandbox, sandbox_dir_path, local_dir...
function save_markdown_log (line 256) | def save_markdown_log():
FILE: examples/14_mcp_client_fetch_test.py
class FetchInputSchema (line 36) | class FetchInputSchema(BaseModel):
class MCPToolRunner (line 50) | class MCPToolRunner(BaseTool):
class Config (line 57) | class Config:
method _arun (line 60) | async def _arun(self, **kwargs) -> str:
method _run (line 84) | def _run(self, **kwargs) -> str:
function run_fetch_test (line 91) | async def run_fetch_test(server_config_key: str, all_configs: Dict[str, ...
function main (line 164) | async def main():
FILE: examples/15_mcp_agent_test.py
class FetchInputSchema (line 54) | class FetchInputSchema(BaseModel): # 使用导入的 BaseModel
class EchoInputSchema (line 86) | class EchoInputSchema(BaseModel):
class AddInputSchema (line 94) | class AddInputSchema(BaseModel):
class MCPToolRunner (line 103) | class MCPToolRunner(BaseTool):
class Config (line 116) | class Config: arbitrary_types_allowed = True
method _arun (line 118) | async def _arun(self, **kwargs) -> str:
method _run (line 224) | def _run(self, **kwargs) -> str:
function run_fetch_test (line 241) | async def run_fetch_test():
function run_everything_test (line 311) | async def run_everything_test():
function main (line 397) | async def main():
FILE: examples/16_google_a2a/agent_task_manager_test.py
function search (line 36) | def search(query: str) -> str:
function calculator (line 41) | def calculator(expression: str) -> str:
class AgentState (line 51) | class AgentState(TypedDict):
class TestAgent (line 55) | class TestAgent:
method __init__ (line 61) | def __init__(self, llm=None):
method _build_graph (line 74) | def _build_graph(self):
method invoke (line 82) | def invoke(self, query: str, session_id: str = None) -> str:
method stream (line 87) | async def stream(self, query: str, session_id: str = None):
function test_sync_task (line 107) | async def test_sync_task():
function test_streaming_task (line 152) | async def test_streaming_task():
function main (line 211) | async def main():
FILE: examples/16_google_a2a/client_example.py
function run_a2a_client (line 32) | async def run_a2a_client():
function send_sync_task (line 45) | async def send_sync_task(client: A2AClient):
function send_streaming_task (line 139) | async def send_streaming_task(client: A2AClient):
FILE: examples/16_google_a2a/currency_agent_test.py
function test_sync_currency_conversion (line 32) | async def test_sync_currency_conversion(client: A2AClient):
function test_multi_turn_conversation (line 120) | async def test_multi_turn_conversation(client: A2AClient):
function test_streaming_response (line 256) | async def test_streaming_response(client: A2AClient):
function main (line 320) | async def main():
FILE: examples/16_google_a2a/langgraph_integration.py
function search (line 41) | def search(query: str) -> str:
function calculator (line 48) | def calculator(expression: str) -> str:
class AgentState (line 74) | class AgentState(TypedDict):
class CurrencyAgent (line 83) | class CurrencyAgent:
method __init__ (line 89) | def __init__(self, llm):
method invoke (line 96) | def invoke(self, query: str, session_id: str = None) -> str:
method ainvoke (line 132) | async def ainvoke(self, inputs: dict) -> dict:
method stream (line 139) | async def stream(self, query: str, session_id: str = None):
function setup_a2a_server (line 153) | def setup_a2a_server():
FILE: examples/TODO_computer_tool_demo.py
function should_continue (line 26) | def should_continue(state: Annotated[dict, InjectedState()]):
function call_model (line 36) | def call_model(state: Annotated[dict, InjectedState()]):
FILE: examples/state_based_supervisor_examples/01_simple.py
class RateLimitError (line 26) | class RateLimitError(Exception): pass
function web_search (line 46) | def web_search(query: str) -> str:
function main (line 61) | async def main():
FILE: examples/state_based_supervisor_examples/02_tavily.py
class RateLimitError (line 25) | class RateLimitError(Exception): pass
function run_supervisor_test (line 51) | async def run_supervisor_test(supervisor_agent: SupervisorAgent, initial...
function main (line 159) | async def main():
FILE: examples/state_based_supervisor_examples/03_multi_agents.py
function slugify (line 68) | def slugify(text: str) -> str:
function run_supervisor_test (line 77) | async def run_supervisor_test(supervisor_agent: SupervisorAgent, initial...
function main (line 166) | async def main():
FILE: examples/web_agents/research_assistant/graph.py
function get_graph (line 54) | def get_graph():
FILE: examples/web_agents/weather_agent/__init__.py
class Weather (line 19) | class Weather(TypedDict):
class State (line 25) | class State(MessagesState):
class WeatherInput (line 29) | class WeatherInput(TypedDict):
class ToolNodeArgs (line 34) | class ToolNodeArgs(TypedDict):
function weather_tool (line 41) | async def weather_tool(query: str) -> str:
function create_reminder_tool (line 47) | async def create_reminder_tool(reminder_text: str) -> str:
function weather (line 52) | async def weather(input: WeatherInput, writer: StreamWriter):
function reminder (line 67) | async def reminder(input: ToolNodeArgs):
function chatbot (line 75) | async def chatbot(state: State):
function tool_router (line 82) | def tool_router(state: State) -> Literal["weather", "reminder", "__end__"]:
function assign_tool (line 94) | def assign_tool(state: State) -> Literal["weather", "reminder", "__end__"]:
function get_graph (line 109) | def get_graph():
FILE: log_analyzer.py
function parse_log_file (line 7) | def parse_log_file(file_path):
function analyze_agent_interactions (line 69) | def analyze_agent_interactions(messages):
function visualize_interactions (line 112) | def visualize_interactions(interactions):
function visualize_conversation_flow (line 126) | def visualize_conversation_flow(messages):
function main (line 152) | def main():
FILE: super_agents/browser_use/agent.py
class Agent (line 19) | class Agent:
method __init__ (line 27) | def __init__(
method _initialize (line 47) | async def _initialize(self):
method run (line 64) | async def run(self, prompt: str) -> Dict[str, Any]:
method __del__ (line 121) | def __del__(self):
class OpenAIProvider (line 128) | class OpenAIProvider:
method __init__ (line 131) | def __init__(self, model="gpt-4o-mini", api_key=None, temperature=0.1):
class AnthropicProvider (line 153) | class AnthropicProvider:
method __init__ (line 156) | def __init__(self, model="claude-3-opus-20240229", api_key=None, tempe...
FILE: super_agents/browser_use/agent/graph.py
function should_end (line 19) | def should_end(state: AgentState) -> bool:
function create_graph_app (line 42) | def create_graph_app(browser: Browser, llm: RunnableSerializable):
FILE: super_agents/browser_use/agent/nodes.py
class AgentNodes (line 24) | class AgentNodes:
method __init__ (line 27) | def __init__(self, browser: Browser, llm: RunnableSerializable): # <--...
method get_browser_state (line 35) | async def get_browser_state(self, state: AgentState) -> Dict[str, Any]:
method plan_action (line 45) | async def plan_action(self, state: AgentState) -> Dict[str, Any]:
method execute_action (line 83) | async def execute_action(self, state: AgentState) -> Dict[str, Any]:
FILE: super_agents/browser_use/agent/prompts.py
function create_agent_prompt (line 3) | def create_agent_prompt(
FILE: super_agents/browser_use/agent/schemas.py
class BaseAction (line 25) | class BaseAction(BaseModel):
class NavigateAction (line 29) | class NavigateAction(BaseAction):
class ClickAction (line 33) | class ClickAction(BaseAction):
class TypeAction (line 38) | class TypeAction(BaseAction):
class ScrollAction (line 44) | class ScrollAction(BaseAction):
class WaitAction (line 49) | class WaitAction(BaseAction):
class GetContentAction (line 53) | class GetContentAction(BaseAction):
class FinishAction (line 58) | class FinishAction(BaseAction):
class ErrorAction (line 62) | class ErrorAction(BaseAction):
class LLMResponse (line 75) | class LLMResponse(BaseModel):
FILE: super_agents/browser_use/agent/state.py
class AgentState (line 5) | class AgentState(TypedDict, total=False):
FILE: super_agents/browser_use/browser/browser.py
function observe (line 21) | def observe(name, ignore_input=False, ignore_output=False):
class Detector (line 46) | class Detector: enabled=False
class BrowserError (line 47) | class BrowserError(Exception): pass
class BrowserState (line 48) | class BrowserState: pass
class InteractiveElementsData (line 49) | class InteractiveElementsData: elements=[]; viewport={}
class TabInfo (line 50) | class TabInfo: pass
class InteractiveElement (line 51) | class InteractiveElement: pass
function combine_and_filter_elements (line 52) | def combine_and_filter_elements(a, b): return []
function put_highlight_elements_on_screenshot (line 53) | def put_highlight_elements_on_screenshot(a, b): return None
class ViewportSize (line 159) | class ViewportSize(TypedDict):
class BrowserConfig (line 165) | class BrowserConfig:
class Browser (line 175) | class Browser:
method __init__ (line 181) | def __init__(self, config: BrowserConfig = BrowserConfig(), close_cont...
method __aenter__ (line 213) | async def __aenter__(self):
method __aexit__ (line 217) | async def __aexit__(self, exc_type, exc_val, exc_tb):
method initialize (line 222) | async def initialize(self):
method close (line 231) | async def close(self):
method _init_browser (line 254) | async def _init_browser(self):
method _apply_anti_detection_scripts (line 317) | async def _apply_anti_detection_scripts(self):
method _on_page_change (line 339) | async def _on_page_change(self, page: Page):
method get_current_page (line 345) | async def get_current_page(self) -> Page:
method get_cdp_session (line 354) | async def get_cdp_session(self):
method fast_screenshot (line 385) | async def fast_screenshot(self) -> str:
method navigate_to (line 405) | async def navigate_to(self, url: str):
method click (line 415) | async def click(self, selector: str):
method type (line 428) | async def type(self, selector: str, text: str):
method scroll (line 442) | async def scroll(self, direction: str):
method wait (line 456) | async def wait(self, milliseconds: int):
method get_content (line 463) | async def get_content(self, max_length: int = 120000) -> str:
method get_cookies (line 523) | async def get_cookies(self) -> list[dict[str, Any]]:
method get_storage_state (line 530) | async def get_storage_state(self) -> dict[str, Any]:
method get_tabs_info (line 547) | async def get_tabs_info(self) -> list[TabInfo]:
method switch_to_tab (line 570) | async def switch_to_tab(self, page_id: int) -> None:
method create_new_tab (line 589) | async def create_new_tab(self, url: str | None = None) -> None:
method close_current_tab (line 606) | async def close_current_tab(self):
method refresh_page (line 640) | async def refresh_page(self):
method go_forward (line 651) | async def go_forward(self):
method get_state (line 664) | def get_state(self) -> Optional[BrowserState]:
method update_state (line 671) | async def update_state(self) -> BrowserState:
method _update_state (line 687) | async def _update_state(self) -> BrowserState:
method get_interactive_elements_data (line 761) | async def get_interactive_elements_data(self) -> InteractiveElementsData:
method get_interactive_elements_with_cv (line 790) | async def get_interactive_elements_with_cv(self, screenshot_b64: Optio...
FILE: super_agents/browser_use/browser/detector.py
function observe (line 28) | def observe(name, ignore_input=False, ignore_output=False):
class VLMJsonOutput (line 39) | class VLMJsonOutput(BaseModel):
class InteractiveElement (line 42) | class InteractiveElement: pass
class VLMJsonOutput (line 43) | class VLMJsonOutput(BaseModel): detected_elements: List = []
class ChatOpenRouter (line 56) | class ChatOpenRouter: pass
class Detector (line 100) | class Detector:
method __init__ (line 105) | def __init__(self):
method detect_from_image (line 142) | async def detect_from_image(self, image_b64: str, detect_sheets: bool ...
method _parse_vlm_detections (line 197) | def _parse_vlm_detections(self, detections: List[Dict[str, Any]]) -> L...
FILE: super_agents/browser_use/browser/findVisibleInteractiveElements.js
function generateUniqueId (line 27) | function generateUniqueId() {
function isElementTooBig (line 33) | function isElementTooBig(rect) {
function isInViewport (line 48) | function isInViewport(rect) {
function getAdjustedBoundingClientRect (line 74) | function getAdjustedBoundingClientRect(element, contextInfo = null) {
function isTopElement (line 94) | function isTopElement(element) {
function getEffectiveZIndex (line 171) | function getEffectiveZIndex(element) {
function findInteractiveElements (line 188) | function findInteractiveElements() {
function calculateIoU (line 405) | function calculateIoU(rect1, rect2) {
function isFullyContained (line 432) | function isFullyContained(rect1, rect2) {
function filterOverlappingElements (line 440) | function filterOverlappingElements(elements) {
function getInteractiveElementsData (line 511) | function getInteractiveElementsData() {
function sortElementsByPosition (line 658) | function sortElementsByPosition(elements) {
FILE: super_agents/browser_use/browser/models.py
class BrowserError (line 10) | class BrowserError(Exception): pass
class URLNotAllowedError (line 11) | class URLNotAllowedError(BrowserError): pass
class TabInfo (line 14) | class TabInfo(BaseModel):
class Coordinates (line 19) | class Coordinates(BaseModel):
class Viewport (line 25) | class Viewport(BaseModel):
class InteractiveElement (line 35) | class InteractiveElement(BaseModel):
class InteractiveElementsData (line 64) | class InteractiveElementsData(BaseModel):
class BrowserState (line 69) | class BrowserState(BaseModel):
FILE: super_agents/browser_use/browser/utils.py
function put_highlight_elements_on_screenshot (line 14) | def put_highlight_elements_on_screenshot(elements: dict[int, Interactive...
function scale_b64_image (line 145) | def scale_b64_image(image_b64: str, scale_factor: float) -> str:
function calculate_iou (line 188) | def calculate_iou(rect1: Dict, rect2: Dict) -> float:
function is_fully_contained (line 223) | def is_fully_contained(rect1: Dict, rect2: Dict) -> bool:
function filter_overlapping_elements (line 240) | def filter_overlapping_elements(elements: List[InteractiveElement], iou_...
function sort_elements_by_position (line 291) | def sort_elements_by_position(elements: List[InteractiveElement]) -> Lis...
function combine_and_filter_elements (line 347) | def combine_and_filter_elements(
FILE: super_agents/browser_use/llm.py
class ChatOpenRouter (line 32) | class ChatOpenRouter(ChatOpenAI):
method __init__ (line 40) | def __init__(self,
function initialize_llms (line 74) | def initialize_llms() -> Tuple[Optional[RunnableSerializable], Optional[...
function generate_structured_output (line 124) | async def generate_structured_output(model: Optional[RunnableSerializabl...
FILE: super_agents/browser_use/main.py
function run_agent (line 22) | async def run_agent(task: str, config: Dict):
FILE: super_agents/customized_deep_research/main.py
class RateLimitError (line 19) | class RateLimitError(Exception):
function slugify (line 56) | def slugify(text: str) -> str:
function create_initial_state_from_json (line 69) | def create_initial_state_from_json(input_data: Dict[str, Any], depth: Li...
function run_research (line 119) | async def run_research(initial_state: ResearchState): # Takes pre-filled...
function main (line 280) | async def main():
FILE: super_agents/customized_deep_research/reason_graph/graph.py
function check_initialization (line 26) | def check_initialization(state: ResearchState) -> Literal["plan_research...
function check_planning (line 40) | def check_planning(state: ResearchState) -> Literal["prepare_steps", "fi...
function should_continue_web_search (line 50) | def should_continue_web_search(state: ResearchState) -> Literal["execute...
function should_continue_analysis (line 82) | def should_continue_analysis(state: ResearchState) -> Literal["perform_a...
function decide_gap_followup (line 100) | def decide_gap_followup(state: ResearchState) -> Literal["execute_gap_se...
function check_synthesis (line 120) | def check_synthesis(state: ResearchState) -> Literal["generate_final_mar...
function build_mna_research_graph_yfinance_optimized (line 135) | def build_mna_research_graph_yfinance_optimized(for_web: bool = False) -...
function get_mna_app_yfinance (line 237) | def get_mna_app_yfinance(for_web: bool = False) -> Any:
FILE: super_agents/customized_deep_research/reason_graph/nodes.py
function initialize_research (line 44) | async def initialize_research(state: ResearchState) -> Dict[str, Any]:
function plan_research (line 94) | async def plan_research(state: ResearchState) -> Dict[str, Any]:
function prepare_steps (line 212) | async def prepare_steps(state: ResearchState) -> Dict[str, Any]:
function fetch_financial_data (line 270) | async def fetch_financial_data(state: ResearchState) -> Dict[str, Any]:
function execute_search (line 341) | async def execute_search(state: ResearchState) -> Dict[str, Any]:
function perform_analysis (line 439) | async def perform_analysis(state: ResearchState) -> Dict[str, Any]:
function analyze_gaps (line 692) | async def analyze_gaps(state: ResearchState) -> Dict[str, Any]:
function execute_gap_search (line 779) | async def execute_gap_search(state: ResearchState) -> Dict[str, Any]:
function synthesize_final_report (line 848) | async def synthesize_final_report(state: ResearchState) -> Dict[str, Any]:
function generate_final_markdown_report (line 985) | async def generate_final_markdown_report(state: ResearchState) -> Dict[s...
function finalize_basic_research (line 1227) | async def finalize_basic_research(state: ResearchState) -> Dict[str, Any]:
FILE: super_agents/customized_deep_research/reason_graph/schemas.py
class SearchQuery (line 6) | class SearchQuery(BaseModel):
class RequiredAnalysis (line 11) | class RequiredAnalysis(BaseModel):
class ResearchPlan (line 15) | class ResearchPlan(BaseModel):
class SearchResultItem (line 20) | class SearchResultItem(BaseModel):
class SearchStepResult (line 25) | class SearchStepResult(BaseModel):
class AnalysisResult (line 31) | class AnalysisResult(BaseModel):
class GapFollowUpQuery (line 36) | class GapFollowUpQuery(BaseModel):
class GapAnalysisResult (line 41) | class GapAnalysisResult(BaseModel):
class KeyFinding (line 46) | class KeyFinding(BaseModel):
class FinalSynthesisResult (line 50) | class FinalSynthesisResult(BaseModel):
class StreamUpdateData (line 57) | class StreamUpdateData(BaseModel):
class StreamUpdate (line 69) | class StreamUpdate(BaseModel):
class StepInfo (line 73) | class StepInfo(BaseModel):
FILE: super_agents/customized_deep_research/reason_graph/state.py
class YFinanceData (line 13) | class YFinanceData(TypedDict, total=False):
class ResearchState (line 27) | class ResearchState(TypedDict):
FILE: super_agents/customized_deep_research/reason_graph/tools.py
class BaseModel (line 32) | class BaseModel: pass # Basic placeholder
class SearchResultItem (line 33) | class SearchResultItem(BaseModel): title: str = ""; url: Optional[str] =...
class SearchQuery (line 34) | class SearchQuery(BaseModel): query: str = ""; tool_hint: str = "web_sea...
class StreamUpdateData (line 35) | class StreamUpdateData(BaseModel): id: str = ""; type: str = ""; status:...
class StreamUpdate (line 36) | class StreamUpdate(BaseModel): data: Optional[StreamUpdateData] = None; ...
class ResearchState (line 37) | class ResearchState(dict): pass
class YFinanceData (line 38) | class YFinanceData(dict): pass
function initialize_llms (line 52) | def initialize_llms() -> Tuple[Optional[RunnableSerializable], Optional[...
function generate_structured_output (line 176) | async def generate_structured_output(
function create_update (line 243) | def create_update(state: Dict[str, Any], update_data: Dict[str, Any]) ->...
function perform_web_search (line 296) | async def perform_web_search(query: str, max_results: int = 5) -> List[S...
function fetch_yfinance_data (line 340) | async def fetch_yfinance_data(ticker_symbol: str) -> YFinanceData:
FILE: super_agents/deep_research/a2a_adapter/client_example.py
function main (line 36) | async def main():
FILE: super_agents/deep_research/a2a_adapter/deep_research_task_manager.py
class DeepResearchTaskManager (line 34) | class DeepResearchTaskManager(InMemoryTaskManager):
method __init__ (line 38) | def __init__(self, notification_sender_auth=None):
method send_task_notification (line 49) | async def send_task_notification(self, task: Task):
method setup_sse_consumer (line 66) | async def setup_sse_consumer(self, task_id: str) -> asyncio.Queue:
method enqueue_events_for_sse (line 73) | async def enqueue_events_for_sse(self, task_id: str, event: Union[Task...
method _cleanup_sse_queues (line 83) | async def _cleanup_sse_queues(self, task_id: str, queue_to_remove: Opt...
method dequeue_events_for_sse (line 99) | async def dequeue_events_for_sse(self, request_id: str, task_id: str, ...
method _get_user_query (line 119) | def _get_user_query(self, task_send_params: TaskSendParams) -> str:
method _validate_request (line 130) | def _validate_request(self, request: Union[SendTaskRequest, SendTaskSt...
method on_send_task (line 138) | async def on_send_task(self, request: SendTaskRequest) -> SendTaskResp...
method _process_research_task (line 155) | async def _process_research_task(self, task_send_params: TaskSendParams):
method _process_stream_updates (line 205) | async def _process_stream_updates(self, task_id: str, current_state: D...
method _finalize_task (line 291) | async def _finalize_task(self, task_id: str, final_state: Dict[str, An...
method on_send_task_subscribe (line 315) | async def on_send_task_subscribe(self, request: SendTaskStreamingReque...
FILE: super_agents/deep_research/a2a_adapter/run_server.py
function main (line 25) | def main():
FILE: super_agents/deep_research/a2a_adapter/setup.py
class DummyPushNotificationSender (line 22) | class DummyPushNotificationSender:
method send_push_notification (line 24) | async def send_push_notification(self, url: str, data: dict):
method verify_push_notification_url (line 49) | async def verify_push_notification_url(self, url: str) -> bool:
function setup_a2a_server (line 61) | def setup_a2a_server(host: str = "127.0.0.1", port: int = 8000) -> A2ASe...
function run_server (line 132) | def run_server(host: str = "127.0.0.1", port: int = 8000):
FILE: super_agents/deep_research/main.py
class RateLimitError (line 17) | class RateLimitError(Exception):
function slugify (line 56) | def slugify(text: str) -> str:
function run_research (line 69) | async def run_research(topic: str, depth: Literal['basic', 'advanced'] =...
function main (line 248) | async def main():
FILE: super_agents/deep_research/reason_graph/graph.py
function should_continue_search (line 18) | def should_continue_search(state: ResearchState) -> Literal["execute_sea...
function should_continue_analysis (line 31) | def should_continue_analysis(state: ResearchState) -> Literal["perform_a...
function decide_gap_followup (line 38) | def decide_gap_followup(state: ResearchState) -> Literal["execute_gap_se...
function build_research_graph (line 57) | def build_research_graph(for_web: bool = False) -> StateGraph:
function get_app (line 139) | def get_app(for_web: bool = False) -> Any:
FILE: super_agents/deep_research/reason_graph/nodes.py
function plan_research (line 33) | async def plan_research(state: ResearchState) -> Dict[str, Any]:
function prepare_steps (line 77) | def prepare_steps(state: ResearchState) -> Dict[str, Any]:
function execute_search (line 135) | async def execute_search(state: ResearchState) -> Dict[str, Any]:
function perform_analysis (line 216) | async def perform_analysis(state: ResearchState) -> Dict[str, Any]:
function analyze_gaps (line 306) | async def analyze_gaps(state: ResearchState) -> Dict[str, Any]:
function execute_gap_search (line 441) | async def execute_gap_search(state: ResearchState) -> Dict[str, Any]:
function synthesize_final_report (line 559) | async def synthesize_final_report(state: ResearchState) -> Dict[str, Any]:
function finalize_basic_research (line 700) | def finalize_basic_research(state: ResearchState) -> Dict[str, Any]:
function generate_final_markdown_report (line 727) | async def generate_final_markdown_report(state: ResearchState) -> Dict[s...
FILE: super_agents/deep_research/reason_graph/schemas.py
class SearchQuery (line 8) | class SearchQuery(BaseModel):
class RequiredAnalysis (line 15) | class RequiredAnalysis(BaseModel):
class ResearchPlan (line 21) | class ResearchPlan(BaseModel):
class SearchResultItem (line 30) | class SearchResultItem(BaseModel):
class SearchStepResult (line 38) | class SearchStepResult(BaseModel):
class AnalysisFinding (line 44) | class AnalysisFinding(BaseModel):
class AnalysisResult (line 50) | class AnalysisResult(BaseModel):
class Limitation (line 56) | class Limitation(BaseModel):
class KnowledgeGap (line 63) | class KnowledgeGap(BaseModel):
class RecommendedFollowup (line 69) | class RecommendedFollowup(BaseModel):
class GapAnalysisResult (line 75) | class GapAnalysisResult(BaseModel):
class KeyFinding (line 81) | class KeyFinding(BaseModel):
class FinalSynthesisResult (line 87) | class FinalSynthesisResult(BaseModel):
class StepInfo (line 95) | class StepInfo(BaseModel):
class StreamUpdateData (line 102) | class StreamUpdateData(BaseModel):
class StreamUpdate (line 125) | class StreamUpdate(BaseModel):
FILE: super_agents/deep_research/reason_graph/state.py
class ResearchState (line 15) | class ResearchState(TypedDict):
FILE: super_agents/deep_research/reason_graph/tools.py
class SearchResultItem (line 46) | class SearchResultItem(BaseModel): pass
class SearchQuery (line 47) | class SearchQuery(BaseModel): pass
class StreamUpdate (line 48) | class StreamUpdate(BaseModel): pass
class StreamUpdateData (line 49) | class StreamUpdateData(BaseModel): pass
class ResearchState (line 50) | class ResearchState(dict): pass
function initialize_llms (line 63) | def initialize_llms() -> Tuple[Optional[RunnableSerializable], Optional[...
function generate_structured_output (line 160) | def generate_structured_output(model: Optional[RunnableSerializable], sc...
function extract_tweet_id (line 190) | def extract_tweet_id(url: str) -> Optional[str]:
function add_stream_update (line 197) | def add_stream_update(state: ResearchState, data_dict: Dict[str, Any]) -...
function perform_web_search (line 234) | async def perform_web_search(query: str, depth: Literal['basic', 'advanc...
function perform_academic_search (line 271) | async def perform_academic_search(query: str, priority: int) -> List[Sea...
function perform_x_search (line 312) | async def perform_x_search(query_obj: SearchQuery) -> List[SearchResultI...
FILE: web/app/api/agent/route.ts
constant AGENT_URL (line 8) | const AGENT_URL = process.env.NEXT_PUBLIC_AGENT_URL;
function POST (line 10) | async function POST(request: NextRequest) {
FILE: web/app/chat/[id]/agent-types.ts
type AgentState (line 4) | interface AgentState extends WithMessages {
type WeatherForecast (line 12) | interface WeatherForecast {
type ResearchStatus (line 18) | interface ResearchStatus {
type SearchResult (line 24) | interface SearchResult {
type InterruptValue (line 31) | type InterruptValue = string | number | { "question": string };
type ResumeValue (line 34) | type ResumeValue = string | number;
FILE: web/app/chat/[id]/components/chatbot-node.tsx
type ChatbotNodeProps (line 10) | interface ChatbotNodeProps {
function ChatbotNode (line 15) | function ChatbotNode({ nodeState, fallbackMessages }: ChatbotNodeProps) {
FILE: web/app/chat/[id]/components/checkpoint-card.tsx
type CheckpointCardProps (line 13) | interface CheckpointCardProps {
function CheckpointCard (line 19) | function CheckpointCard({ thread_id, appCheckpoint: node, replayHandler ...
FILE: web/app/chat/[id]/components/node-card.tsx
function NodeCard (line 11) | function NodeCard({ node }: { node: GraphNode<AgentState> }) {
FILE: web/app/chat/[id]/components/reminder.tsx
type ReminderProps (line 6) | interface ReminderProps {
function Reminder (line 11) | function Reminder({ interruptValue, onResume }: ReminderProps) {
FILE: web/app/chat/[id]/components/research/report-preview.tsx
type ReportPreviewProps (line 5) | interface ReportPreviewProps {
function ReportPreview (line 9) | function ReportPreview({ nodeState }: ReportPreviewProps) {
FILE: web/app/chat/[id]/components/research/research-node.tsx
type ResearchNodeProps (line 6) | interface ResearchNodeProps {
function ResearchNode (line 10) | function ResearchNode({ nodeState }: ResearchNodeProps) {
FILE: web/app/chat/[id]/components/research/research-status.tsx
type ResearchStatusProps (line 6) | interface ResearchStatusProps {
function ResearchStatus (line 10) | function ResearchStatus({ nodeState }: ResearchStatusProps) {
FILE: web/app/chat/[id]/components/research/search-results.tsx
type SearchResultsProps (line 5) | interface SearchResultsProps {
function SearchResults (line 9) | function SearchResults({ nodeState }: SearchResultsProps) {
FILE: web/app/chat/[id]/components/weather/cloudy.tsx
function Cloudy (line 6) | function Cloudy() {
FILE: web/app/chat/[id]/components/weather/rainy.tsx
function Rainy (line 7) | function Rainy() {
FILE: web/app/chat/[id]/components/weather/snowy.tsx
function Snowy (line 7) | function Snowy() {
FILE: web/app/chat/[id]/components/weather/sunny.tsx
function Sunny (line 7) | function Sunny() {
FILE: web/app/chat/[id]/components/weather/weather-node.tsx
type WeatherNodeProps (line 9) | interface WeatherNodeProps {
function WeatherNode (line 13) | function WeatherNode({ nodeState }: WeatherNodeProps) {
FILE: web/app/chat/[id]/page.tsx
function ChatPage (line 19) | function ChatPage() {
FILE: web/app/chat/page.tsx
function ChatsPage (line 1) | function ChatsPage() {
FILE: web/app/deep-research/[id]/page.tsx
type DeepResearchState (line 27) | interface DeepResearchState extends WithMessages {
function DeepResearchProgressDisplay (line 34) | function DeepResearchProgressDisplay({ updates }: { updates: Record<stri...
function MessageHistoryDisplay (line 81) | function MessageHistoryDisplay({ messages }: { messages: Message[] }) {
function FinalReportDisplay (line 130) | function FinalReportDisplay({ report }: { report: string | null }) {
function DeepResearchPage (line 148) | function DeepResearchPage() {
FILE: web/app/deep-research/page.tsx
function DeepResearchInitiationPage (line 12) | function DeepResearchInitiationPage() {
FILE: web/app/layout.tsx
function RootLayout (line 23) | function RootLayout({
FILE: web/app/page.tsx
function FeatureBlock (line 35) | function FeatureBlock({ title, description, icon: Icon }: { title: strin...
function WelcomePage (line 48) | function WelcomePage() {
FILE: web/components/app-sidebar.tsx
function AppSidebar (line 33) | function AppSidebar() {
FILE: web/components/theme-provider.tsx
function ThemeProvider (line 13) | function ThemeProvider({
FILE: web/components/theme-switcher.tsx
function ThemeSwitcher (line 15) | function ThemeSwitcher() {
FILE: web/components/ui/badge.tsx
type BadgeProps (line 26) | interface BadgeProps
function Badge (line 30) | function Badge({ className, variant, ...props }: BadgeProps) {
FILE: web/components/ui/button.tsx
type ButtonProps (line 37) | interface ButtonProps
FILE: web/components/ui/sheet.tsx
type SheetContentProps (line 52) | interface SheetContentProps
FILE: web/components/ui/sidebar.tsx
constant SIDEBAR_COOKIE_NAME (line 22) | const SIDEBAR_COOKIE_NAME = "sidebar:state"
constant SIDEBAR_COOKIE_MAX_AGE (line 23) | const SIDEBAR_COOKIE_MAX_AGE = 60 * 60 * 24 * 7
constant SIDEBAR_WIDTH (line 24) | const SIDEBAR_WIDTH = "14rem"
constant SIDEBAR_WIDTH_MOBILE (line 25) | const SIDEBAR_WIDTH_MOBILE = "16rem"
constant SIDEBAR_WIDTH_ICON (line 26) | const SIDEBAR_WIDTH_ICON = "3rem"
constant SIDEBAR_KEYBOARD_SHORTCUT (line 27) | const SIDEBAR_KEYBOARD_SHORTCUT = "b"
type SidebarContext (line 29) | type SidebarContext = {
function useSidebar (line 41) | function useSidebar() {
FILE: web/components/ui/skeleton.tsx
function Skeleton (line 3) | function Skeleton({
FILE: web/hooks/use-mobile.tsx
constant MOBILE_BREAKPOINT (line 3) | const MOBILE_BREAKPOINT = 768
function useIsMobile (line 5) | function useIsMobile() {
FILE: web/hooks/useLangGraphAgent/actions.ts
constant AGENT_URL (line 5) | const AGENT_URL = process.env.NEXT_PUBLIC_AGENT_URL;
function getHistory (line 7) | async function getHistory<TAgentState, TInterruptValue>(threadId: string...
function stopAgent (line 30) | async function stopAgent(threadId: string): Promise<void> {
FILE: web/hooks/useLangGraphAgent/api.ts
function parseSSEMessage (line 9) | function parseSSEMessage<TAgentState, TInterruptValue>(chunk: string): A...
FILE: web/hooks/useLangGraphAgent/ascii-tree.ts
type TreeNode (line 3) | interface TreeNode {
function buildTree (line 11) | function buildTree<TAgentState, TInterruptValue>(checkpoints: Checkpoint...
type PrintOptions (line 45) | interface PrintOptions {
function defaultRenderState (line 50) | function defaultRenderState(state: any): string {
function printTreeNode (line 58) | function printTreeNode(node: TreeNode, options: PrintOptions = {}, prefi...
function printCheckpointTree (line 84) | function printCheckpointTree<TAgentState, TInterruptValue>(checkpoints: ...
FILE: web/hooks/useLangGraphAgent/types.ts
type AgentStatus (line 11) | type AgentStatus = 'idle' | 'running' | 'stopping' | 'error';
type CheckpointConfig (line 14) | type CheckpointConfig = { configurable: { thread_id: string, checkpoint_...
type CheckpointMetadata (line 17) | type CheckpointMetadata = {
type Interrupt (line 25) | interface Interrupt<TInterruptValue> {
type Checkpoint (line 32) | interface Checkpoint<TAgentState, TInterruptValue> {
type AppCheckpoint (line 50) | interface AppCheckpoint<TAgentState, TInterruptValue> {
type GraphNode (line 64) | interface GraphNode<TAgentState> {
type Message (line 70) | interface Message {
type ToolCall (line 78) | type ToolCall = { name: string, args: object, id: string };
type StreamUpdateData (line 81) | interface StreamUpdateData {
type NodeMessageChunk (line 93) | interface NodeMessageChunk {
type MessageChunk (line 99) | interface MessageChunk {
type ToolCallChunk (line 105) | type ToolCallChunk = { name?: string, args?: object, id?: string };
type WithMessages (line 110) | interface WithMessages {
type AgentEvent (line 117) | interface AgentEvent<TAgentState, TInterruptValue> {
type AgentInput (line 123) | interface AgentInput {
type RunAgentInput (line 127) | interface RunAgentInput<TAgentState> extends AgentInput {
type ResumeAgentInput (line 131) | interface ResumeAgentInput<TResumeValue> extends AgentInput {
type ForkAgentInput (line 135) | interface ForkAgentInput<TAgentState> extends AgentInput {
type ReplayAgentInput (line 140) | interface ReplayAgentInput extends AgentInput {
type RunAgentInputInternal (line 144) | interface RunAgentInputInternal<TAgentState> extends RunAgentInput<TAgen...
type ResumeAgentInputInternal (line 148) | interface ResumeAgentInputInternal<TResumeValue> extends ResumeAgentInpu...
type ForkAgentInputInternal (line 152) | interface ForkAgentInputInternal<TAgentState> extends ForkAgentInput<TAg...
type ReplayAgentInputInternal (line 156) | interface ReplayAgentInputInternal extends ReplayAgentInput {
FILE: web/hooks/useLangGraphAgent/useLangGraphAgent.tsx
type UseAgentStateCallbacks (line 25) | interface UseAgentStateCallbacks<TAgentState extends object | WithMessag...
function useLangGraphAgent (line 47) | function useLangGraphAgent<TAgentState extends object | WithMessages, TI...
FILE: web/stores/chat-store.tsx
type ChatItem (line 3) | interface ChatItem {
type ChatStore (line 12) | interface ChatStore {
FILE: web_for_a2a/app/api/a2a/route.ts
constant A2A_BACKEND_URL (line 8) | const A2A_BACKEND_URL = process.env.A2A_BACKEND_URL || 'http://127.0.0.1...
function POST (line 11) | async function POST(request: NextRequest) {
function GET (line 112) | async function GET(request: NextRequest) {
FILE: web_for_a2a/app/deepresearch/page.tsx
type TextPart (line 8) | interface TextPart { type: "text"; text: string; }
type DataPart (line 9) | interface DataPart { type: "data"; data: Record<string, any>; }
type Part (line 10) | type Part = TextPart | DataPart;
type Message (line 11) | interface Message { role: "user" | "agent"; parts: Part[]; }
type TaskStateString (line 13) | type TaskStateString = "submitted" | "working" | "input-required" | "com...
type TaskStatus (line 14) | interface TaskStatus { state: TaskStateString | string; message?: Messag...
type Artifact (line 15) | interface Artifact { parts: Part[]; index?: number; /* 其他可选字段 */ }
type TaskStatusUpdateEvent (line 16) | interface TaskStatusUpdateEvent { id: string; status: TaskStatus; final:...
type TaskArtifactUpdateEvent (line 17) | interface TaskArtifactUpdateEvent { id:string; artifact: Artifact; final...
type StreamEventResult (line 18) | type StreamEventResult = TaskStatusUpdateEvent | TaskArtifactUpdateEvent;
type JSONRPCError (line 19) | interface JSONRPCError { code: number; message: string; data?: any; }
type SendTaskStreamingResponse (line 20) | interface SendTaskStreamingResponse {
constant A2A_SERVER_URL (line 28) | const A2A_SERVER_URL = process.env.NEXT_PUBLIC_A2A_SERVER_URL || 'http:/...
function DeepResearchPage (line 30) | function DeepResearchPage() {
FILE: web_for_a2a/app/layout.tsx
function RootLayout (line 9) | function RootLayout({
FILE: web_for_a2a/app/page.tsx
function Home (line 5) | function Home() {
Condensed preview — 240 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,740K chars).
[
{
"path": ".gitignore",
"chars": 423,
"preview": "# Python\n__pycache__/\n*.py[cod]\n*$py.class\n*.so\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\np"
},
{
"path": "README.md",
"chars": 7678,
"preview": "# Mentis - Agent Development Kit\n\n[](https://www.py"
},
{
"path": "__init__.py",
"chars": 33,
"preview": "# Project package initialization\n"
},
{
"path": "api/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "api/agent/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "api/agent/loader.py",
"chars": 4504,
"preview": "# Agent Loader Module\n# This module is responsible for loading agents from the web_agents directory\n\nimport importlib\nim"
},
{
"path": "api/server.py",
"chars": 13526,
"preview": "import uvicorn\nfrom langgraph.types import Command, Interrupt\nfrom fastapi import FastAPI, Request, HTTPException, Query"
},
{
"path": "api/utils.py",
"chars": 4565,
"preview": "import json\nfrom typing import Dict, Any, List, Optional\nfrom langchain_core.messages import BaseMessage, AIMessage, Hum"
},
{
"path": "core/__init__.py",
"chars": 28,
"preview": "# Core module initialization"
},
{
"path": "core/a2a/README.md",
"chars": 5245,
"preview": "# Mentis A2A (Agent2Agent) 协议集成\n\n本目录 (`core/a2a/`) 包含用于实现 Agent2Agent (A2A) 协议的客户端和服务器实现,使 Mentis Agents 能够与其他支持 A2A 协议的"
},
{
"path": "core/a2a/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "core/a2a/agent_task_manager.py",
"chars": 13332,
"preview": "import asyncio\nimport logging\nimport traceback\nfrom typing import Dict, Any, Union, AsyncIterable, Optional\nfrom core.a2"
},
{
"path": "core/a2a/client/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "core/a2a/client/card_resolver.py",
"chars": 672,
"preview": "import httpx\nfrom core.a2a.types import (\n AgentCard,\n A2AClientJSONError,\n)\nimport json\n\n\nclass A2ACardResolver:\n"
},
{
"path": "core/a2a/client/client.py",
"chars": 3347,
"preview": "import httpx\nfrom httpx_sse import connect_sse\nfrom typing import Any, AsyncIterable\nfrom core.a2a.types import (\n Ag"
},
{
"path": "core/a2a/config.json",
"chars": 104,
"preview": "{\n \"local_agent\": {\n \"url\": \"http://127.0.0.1:8000/\",\n \"auth\": {\n \"type\": \"none\"\n }\n }\n}"
},
{
"path": "core/a2a/server/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "core/a2a/server/server.py",
"chars": 9950,
"preview": "# core/a2a/server/server.py\nfrom starlette.applications import Starlette\nfrom starlette.responses import JSONResponse\nfr"
},
{
"path": "core/a2a/server/task_manager.py",
"chars": 10202,
"preview": "from abc import ABC, abstractmethod\nfrom typing import Union, AsyncIterable, List\nfrom core.a2a.types import Task\nfrom c"
},
{
"path": "core/a2a/server/utils.py",
"chars": 852,
"preview": "from core.a2a.types import (\n JSONRPCResponse,\n ContentTypeNotSupportedError,\n UnsupportedOperationError,\n)\nfro"
},
{
"path": "core/a2a/types.py",
"chars": 8792,
"preview": "from typing import Union, Any\nfrom pydantic import BaseModel, Field, TypeAdapter\nfrom typing import Literal, List, Annot"
},
{
"path": "core/a2a/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "core/a2a/utils/in_memory_cache.py",
"chars": 3380,
"preview": "\"\"\"In Memory Cache utility.\"\"\"\n\nimport threading\nimport time\nfrom typing import Any, Dict, Optional\n\n\nclass InMemoryCach"
},
{
"path": "core/a2a/utils/push_notification_auth.py",
"chars": 4914,
"preview": "from jwcrypto import jwk\nimport uuid\nfrom starlette.responses import JSONResponse\nfrom starlette.requests import Request"
},
{
"path": "core/agents/__init__.py",
"chars": 30,
"preview": "# Agents module initialization"
},
{
"path": "core/agents/base/base_agent.py",
"chars": 12905,
"preview": "import json\nfrom typing import List, Dict, Any, Optional, Union, Callable, Sequence, TypeVar, cast\nfrom langchain_core.l"
},
{
"path": "core/agents/base/create_react_agent_wrapper.py",
"chars": 3097,
"preview": "import logging\nfrom typing import Optional, Callable, Dict\nfrom langgraph.utils.runnable import RunnableCallable\nfrom la"
},
{
"path": "core/agents/base/react_agent.py",
"chars": 7440,
"preview": "from typing import Any, Callable, Dict, List, Optional, Type, Union, Literal, Sequence\n\nfrom langchain_core.language_mod"
},
{
"path": "core/agents/react_based_supervisor/__init__.py",
"chars": 104,
"preview": "# 从当前目录导入create_supervisor函数\nfrom .supervisor import create_supervisor\n\n__all__ = [\"create_supervisor\"]\n"
},
{
"path": "core/agents/react_based_supervisor/agent_name.py",
"chars": 5274,
"preview": "import re\nfrom typing import Literal\n\nfrom langchain_core.language_models import LanguageModelLike\nfrom langchain_core.m"
},
{
"path": "core/agents/react_based_supervisor/handoff.py",
"chars": 2506,
"preview": "import re\nimport uuid\n\nfrom langchain_core.messages import AIMessage, ToolCall, ToolMessage\nfrom langchain_core.tools im"
},
{
"path": "core/agents/react_based_supervisor/planning_handler.py",
"chars": 4208,
"preview": "import uuid\nimport datetime\nfrom typing import List, Dict, Optional\n\nclass PlanningStateHandler:\n \"\"\"\n Manages a p"
},
{
"path": "core/agents/react_based_supervisor/simple_planning_tool.py",
"chars": 4305,
"preview": "import json\nfrom typing import Dict, List, Optional\nfrom langchain_core.tools import BaseTool\nfrom core.agents.superviso"
},
{
"path": "core/agents/react_based_supervisor/state_schema.py",
"chars": 1308,
"preview": "from typing import Dict, List, Optional, Any, Literal, TypedDict, Union\nfrom langchain_core.messages import BaseMessage\n"
},
{
"path": "core/agents/react_based_supervisor/supervisor.py",
"chars": 8701,
"preview": "import inspect\nfrom typing import Any, Callable, Literal, Optional, Type, Union, Dict, Optional\n\nfrom langchain_core.lan"
},
{
"path": "core/agents/react_supervisor_agent.py",
"chars": 8680,
"preview": "from typing import Any, Callable, Dict, List, Optional, Union\nimport re\n\nfrom langchain_core.language_models import Lang"
},
{
"path": "core/agents/sb_supervisor_agent.py",
"chars": 3750,
"preview": "# reason_graph/supervisor_agent.py\nfrom typing import Callable, List, Optional, Union, cast, Literal\nfrom langchain_cor"
},
{
"path": "core/agents/state_based_supervisor/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "core/agents/state_based_supervisor/agent_name.py",
"chars": 5274,
"preview": "import re\nfrom typing import Literal\n\nfrom langchain_core.language_models import LanguageModelLike\nfrom langchain_core.m"
},
{
"path": "core/agents/state_based_supervisor/evaluate_result_node.py",
"chars": 6869,
"preview": "# reason_graph/evaluate_result_node.py\n\nimport json\nimport time\nimport copy\nimport traceback\nimport anyio \nfrom typing i"
},
{
"path": "core/agents/state_based_supervisor/handoff.py",
"chars": 7020,
"preview": "# reason_graph/handoff.py\n# (Paste the code user provided for handoff.py here)\nimport re\nimport uuid\nfrom typing import "
},
{
"path": "core/agents/state_based_supervisor/planner_node.py",
"chars": 7607,
"preview": "import re\nimport json\nimport time\nimport copy\nimport ast\nimport traceback\nimport anyio # <--- 导入 anyio\nfrom typing impor"
},
{
"path": "core/agents/state_based_supervisor/planning_handler.py",
"chars": 8380,
"preview": "# reason_graph/planning_handler.py\nimport uuid\nimport datetime\nfrom typing import List, Dict, Optional, Any\nfrom .state_"
},
{
"path": "core/agents/state_based_supervisor/prompt.py",
"chars": 13099,
"preview": "# # --- Planner Agent System Prompt (新增) ---\n# PLANNER_SYSTEM_PROMPT_TEMPLATE = \"\"\"You are an expert planning agent. You"
},
{
"path": "core/agents/state_based_supervisor/state_schema.py",
"chars": 2477,
"preview": "# reason_graph/state_schema.py\nimport operator\nfrom typing import Dict, List, Optional, Any, Literal, TypedDict, Sequenc"
},
{
"path": "core/agents/state_based_supervisor/supervisor_graph.py",
"chars": 14949,
"preview": "# reason_graph/supervisor_graph.py\nimport inspect\nimport re\nimport functools\nimport uuid\nimport asyncio\nimport anyio\nimp"
},
{
"path": "core/agents/state_based_supervisor/supervisor_node.py",
"chars": 13322,
"preview": "# reason_graph/supervisor_node.py\n\nimport re\nimport json\nimport time\nimport copy\nimport ast \nimport traceback\nfrom typin"
},
{
"path": "core/agents/sub_agents/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "core/agents/sub_agents/coder_agent.py",
"chars": 6541,
"preview": "# Refactored coder_agent.py\nfrom typing import Any, List, Optional, Union, Callable, Type\nfrom langchain_core.language_m"
},
{
"path": "core/agents/sub_agents/data_analyst_agent.py",
"chars": 6535,
"preview": "# data_analyst_agent.py (or in main.py)\n\nfrom typing import Any, List, Optional, Union, Callable, Type\nfrom langchain_co"
},
{
"path": "core/agents/sub_agents/designer_agent.py",
"chars": 5413,
"preview": "# 文件路径示例: reason_graph/designer_agent.py\n\nfrom typing import Any, List, Optional, Union, Callable, Type\nfrom langchain_c"
},
{
"path": "core/agents/sub_agents/reporter_agent.py",
"chars": 7864,
"preview": "# 文件路径: reason_graph/reporter_agent.py\n\nimport json\nimport time\nfrom datetime import datetime\nfrom typing import Dict, A"
},
{
"path": "core/agents/sub_agents/research_agent.py",
"chars": 5388,
"preview": "# 文件路径示例: reason_graph/research_agent.py\n\nfrom typing import Any, List, Optional, Union, Callable, Type, cast\nfrom langc"
},
{
"path": "core/llm/llm_manager.py",
"chars": 11673,
"preview": "# reason_graph/llm_manager.py\nimport os\nfrom enum import Enum, auto\nfrom typing import Any, Dict, List, Optional, Type, "
},
{
"path": "core/llm/model_config.py",
"chars": 2573,
"preview": "# reason_graph/model_config.py\nfrom langchain_openai import ChatOpenAI\n# from langchain_groq import ChatGroq # 不再需要\n# (如"
},
{
"path": "core/mcp/README.md",
"chars": 7205,
"preview": "# Mentis MCP 客户端与配置指南\n\n本目录 (`core/mcp/`) 包含用于与模型上下文协议 (MCP - Model Context Protocol) 服务器进行交互的 Python 客户端实现。\n\n## 背景\n\nMCP "
},
{
"path": "core/mcp/__init__.py",
"chars": 64,
"preview": "# core/mcp/__init__.py\n\"\"\"\nMCP (Model Context Protocol) 功能模块\n\"\"\""
},
{
"path": "core/mcp/client.py",
"chars": 9510,
"preview": "import os\nimport asyncio\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Optional, Union, Type, Literal, Ty"
},
{
"path": "core/mcp/config_loader.py",
"chars": 4596,
"preview": "# core/mcp/config_loader.py (修改 load_config 返回类型)\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import Dict"
},
{
"path": "core/mcp/mcp_server_config.json",
"chars": 886,
"preview": "{\n \"fetch_via_uvx\": {\n \"id\": \"fetch-uvx-stdio\",\n \"type\": \"mcp-server\",\n \"description\": \"Fetch Server l"
},
{
"path": "core/mcp/run_server.py",
"chars": 8404,
"preview": "# core/mcp/run_server.py (FINAL - Direct FastMCP Registration)\nimport os\nimport sys\nimport argparse\nimport traceback\nimp"
},
{
"path": "core/mcp/server.py",
"chars": 9545,
"preview": "import os\nimport sys\nimport traceback\nimport asyncio\nimport time\nimport json\nimport functools\nfrom typing import Dict, A"
},
{
"path": "core/mcp/test/README.md",
"chars": 1303,
"preview": "# MCP 测试框架说明\n\n## 概述\n\nMCP(Machine Conversation Protocol)是一个用于机器对话的协议框架,它允许不同的系统通过标准化的接口进行通信。本测试框架提供了一种方式来测试MCP服务器的功能和性能。\n"
},
{
"path": "core/mcp/test/__init__.py",
"chars": 56,
"preview": "# MCP测试模块\n# 包含用于测试MCP(Message Control Protocol)功能的各种测试脚本"
},
{
"path": "core/mcp/test/minimal_fastmcp_test.py",
"chars": 1520,
"preview": "import asyncio\nfrom mcp.server.fastmcp import FastMCP\nimport logging\n\n# 配置基本日志,看FastMCP内部是否有更多信息\nlogging.basicConfig(lev"
},
{
"path": "core/mcp/test/test_minimal_client.py",
"chars": 2918,
"preview": "# test_minimal_client_fixed.py - 用于测试minimal_fastmcp_test.py的客户端脚本(修复版)\nimport os\nimport sys\nimport asyncio\nimport json\n"
},
{
"path": "core/tools/__init__.py",
"chars": 7642,
"preview": "# Tools package initialization\nfrom langchain_community.agent_toolkits.load_tools import load_tools\nfrom core.tools.regi"
},
{
"path": "core/tools/e2b_tool.py",
"chars": 8486,
"preview": "# core/tools/e2b_tool.py\n\nimport os\nimport json\nimport asyncio\nimport traceback\nfrom typing import Dict, Any, Optional, "
},
{
"path": "core/tools/firecrawl_tool.py",
"chars": 9076,
"preview": "# 文件路径: core/tools/firecrawl_tool.py (或您存放工具的文件)\n\nimport os\nimport json # 虽然不直接返回 JSON,但可能用于处理 metadata\nfrom typing impo"
},
{
"path": "core/tools/registry.py",
"chars": 2556,
"preview": "from enum import Enum\nfrom typing import List, Dict, Union, Optional\nfrom langchain.tools import Tool\n\n# 定义工具分类枚举\nclass "
},
{
"path": "core/tools/replicate_flux_tool.py",
"chars": 9831,
"preview": "# 文件路径: core/tools/replicate_flux_tool.py (或类似)\n\nimport os\nimport asyncio\nimport json\nfrom typing import Dict, Any, Opti"
},
{
"path": "core/utils/agent_utils.py",
"chars": 4545,
"preview": "import os\nfrom typing import Dict, Any, Optional, Literal\nfrom langchain_core.messages import AIMessage, ToolMessage\nimp"
},
{
"path": "core/utils/timezone.py",
"chars": 1139,
"preview": "from datetime import datetime\nimport os\nfrom typing import Optional\nfrom zoneinfo import ZoneInfo\n\ndef get_timezone() ->"
},
{
"path": "examples/01_supervisor_test.py",
"chars": 4017,
"preview": "from langgraph.prebuilt import create_react_agent\nfrom core.agents.supervisor import create_supervisor\nfrom langchain_op"
},
{
"path": "examples/02_supervisor_agent_test.py",
"chars": 4158,
"preview": "from langgraph.prebuilt import create_react_agent\nfrom core.agents.base.react_agent import ReactAgent\nfrom core.agents.r"
},
{
"path": "examples/03_tavily_tools_test.py",
"chars": 4451,
"preview": "import os\nfrom langgraph.prebuilt import create_react_agent\nfrom core.agents.react_supervisor_agent import SupervisorAge"
},
{
"path": "examples/04_react_agent_test.py",
"chars": 2955,
"preview": "import os\nimport json\nfrom langgraph.prebuilt import create_react_agent\nfrom langchain_openai import ChatOpenAI\nfrom lan"
},
{
"path": "examples/05_react_agent_user_input.py",
"chars": 5257,
"preview": "import asyncio\nimport os\nfrom typing import Dict, Any\n\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messa"
},
{
"path": "examples/06_web_extraction_tools_test.py",
"chars": 4305,
"preview": "import os\nimport sys\nfrom langgraph.prebuilt import create_react_agent\nfrom langchain_openai import ChatOpenAI\nimport js"
},
{
"path": "examples/07_web_extraction_with_filesystem.py",
"chars": 8823,
"preview": "import os\nimport sys\nimport json\nimport asyncio\nfrom datetime import datetime\nfrom typing import Dict, Any, List\n\nfrom l"
},
{
"path": "examples/08_react_agent_tool_registry_test.py",
"chars": 5145,
"preview": "import os\nimport sys\nimport json\nfrom typing import Dict, Any, List\n\nfrom langchain_openai import ChatOpenAI\nfrom langch"
},
{
"path": "examples/09_e2b_code_interpreter_test.py",
"chars": 4061,
"preview": "import os\nimport sys\nimport json\nfrom typing import Dict, Any, List\n\nfrom langchain_openai import ChatOpenAI\nfrom langch"
},
{
"path": "examples/10_financial_data_analysis.py",
"chars": 10748,
"preview": "import os\nimport sys\nimport json\nfrom typing import Dict, Any, List\n\nfrom langchain_openai import ChatOpenAI\nfrom langch"
},
{
"path": "examples/11_e2b_sandbox_test.py",
"chars": 30891,
"preview": "import os\nimport sys\nimport json\nfrom typing import Dict, Any, List\nfrom datetime import datetime\n\nfrom langchain_openai"
},
{
"path": "examples/12_planning_supervisor_test.py",
"chars": 3946,
"preview": "from langgraph.prebuilt import create_react_agent\nfrom core.agents.react_supervisor_agent import SupervisorAgent\nfrom co"
},
{
"path": "examples/13_multi_agent_roles_test.py",
"chars": 10901,
"preview": "from langgraph.prebuilt import create_react_agent\nfrom core.agents.react_supervisor_agent import SupervisorAgent\nfrom co"
},
{
"path": "examples/14_mcp_client_fetch_test.py",
"chars": 6298,
"preview": "import os\nimport sys\nimport asyncio\nimport traceback\nfrom typing import Dict, Optional, Type\n\nfrom dotenv import load_do"
},
{
"path": "examples/15_mcp_agent_test.py",
"chars": 21989,
"preview": "# examples/14_mcp_fetch_basetool_test.py (最终版 - BaseTool 子类)\nimport os\nimport sys\nimport asyncio\nimport json\nfrom dotenv"
},
{
"path": "examples/16_google_a2a/README.md",
"chars": 13582,
"preview": "# LangGraph Agent 与 A2A 协议集成框架\n\n## 概述\n\n本项目提供了一个将 **LangGraph Agent**(特别是基于 ReAct 模式并能调用工具的 Agent)与 **A2A (Agent-to-Agent"
},
{
"path": "examples/16_google_a2a/__init__.py",
"chars": 93,
"preview": "# examples/a2a/__init__.py\n\n\"\"\"\nA2A协议与LangGraph集成示例\n\n本目录包含了A2A协议与LangGraph Agent集成的示例和文档。\n\"\"\""
},
{
"path": "examples/16_google_a2a/agent_task_manager_test.py",
"chars": 6519,
"preview": "# examples/a2a/agent_task_manager_test.py\n\nimport os\nimport sys\nimport asyncio\nimport logging\nfrom typing import TypedDi"
},
{
"path": "examples/16_google_a2a/client_example.py",
"chars": 7442,
"preview": "# examples/a2a/client_example.py\n\nimport os\nimport sys\nimport asyncio\nimport json\nimport logging # 添加 logging\nfrom typin"
},
{
"path": "examples/16_google_a2a/currency_agent_test.py",
"chars": 13236,
"preview": "# examples/a2a/currency_agent_test.py\n\nimport os\nimport sys\nimport asyncio\nimport json\nimport logging\nfrom typing import"
},
{
"path": "examples/16_google_a2a/currency_agent_test_README.md",
"chars": 4301,
"preview": "# LangGraph Agent A2A协议交互测试\n\n## 概述\n\n本测试脚本 (`examples/a2a/currency_agent_test.py`) 旨在通过具体的交互场景,测试和演示如何使用 A2A 客户端与先前通过 `la"
},
{
"path": "examples/16_google_a2a/langgraph_integration.py",
"chars": 8154,
"preview": "# examples/a2a/langgraph_integration.py\n\nimport os\nimport sys\nimport asyncio # asyncio 仍然可能被依赖库使用,保留导入\nimport logging\n# "
},
{
"path": "examples/TODO_computer_tool_demo.py",
"chars": 2557,
"preview": "from typing import Annotated, Literal\nfrom langchain_core.messages import HumanMessage, AIMessage\nfrom langchain.agents "
},
{
"path": "examples/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "examples/state_based_supervisor_examples/01_simple.py",
"chars": 6088,
"preview": "import asyncio\nimport json\nimport os\nimport re\nimport time \nfrom datetime import datetime \nfrom typing import Literal, L"
},
{
"path": "examples/state_based_supervisor_examples/02_tavily.py",
"chars": 9398,
"preview": "# main.py (用于测试 State-Based Supervisor 和 ReactAgent)\n\nimport asyncio\nimport json\nimport os\nfrom typing import Dict, Any,"
},
{
"path": "examples/state_based_supervisor_examples/03_multi_agents.py",
"chars": 9976,
"preview": "# main.py (Multi-Agent Test with State-Based Supervisor)\n\nimport asyncio\nimport json\nimport os\nimport re\nimport time\nimp"
},
{
"path": "examples/web_agents/README.md",
"chars": 312,
"preview": "# Web Agents\n\n这个目录包含可以通过web界面加载的代理示例。每个子目录代表一个独立的代理实现,可以被server.py动态加载。\n\n## 目录结构\n\n每个代理应遵循以下结构:\n\n```\nagent_name/\n __init"
},
{
"path": "examples/web_agents/README_SPEC.md",
"chars": 3665,
"preview": "# Web Agent 开发规范\n\n## 1. 概述\n\n本规范旨在统一Web Agent的开发流程和命名约定,确保前后端协同工作,避免出现前端组件无法正确显示后端数据的问题。本文档基于实际开发经验,特别强调前后端节点命名一致性的重要性。\n\n"
},
{
"path": "examples/web_agents/__init__.py",
"chars": 88,
"preview": "# Web Agents Package\n# This package contains web agents that can be loaded by the server"
},
{
"path": "examples/web_agents/research_assistant/README.md",
"chars": 357,
"preview": "# 研究助手\n\n这是一个强大的研究助手代理,可以帮助用户进行在线研究、信息收集和报告生成。\n\n## 功能\n\n- 在线搜索信息\n- 提取和总结网页内容\n- 生成研究报告\n- 实时显示研究进度\n\n## 使用方法\n\n用户可以通过自然语言与代理交互"
},
{
"path": "examples/web_agents/research_assistant/__init__.py",
"chars": 175,
"preview": "# Research Assistant Agent\n# This module provides a research assistant agent that can crawl websites and extract content"
},
{
"path": "examples/web_agents/research_assistant/graph.py",
"chars": 1783,
"preview": "from langgraph.prebuilt import create_react_agent\nfrom langchain_openai import ChatOpenAI\nfrom typing import Dict, Any\nf"
},
{
"path": "examples/web_agents/weather_agent/README.md",
"chars": 240,
"preview": "# 天气代理\n\n这是一个简单的天气查询代理,可以回答用户关于天气的问题,并提供天气预报信息。\n\n## 功能\n\n- 查询当前天气\n- 创建提醒\n\n## 使用方法\n\n用户可以通过自然语言与代理交互,例如:\n\n- \"今天北京的天气怎么样?\"\n- "
},
{
"path": "examples/web_agents/weather_agent/__init__.py",
"chars": 4000,
"preview": "# Weather Agent Example\n# This is a simple weather agent that can be loaded by the server\n\nimport operator\nfrom typing i"
},
{
"path": "instructions/00.Langgraph 和 React Agent.md",
"chars": 9715,
"preview": "# 一、LangGraph 的核心思想\n\nLangGraph 是一个可以让开发者以**图(Graph)**的方式来编排对话式AI流程的库,提供了以下能力:\n\n1. **状态驱动**:在传统的对话模型中,我们经常需要维护对话上下文、剩余步骤等"
},
{
"path": "instructions/01.supervisor_pattern.md",
"chars": 7885,
"preview": "# Supervisor 模式:多智能体协作的核心实现\n\n## 1. 引言\n\n在人工智能领域,多智能体系统(Multi-Agent System)是一种将复杂任务分解为多个专业智能体协同完成的架构模式。本文将详细介绍我们在 Mentis 项"
},
{
"path": "instructions/02.supervisor_pattern_agent.md",
"chars": 14649,
"preview": "# Supervisor 模式:多智能体协作的核心实现 (Agent 封装模式)\n\n## 1. 引言\n\n在人工智能领域,多智能体系统(Multi-Agent System)是一种将复杂任务分解为多个专业智能体协同完成的架构模式。本文将详细介"
},
{
"path": "instructions/03.tavily_search_integration.md",
"chars": 5426,
"preview": "# Tavily搜索工具集成:为多智能体系统提供实时信息能力\n\n## 1. 引言\n\n在多智能体系统中,获取实时、准确的外部信息是提升系统实用性的关键因素。本文将详细介绍我们在 Mentis 项目中集成 Tavily 搜索工具的实现,这使得我"
},
{
"path": "instructions/04.react_agent.md",
"chars": 4167,
"preview": "# ReactAgent:基于ReAct方法论的多步推理与工具调用框架\n\n## 1. 引言\n\nReactAgent是一个基于ReAct方法论的智能体框架,它能够通过多步推理和工具调用来解决复杂问题。本文将详细介绍ReactAgent的核心概"
},
{
"path": "instructions/05.react_agent_user_input.md",
"chars": 3291,
"preview": "# ReactAgent与用户交互:构建交互式研究助手\n\n## 1. 引言\n\n本文将介绍如何使用ReactAgent构建一个能够与用户进行交互的研究助手,该助手能够接收用户输入,使用搜索工具获取信息,并提供深入的分析结果。这种交互式助手特别"
},
{
"path": "instructions/06.web_extraction_tools.md",
"chars": 3688,
"preview": "# 网页提取工具:FireCrawl与Jina的集成与应用\n\n## 1. 引言\n\n网页内容提取是智能体系统中的重要能力,它使智能体能够从互联网获取、分析和处理结构化和非结构化的网页内容。本文将详细介绍如何在Mentis框架中集成和使用Fir"
},
{
"path": "instructions/07.web_extraction_with_filesystem.md",
"chars": 5194,
"preview": "# 网页提取与文件系统集成:构建内容采集与存储系统\n\n## 1. 引言\n\n在智能体系统中,网页内容提取通常需要与文件系统操作相结合,以便将提取的内容持久化存储。本文将详细介绍如何在Mentis框架中集成网页提取工具和文件系统工具,并使用Su"
},
{
"path": "instructions/08.react_agent_tool_registry.md",
"chars": 7343,
"preview": "# 工具注册机制与ReactAgent集成:构建可扩展的智能体系统\n\n## 1. 引言\n\n工具注册机制是构建可扩展智能体系统的关键组件,它允许我们以统一的方式管理和使用各种工具,并将这些工具与ReactAgent集成。本文将详细介绍Ment"
},
{
"path": "instructions/09.e2b_sandbox_integration.md",
"chars": 5607,
"preview": "# E2B沙箱环境与智能代理集成指南\n\n## 1. 引言\n\nE2B沙箱环境是一个强大的代码执行工具,它提供了安全、隔离的环境来运行Python代码和Shell命令。将E2B沙箱与智能代理(如ReactAgent)集成,可以显著增强代理的能力"
},
{
"path": "log_analyzer.py",
"chars": 6549,
"preview": "import re\nimport sys\nimport argparse\nfrom collections import defaultdict\nimport json\n\ndef parse_log_file(file_path):\n "
},
{
"path": "pyproject.toml",
"chars": 1043,
"preview": "[build-system]\nrequires = [\"setuptools>=42\", \"wheel\"]\nbuild-backend = \"setuptools.build_meta\"\nreadme = \"README.md\"\nrequi"
},
{
"path": "requirements.txt",
"chars": 5022,
"preview": "# This file was autogenerated by uv via the following command:\n# uv pip compile pyproject.toml -o requirements.txt\nai"
},
{
"path": "setup.py",
"chars": 38,
"preview": "from setuptools import setup\n\nsetup()\n"
},
{
"path": "super_agents/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/browser_use/README.md",
"chars": 7510,
"preview": "n# Browser Agent (基于 LangGraph) - super_agents/browser_use\n\n## 概述\n\n本项目实现了一个基于 LangGraph 框架的 Web 浏览和交互 Agent。其核心目标是让一个大型语"
},
{
"path": "super_agents/browser_use/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/browser_use/agent/__init__.py",
"chars": 126,
"preview": "# super_agents/browser_use/agent/__init__.py\n\"\"\"\nBrowser agent module that handles browser automation using LLM guidance"
},
{
"path": "super_agents/browser_use/agent/graph.py",
"chars": 2790,
"preview": "# super_agents/browser_use/agent/graph.py\nimport logging\nfrom typing import Dict, Any\n\nfrom langchain_core.runnables.bas"
},
{
"path": "super_agents/browser_use/agent/nodes.py",
"chars": 6511,
"preview": "# super_agents/browser_use/agent/nodes.py\nimport asyncio\nimport logging\nfrom typing import Dict, Any, Optional\n\n# --- La"
},
{
"path": "super_agents/browser_use/agent/prompts.py",
"chars": 4209,
"preview": "from typing import List\n\ndef create_agent_prompt(\n task: str,\n current_browser_content: str, # This string now pot"
},
{
"path": "super_agents/browser_use/agent/schemas.py",
"chars": 4098,
"preview": "# super_agents/browser_use/agent/schemas.py\nfrom typing import Literal, Optional, Union, List, Dict, Any, Type\n# Use Pyd"
},
{
"path": "super_agents/browser_use/agent/state.py",
"chars": 711,
"preview": "# super_agents/browser_use/agent/state.py\nfrom typing import Dict, List, Optional, Any, TypedDict\n\n# Define the state st"
},
{
"path": "super_agents/browser_use/agent/tools.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/browser_use/agent.py",
"chars": 6595,
"preview": "# super_agents/browser_use/agent.py\n\"\"\"\nAgent API for browser-based task execution.\nProvides a simplified interface simi"
},
{
"path": "super_agents/browser_use/browser/browser.py",
"chars": 47790,
"preview": "# super_agents/browser_use/browser/browser.py\n\"\"\"\nStreamlined Playwright browser implementation with integrated percepti"
},
{
"path": "super_agents/browser_use/browser/detector.py",
"chars": 15533,
"preview": "# super_agents/browser_use/browser/detector.py\nimport os\nimport json\nimport logging\nimport base64\nfrom typing import Lis"
},
{
"path": "super_agents/browser_use/browser/findVisibleInteractiveElements.js",
"chars": 29154,
"preview": "() => {\n\n console.time('totalExecutionTime');\n\n // Define element weights for interactive likelihood - moved to hi"
},
{
"path": "super_agents/browser_use/browser/models.py",
"chars": 3144,
"preview": "# super_agents/browser_use/browser/models.py\nfrom typing import List, Dict, Optional, Any\n\n# --- Force Pydantic V2 Impor"
},
{
"path": "super_agents/browser_use/browser/utils.py",
"chars": 12444,
"preview": "import base64\nimport logging\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Dict, List\n\nfrom PIL imp"
},
{
"path": "super_agents/browser_use/llm.py",
"chars": 8250,
"preview": "# super_agents/browser_use/llm.py\nimport os\nimport json\nimport asyncio\nfrom typing import Optional, Tuple, Type, Dict\n\n#"
},
{
"path": "super_agents/browser_use/main.py",
"chars": 6688,
"preview": "# super_agents/browser_use/main.py\nimport asyncio\nimport argparse\nimport logging\nimport os\nfrom typing import Dict\nfrom "
},
{
"path": "super_agents/customized_deep_research/PRD_README.md",
"chars": 23848,
"preview": "**M&A DeepResearch Agent - Product Document**\n\n**Version:** 1.0 (Optimized - YF/Web Focus)\n**Date:** 2025年4月21日\n**Status"
},
{
"path": "super_agents/customized_deep_research/README.md",
"chars": 1479,
"preview": "# M&A DeepResearch Agent (Preliminary Assessment)\n\n这是 Deep Research Agent 的一个定制化版本,旨在简化 M&A 专业人士的研究流程,帮助他们快速评估潜在标的,并为后续的"
},
{
"path": "super_agents/customized_deep_research/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/customized_deep_research/main.py",
"chars": 17266,
"preview": "# /Users/peng/Dev/AI_AGENTS/mentis/super_agents/company_deep_research/main.py\r\n# (Optimized Version - Accepts JSON Input"
},
{
"path": "super_agents/customized_deep_research/reason_graph/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/customized_deep_research/reason_graph/graph.py",
"chars": 12426,
"preview": "# /Users/peng/Dev/AI_AGENTS/mentis/super_agents/company_deep_research/reason_graph/graph.py\r\n# (Optimized Version v2 - A"
},
{
"path": "super_agents/customized_deep_research/reason_graph/nodes.py",
"chars": 69082,
"preview": "# /Users/peng/Dev/AI_AGENTS/mentis/super_agents/company_deep_research/reason_graph/nodes.py\r\n# (Optimized Version)\r\n\r\nim"
},
{
"path": "super_agents/customized_deep_research/reason_graph/prompt.py",
"chars": 24216,
"preview": "# --- REVISED Plan Research Prompt ---\r\n# Goal: Generate deeper, more diverse queries, handle YF failure, create actiona"
},
{
"path": "super_agents/customized_deep_research/reason_graph/schemas.py",
"chars": 4090,
"preview": "from typing import List, Optional, Dict, Any, Literal\r\nfrom pydantic import BaseModel, Field\r\nimport time\r\n\r\n# --- Schem"
},
{
"path": "super_agents/customized_deep_research/reason_graph/state.py",
"chars": 3241,
"preview": "# /Users/peng/Dev/AI_AGENTS/mentis/super_agents/company_deep_research/reason_graph/state.py\r\n# (Optimized Version v2 - A"
},
{
"path": "super_agents/customized_deep_research/reason_graph/tools.py",
"chars": 23629,
"preview": "import os\r\nimport json\r\nimport time\r\nimport re\r\nimport logging # Use logging instead of just print for warnings/errors\r\n"
},
{
"path": "super_agents/deep_research/README.md",
"chars": 5250,
"preview": "# DeepResearch Agent\n\n## 概述\n\nDeepResearch Agent 是一个基于 LangGraph 构建的、能够执行深度研究并调用外部工具的复杂 Agent。它能够针对用户提供的任意主题,自动化地执行一个完整的研"
},
{
"path": "super_agents/deep_research/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/deep_research/a2a_adapter/README.md",
"chars": 8012,
"preview": "# DeepResearch A2A 适配器\n\n## 概述\n\n本模块提供了一个将 **DeepResearch Agent**(一个基于 LangGraph 构建的、能够执行深度研究并调用外部工具的复杂 Agent)与 Google 的 *"
},
{
"path": "super_agents/deep_research/a2a_adapter/__init__.py",
"chars": 299,
"preview": "# super_agents/deep_research/a2a_adapter/__init__.py\n\n# 确保导出关键组件\nfrom super_agents.deep_research.a2a_adapter.deep_resear"
},
{
"path": "super_agents/deep_research/a2a_adapter/client_example.py",
"chars": 10980,
"preview": "# super_agents/deep_research/a2a_adapter/client_example.py\n\nimport os\nimport sys\nimport asyncio\nimport json\nimport loggi"
},
{
"path": "super_agents/deep_research/a2a_adapter/deep_research_task_manager.py",
"chars": 23912,
"preview": "# super_agents/deep_research/a2a_adapter/deep_research_task_manager.py\nimport asyncio\nimport logging\nimport traceback\nfr"
},
{
"path": "super_agents/deep_research/a2a_adapter/dr_terminal_output.md",
"chars": 61439,
"preview": "python3 super_agents/deep_research/a2a_adapter/client_example.py\n\n=== DeepResearch A2A 客户端示例 ===\n\n连接到服务器: http://127.0.0"
},
{
"path": "super_agents/deep_research/a2a_adapter/run_server.py",
"chars": 1072,
"preview": "# super_agents/deep_research/a2a_adapter/run_server.py\n\nimport os\nimport sys\nimport logging\nfrom pathlib import Path\n\n# "
},
{
"path": "super_agents/deep_research/a2a_adapter/setup.py",
"chars": 5042,
"preview": "# super_agents/deep_research/a2a_adapter/setup.py\n\nimport logging\nimport asyncio\nfrom typing import Dict, Any, Optional\n"
},
{
"path": "super_agents/deep_research/main.py",
"chars": 10847,
"preview": "# main.py\nimport sys\nfrom pathlib import Path\nimport asyncio\nimport json\nimport os # <--- 导入 os 模块\nimport re\nimport time"
},
{
"path": "super_agents/deep_research/output/research_report_analyze_smartvalue_co_ltds_9417t_core_business_key_productsservices_eg_government_cloud_solutions_mo_20250418_125137.md",
"chars": 22869,
"preview": "## Introduction\n\nSmartvalue Co Ltd (9417.T) stands as a significant player in Japan's IT landscape, focusing on cloud so"
},
{
"path": "super_agents/deep_research/output/research_report_id_like_a_thorough_analysis_of_li_auto_stock_including_summary_company_overview_key_metrics_performa_20250327_121800.md",
"chars": 24818,
"preview": "## Introduction\n\nLI Auto Inc., a prominent player in the Chinese electric vehicle (EV) market, has garnered significant "
},
{
"path": "super_agents/deep_research/output/research_report_id_like_a_thorough_analysis_of_xpev_stock_including_summary_company_overview_key_metrics_performance_20250327_105350.md",
"chars": 42780,
"preview": "## Query\nI'd like a thorough analysis of XPEV stock, including: Summary: Company overview, key metrics, performance data"
},
{
"path": "super_agents/deep_research/reason_graph/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/deep_research/reason_graph/graph.py",
"chars": 5834,
"preview": "from typing import Literal, Optional, Dict, Any\nfrom langgraph.graph import StateGraph, END\nfrom super_agents.deep_resea"
},
{
"path": "super_agents/deep_research/reason_graph/nodes.py",
"chars": 42716,
"preview": "import asyncio\nimport json\nimport time\nfrom datetime import datetime\nfrom typing import Dict, Any, List, Literal\nfrom la"
},
{
"path": "super_agents/deep_research/reason_graph/prompt.py",
"chars": 2873,
"preview": "# reason_graph/prompt.py\nFINAL_REPORT_SYSTEM_PROMPT_TEMPLATE = \"\"\"You are an advanced research assistant tasked with wri"
},
{
"path": "super_agents/deep_research/reason_graph/schemas.py",
"chars": 8613,
"preview": "# reason_graph/schemas.py\n\nfrom typing import List, Optional, Literal, Dict, Any\nfrom pydantic import BaseModel, Field\n\n"
},
{
"path": "super_agents/deep_research/reason_graph/state.py",
"chars": 2072,
"preview": "import operator\nfrom typing import TypedDict, List, Optional, Annotated, Dict, Any, Literal\n\n# Use relative import to ac"
},
{
"path": "super_agents/deep_research/reason_graph/tools.py",
"chars": 15122,
"preview": "# reason_graph/tools.py\n\nimport os\nimport json\nimport time\nimport re\nimport asyncio\nfrom datetime import datetime\nfrom t"
},
{
"path": "super_agents/deep_research/tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "super_agents/deep_research/tests/test_graph.py",
"chars": 0,
"preview": ""
},
{
"path": "web/.gitignore",
"chars": 481,
"preview": "# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.\n\n# dependencies\n/node_modules\n/.pn"
},
{
"path": "web/README.md",
"chars": 7098,
"preview": "# Mentis 的 LangGraph + NextJS 集成演示\n\nMentis 演示项目展示了如何使用LangGraph创建AI代理并将其集成到NextJS应用程序中。它具体演示了 **ReAct Agent** (用于通用任务) 和"
},
{
"path": "web/app/api/agent/route.ts",
"chars": 2203,
"preview": "import { NextRequest, NextResponse } from 'next/server';\n\n// This API route serves as a proxy to the agent endpoint of t"
},
{
"path": "web/app/chat/[id]/agent-types.ts",
"chars": 993,
"preview": "import { WithMessages } from \"@/hooks/useLangGraphAgent/types\";\n\n// The agent state which mirrors the LangGraph state. I"
},
{
"path": "web/app/chat/[id]/components/chatbot-node.tsx",
"chars": 5209,
"preview": "import { AgentState } from '../agent-types';\nimport { Bot, User } from 'lucide-react';\nimport { cn } from '@/lib/utils';"
},
{
"path": "web/app/chat/[id]/components/checkpoint-card.tsx",
"chars": 3129,
"preview": "import { Button } from '@/components/ui/button';\nimport { AppCheckpoint, ReplayAgentInput } from '@/hooks/useLangGraphAg"
},
{
"path": "web/app/chat/[id]/components/node-card.tsx",
"chars": 1220,
"preview": "import { GraphNode } from \"@/hooks/useLangGraphAgent/types\";\nimport { AgentState } from \"../agent-types\";\nimport { Butto"
},
{
"path": "web/app/chat/[id]/components/reminder.tsx",
"chars": 1594,
"preview": "import { Card, CardHeader, CardFooter, CardTitle } from \"@/components/ui/card\";\nimport { Button } from \"@/components/ui/"
},
{
"path": "web/app/chat/[id]/components/research/report-preview.tsx",
"chars": 864,
"preview": "import { AgentState } from \"../../agent-types\";\nimport { Card, CardContent, CardHeader, CardTitle } from \"@/components/u"
},
{
"path": "web/app/chat/[id]/components/research/research-node.tsx",
"chars": 696,
"preview": "import { AgentState } from \"../../agent-types\";\nimport ResearchStatus from \"./research-status\";\nimport SearchResults fro"
},
{
"path": "web/app/chat/[id]/components/research/research-status.tsx",
"chars": 1101,
"preview": "import { AgentState } from \"../../agent-types\";\nimport { Loader2 } from \"lucide-react\";\nimport { Card, CardContent } fro"
},
{
"path": "web/app/chat/[id]/components/research/search-results.tsx",
"chars": 1220,
"preview": "import { AgentState } from \"../../agent-types\";\nimport { Card, CardContent, CardHeader, CardTitle } from \"@/components/u"
},
{
"path": "web/app/chat/[id]/components/weather/cloudy.tsx",
"chars": 1778,
"preview": "\"use client\"\n\nimport { Cloud, Droplets, Wind } from \"lucide-react\"\nimport { Card } from \"@/components/ui/card\"\n\nexport d"
},
{
"path": "web/app/chat/[id]/components/weather/rainy.tsx",
"chars": 3091,
"preview": "\"use client\"\n\nimport { Cloud, Droplets, Wind } from \"lucide-react\"\nimport { Card } from \"@/components/ui/card\"\nimport { "
},
{
"path": "web/app/chat/[id]/components/weather/snowy.tsx",
"chars": 4262,
"preview": "\"use client\"\n\nimport { Snowflake, Thermometer, Wind } from \"lucide-react\"\nimport { Card } from \"@/components/ui/card\"\nim"
},
{
"path": "web/app/chat/[id]/components/weather/sunny.tsx",
"chars": 2674,
"preview": "\"use client\"\n\nimport { Sun, Thermometer, Wind } from \"lucide-react\"\nimport { Card } from \"@/components/ui/card\"\nimport {"
},
{
"path": "web/app/chat/[id]/components/weather/weather-node.tsx",
"chars": 1231,
"preview": "import { AgentState } from \"../../agent-types\";\nimport { Loader2 } from \"lucide-react\";\nimport { Card, CardContent } fro"
},
{
"path": "web/app/chat/[id]/page.tsx",
"chars": 10505,
"preview": "'use client';\n\nimport { useState, useEffect, useRef } from 'react';\nimport { useParams } from 'next/navigation';\nimport "
},
{
"path": "web/app/chat/page.tsx",
"chars": 77,
"preview": "export default function ChatsPage() {\n return (\n <div>\n </div>\n )\n}\n\n"
},
{
"path": "web/app/deep-research/[id]/page.tsx",
"chars": 18039,
"preview": "'use client'; \n\nimport { useState, useEffect, useRef, useCallback, useMemo } from 'react';\nimport { useParams } from 'ne"
},
{
"path": "web/app/deep-research/page.tsx",
"chars": 3794,
"preview": "// @filename: app/deepresearch/page.tsx\n'use client';\n\nimport { useState } from 'react';\nimport { useRouter } from 'next"
},
{
"path": "web/app/globals.css",
"chars": 2496,
"preview": "@import 'react-json-view-lite/dist/index.css';\n\n@tailwind base;\n@tailwind components;\n@tailwind utilities;\n\nbody {\n fon"
},
{
"path": "web/app/layout.tsx",
"chars": 1358,
"preview": "import type { Metadata } from \"next\";\nimport { Geist, Geist_Mono } from \"next/font/google\";\nimport \"./globals.css\";\nimpo"
},
{
"path": "web/app/page.tsx",
"chars": 7810,
"preview": "// @filename: pages/index.tsx (或者您的主页文件路径)\n'use client';\n\nimport React, { useState } from 'react'; // 导入 React 和 useStat"
},
{
"path": "web/components/app-sidebar.tsx",
"chars": 6720,
"preview": "// @filename: components/layout/app-sidebar.tsx (或者您的实际路径)\n'use client';\n\nimport Link from \"next/link\";\nimport { usePath"
},
{
"path": "web/components/theme-provider.tsx",
"chars": 527,
"preview": "\"use client\"\n\nimport * as React from \"react\"\nimport { useEffect, useState } from 'react'\nimport { ThemeProvider as NextT"
},
{
"path": "web/components/theme-switcher.tsx",
"chars": 1889,
"preview": "\"use client\"\n\nimport { useState } from \"react\"\nimport { useTheme } from \"next-themes\"\nimport { Moon, SunMedium, Monitor "
}
]
// ... and 40 more files (download for full content)
About this extraction
This page contains the full source code of the foreveryh/mentis GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 240 files (1.4 MB), approximately 380.7k tokens, and a symbol index with 824 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.